Compare commits
221 Commits
SHA1:
39cd81de92
62c74d7749
254ec19412
97bbd1bf9e
54aec2fa8b
e1dfcddc79
344add3885
752dfc4bda
72be0eb189
fdc00811ce
6c5bdef567
ea545b395b
f6b1d1cc66
d1993187b6
adfcfb6788
07995a3ca3
dd76eba73e
b13da548b9
fe0efb25e8
b0b00c4a47
19ca962b15
d479f0c810
1d5dac1b1d
96c3d98256
0b3dc8ed8c
9a75eb11cb
92dd02aaf6
41bfd24919
fddc8aa410
735ee5206a
a86bf52390
2deee0e01f
2d7d6e61be
4ec17f7eb5
fcad02e1de
708fab3082
3bbb70b3d3
0c80f4fa87
21486225c8
a2920c3757
6e0f58e7a9
dee74aa440
4acd7229d3
9608ac3486
ad9d1625a6
1a558edd0b
5976c392ad
a92b2d6a00
7d4bf881f7
05be0984b4
cdbc18fc4e
2995aedf1d
45f9b32e0f
1d0b662b42
38f5cb5b71
476328b302
4c3efb532d
dafe3197ab
90d7425afe
2d81f7b0c0
04e24b14f0
a2bf852818
0ac612476a
0c6b83d656
4e6dc58727
66bbd4200c
326c835e60
1a48cbf164
3480777d89
a71bc08ff4
df766e668f
0a8f3ae0b3
da6e67b321
dec00364b3
5637087cc9
5ad4bdc482
823867f5b7
c6772c92b8
79f6a79cfc
4c7f100d22
9070d11f4c
124b93f31c
0f22f53b36
3784dbf029
4c95d58c41
38d4675921
7b8aa893fa
fb2678f96e
486ed27299
df4827f2c0
ef1b436350
b19b4bfcb0
e64b9f9204
9c33683c25
ba20987ae7
729d41fe6a
905147a5ee
0c41e0d06b
b37b59b726
60b9b48e71
abf8b5d475
7eebe1483e
9a76091785
c386b06fc6
6bcfc5c1a4
768e10d0b3
e7244387c7
5ade6c25f3
784fa1c2e3
66f4e6a809
8074d2b0c3
b02d49ab26
82a0cd2ad4
ee1a9c3230
db24c01106
ae3cfa8f0d
b56c111e93
bbeb0256f1
005a5b9677
55bee04856
42fd40a124
f21508b9e1
ee7a308de4
636e674ee7
b02b374b46
1c13afa8f9
69b92fab7e
6ab77df3f5
264c19582b
8acd4d9afc
65b0cea6bd
cfe01b2e6a
b19b032be3
5441708634
3c9b370255
510544770b
e8293841c2
46114bf28e
0d7e61f06f
fd6a54dfbc
1ea5722b8f
bc8fadf494
a76934ad33
d7a122a026
6c25588e63
17a1f579d0
998db63933
c0fa14d94a
6fd129844d
baae780c99
09a1da25ed
298c6aaef6
a329324139
a83e2ffeab
5d7449a121
ebbe4958c6
73b2cc4977
7ecfde8150
796480a38b
4ae6aede60
e0085e6612
194da6f867
3fade35260
5e39918fe1
f4dc47a805
12c65bacf1
ba37f3562d
fce4659388
0a15870a82
9866de5e3d
9d3f183ba9
fe233f3b3d
be3bd0f90b
3c053adbb5
c040ec22f7
43f627ba92
2b67de2e3f
477859662a
ccd7241e2f
f37ef25bdd
b93bbab454
9cebc837d5
1bc1d81a00
dda72456d7
8f2f3dd710
85959a99ea
36700a0a87
dd4b42bac1
9626c28619
463c03462a
a086427a7d
4d431383d3
d10332a15d
43772efc6e
0af2da0437
d09db6c2e9
bc871bd19d
b11a6a029d
6a7be83efe
58169da46a
158f49e246
3e4a67f350
e0e5b4426a
7158b304f5
833eca6d2f
151acf5d96
4a363fb4a7
229adeb746
1eff9a1e89
ed4f0a0edc
13bed6226e
d937daedb3
8cce51135c
0cfe1b3f13
05c16a6e59
3294b516d3
139bcedc53
cf9ea3c4c7
e84fde3e14
1de47507ff
1a9948a488
Cargo.toml (12)
@@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.8.15"
version = "0.9.1"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"
@@ -18,7 +18,6 @@ apt-pkg-native = "0.3.1" # custom patched version
base64 = "0.12"
bitflags = "1.2.1"
bytes = "0.5"
chrono = "0.4" # Date and time library for Rust
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
anyhow = "1.0"
@@ -26,7 +25,7 @@ futures = "0.3"
h2 = { version = "0.2", features = ["stream"] }
handlebars = "3.0"
http = "0.2"
hyper = "0.13"
hyper = "0.13.6"
lazy_static = "1.4"
libc = "0.2"
log = "0.4"
@@ -39,11 +38,11 @@ pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0"
pathpatterns = "0.1.2"
proxmox = { version = "0.3.5", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
proxmox = { version = "0.4.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
proxmox-fuse = "0.1.0"
pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
regex = "1.2"
rustyline = "6"
@@ -62,6 +61,7 @@ walkdir = "2"
xdg = "2.2"
zstd = { version = "0.4", features = [ "bindgen" ] }
nom = "5.1"
crossbeam-channel = "0.4"

[features]
default = []
README.rst (20)
@@ -13,7 +13,7 @@ Versioning of proxmox helper crates

To use current git master code of the proxmox* helper crates, add::

git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox"
git = "git://git.proxmox.com/git/proxmox"

or::

@@ -22,6 +22,7 @@ or::
to the proxmox dependency, and update the version to reflect the current,
pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").


Local cargo config
==================

@@ -35,3 +36,20 @@ checksums are not compatible.
To reference new dependencies (or updated versions) that are not yet packaged,
the dependency needs to point directly to a path or git source (e.g., see
example for proxmox crate above).


Build
=====
on Debian Buster

Setup:
1. # echo 'deb http://download.proxmox.com/debian/devel/ buster main' >> /etc/apt/sources.list.d/proxmox-devel.list
2. # sudo wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
3. # sudo apt update
4. # sudo apt install devscripts debcargo clang
5. # git clone git://git.proxmox.com/git/proxmox-backup.git
6. # sudo mk-build-deps -ir

Note: 2. may be skipped if you already added the PVE or PBS package repository

You are now able to build using the Makefile or cargo itself.
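As a quick illustration of the setup steps above, a complete source build on a prepared Buster host could look like the following sketch. It is not part of the diff; only the clone URL and ``mk-build-deps`` come from the steps above, and the plain ``cargo build`` is a generic Rust build command rather than a documented Makefile target::

   # git clone git://git.proxmox.com/git/proxmox-backup.git
   # cd proxmox-backup
   # sudo mk-build-deps -ir
   # cargo build --release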
debian/changelog (183)
@@ -1,3 +1,186 @@
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium

* TLS speedups (use SslAcceptor::mozilla_intermediate_v5)

* introduction.rst: add History

* fix #2847: proxmox-backup-client: add change-owner cmd

* proxmox-backup-client key: rename 'paper-key' command to 'paperkey'

* don't require WorkerTask in backup/ (introduce TaskState trait)

* fix #3070: replace internal with public URLs

* backup: index readers: drop useless shared lock

* add "Build" section to README.rst

* reader: actually allow users to downlod their own backups

* reader: track index chunks and limit access

* Userid: fix borrow/deref recursion

* depend on proxmox 0.4.3

* api: datastore: require allocate privilege for deletion

* fuse_loop: handle unmap on crashed instance

* fuse_loop: wait for instance to close after killing

* fuse_loop: add automatic cleanup of run files and dangling instances

* mount/map: use names for map/unmap for easier use

* ui: network: remove create VLAN option

* ui: Dashboard/TaskSummary: add Verifies to the Summary

* ui: implment task history limit and make it configurable

* docs: installation: add system requirements section

* client: implement map/unmap commands for .img backups

-- Proxmox Support Team <support@proxmox.com> Wed, 14 Oct 2020 13:42:12 +0200

rust-proxmox-backup (0.9.0-2) unstable; urgency=medium

* ui: RemoteEdit: only send delete on update

-- Proxmox Support Team <support@proxmox.com> Fri, 02 Oct 2020 15:37:45 +0200

rust-proxmox-backup (0.9.0-1) unstable; urgency=medium

* use ParallelHandler to verify chunks

* client: add new paper-key command to CLI tool

* server: split task list in active and archived

* tools: add logrotate module and use it for archived tasks, allowing to save
more than 100 thousands of tasks efficiently in the archive

* require square [brackets] for ipv6 addresses and fix ipv6 handling for
remotes/sync jobs

* ui: RemoteEdit: make comment and fingerprint deletable

* api/disks: create zfs: enable import systemd service unit for newly created
ZFS pools

* client and remotes: add support to specify a custom port number. The server
is still always listening on 8007, but you can now use things like reverse
proxies or port mapping.

* ui: RemoteEdit: allow to specify a port in the host field

* client pull: log progress

* various fixes and improvements

-- Proxmox Support Team <support@proxmox.com> Thu, 01 Oct 2020 16:19:40 +0200

rust-proxmox-backup (0.8.21-1) unstable; urgency=medium

* depend on crossbeam-channel

* speedup sync jobs (allow up to 4 worker threads)

* improve docs

* use jobstate mechanism for verify/garbage_collection schedules

* proxy: fix error handling in prune scheduling

-- Proxmox Support Team <support@proxmox.com> Fri, 25 Sep 2020 13:20:19 +0200

rust-proxmox-backup (0.8.20-1) unstable; urgency=medium

* improve sync speed

* benchmark: use compressable data to get more realistic result

* docs: add onlineHelp to some panels

-- Proxmox Support Team <support@proxmox.com> Thu, 24 Sep 2020 13:15:45 +0200

rust-proxmox-backup (0.8.19-1) unstable; urgency=medium

* src/api2/reader.rs: use std::fs::read instead of tokio::fs::read

-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 13:30:27 +0200

rust-proxmox-backup (0.8.18-1) unstable; urgency=medium

* src/client/pull.rs: allow up to 20 concurrent download streams

* docs: add version and date to HTML index

-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 12:39:26 +0200

rust-proxmox-backup (0.8.17-1) unstable; urgency=medium

* src/client/pull.rs: open temporary manifest with truncate(true)

* depend on proxmox 0.4.1

* fix #3017: check array boundaries before using

* datastore/prune schedules: use JobState for tracking of schedules

* improve docs

* fix #3015: allow user self-service

* add verification scheduling to proxmox-backup-proxy

* fix #3014: allow DataStoreAdmins to list DS config

* depend on pxar 0.6.1

* fix #2942: implement lacp bond mode and bond_xmit_hash_policy

* api2/pull: make pull worker abortable

* fix #2870: renew tickets in HttpClient

* always allow retrieving (censored) subscription info

* fix #2957: allow Sys.Audit access to node RRD

* backup: check all referenced chunks actually exist

* backup: check verify state of previous backup before allowing reuse

* avoid chrono dependency

-- Proxmox Support Team <support@proxmox.com> Mon, 21 Sep 2020 14:08:32 +0200

rust-proxmox-backup (0.8.16-1) unstable; urgency=medium

* BackupDir: make constructor fallible

* handle invalid mtime when formating entries

* ui/docs: add onlineHelp button for syncjobs

* docs: add section for calendar events

* tools/systemd/parse_time: enable */x syntax for calendar events

* docs: set html img width limitation through css

* docs: use alabaster theme

* server: set http2 max frame size

* doc: Add section "FAQ"

-- Proxmox Support Team <support@proxmox.com> Fri, 11 Sep 2020 15:54:57 +0200

rust-proxmox-backup (0.8.15-1) unstable; urgency=medium

* verify: skip benchmark directory
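As a usage note for the 'paperkey' rename recorded in the 0.9.1 entry above, the renamed subcommand would be invoked as shown below. This is an illustrative sketch only; it is not part of the diff, and any additional arguments (key file path, output format) are left out because they are not documented here::

   # proxmox-backup-client key paperkey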
debian/control (25)
@@ -11,8 +11,8 @@ Build-Depends: debhelper (>= 11),
librust-base64-0.12+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-0.5+default-dev,
librust-chrono-0.4+default-dev,
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.4+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-futures-0.3+default-dev,
@@ -20,7 +20,7 @@ Build-Depends: debhelper (>= 11),
librust-h2-0.2+stream-dev,
librust-handlebars-3+default-dev,
librust-http-0.2+default-dev,
librust-hyper-0.13+default-dev,
librust-hyper-0.13+default-dev (>= 0.13.6-~~),
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev,
@@ -34,14 +34,14 @@ Build-Depends: debhelper (>= 11),
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-utils-0.1+default-dev,
librust-proxmox-0.3+api-macro-dev (>= 0.3.5-~~),
librust-proxmox-0.3+default-dev (>= 0.3.5-~~),
librust-proxmox-0.3+sortable-macro-dev (>= 0.3.5-~~),
librust-proxmox-0.3+websocket-dev (>= 0.3.5-~~),
librust-proxmox-0.4+api-macro-dev (>= 0.4.3-~~),
librust-proxmox-0.4+default-dev (>= 0.4.3-~~),
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.3-~~),
librust-proxmox-0.4+websocket-dev (>= 0.4.3-~~),
librust-proxmox-fuse-0.1+default-dev,
librust-pxar-0.6+default-dev,
librust-pxar-0.6+futures-io-dev,
librust-pxar-0.6+tokio-io-dev,
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
librust-regex-1+default-dev (>= 1.2-~~),
librust-rustyline-6+default-dev,
librust-serde-1+default-dev,
@@ -78,6 +78,7 @@ Build-Depends: debhelper (>= 11),
uuid-dev,
debhelper (>= 12~),
bash-completion,
pve-eslint,
python3-docutils,
python3-pygments,
rsync,
@@ -106,7 +107,7 @@ Depends: fonts-font-awesome,
pbs-i18n,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.2-4),
proxmox-widget-toolkit (>= 2.3-1),
pve-xtermjs (>= 4.7.0-1),
smartmontools,
${misc:Depends},
@@ -118,7 +119,9 @@ Description: Proxmox Backup Server daemon with tools and GUI

Package: proxmox-backup-client
Architecture: any
Depends: ${misc:Depends}, ${shlibs:Depends}
Depends: qrencode,
${misc:Depends},
${shlibs:Depends},
Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
debian/control.in (6)
@@ -7,7 +7,7 @@ Depends: fonts-font-awesome,
pbs-i18n,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.2-4),
proxmox-widget-toolkit (>= 2.3-1),
pve-xtermjs (>= 4.7.0-1),
smartmontools,
${misc:Depends},
@@ -19,7 +19,9 @@ Description: Proxmox Backup Server daemon with tools and GUI

Package: proxmox-backup-client
Architecture: any
Depends: ${misc:Depends}, ${shlibs:Depends}
Depends: qrencode,
${misc:Depends},
${shlibs:Depends},
Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
debian/debcargo.toml (1)
@@ -14,6 +14,7 @@ section = "admin"
build_depends = [
"debhelper (>= 12~)",
"bash-completion",
"pve-eslint",
"python3-docutils",
"python3-pygments",
"rsync",
@@ -74,8 +74,10 @@ onlinehelpinfo:
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."

.PHONY: html
html: ${GENERATED_SYNOPSIS}
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
cp custom.css $(BUILDDIR)/html/_static/
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
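For reference, the updated ``html`` recipe above amounts to running Sphinx by hand and then copying the two new static assets next to the generated pages. A minimal sketch, assuming the docs directory is the working directory and ``output`` stands in for ``$(BUILDDIR)``::

   # sphinx-build -b html . output/html
   # cp images/proxmox-logo.svg custom.css output/html/_static/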
@@ -44,12 +44,13 @@ def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize
js_files.append(os.path.join(root, filename))
for js_file in js_files:
fd = open(js_file).read()
match = re.search("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd) # match object is tuple
if match:
anchor = match.groups()[0]
allmatch = re.findall("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd, re.M)
for match in allmatch:
anchor = match
anchor = re.sub('_', '-', anchor) # normalize labels
logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
used_anchors.append(anchor)

return used_anchors
docs/_templates/index-sidebar.html (11, new file)
@@ -0,0 +1,11 @@
<h3>Navigation</h3>
{{ toctree(includehidden=theme_sidebar_includehidden, collapse=True, titles_only=True) }}
{% if theme_extra_nav_links %}
<hr />
<h3>Links</h3>
<ul>
{% for text, uri in theme_extra_nav_links.items() %}
<li class="toctree-l1"><a href="{{ uri }}">{{ text }}</a></li>
{% endfor %}
</ul>
{% endif %}
docs/_templates/sidebar-header.html (7, new file)
@@ -0,0 +1,7 @@
<p class="logo">
<a href="index.html">
<img class="logo" src="_static/proxmox-logo.svg" alt="Logo">
</a>
</p>
<h1 class="logo logo-name"><a href="index.html">Proxmox Backup</a></h1>
<hr style="width:100%;">
File diff suppressed because it is too large
docs/backup-client.rst (712, new file)
@@ -0,0 +1,712 @@
Backup Client Usage
|
||||
===================
|
||||
|
||||
The command line client is called :command:`proxmox-backup-client`.
|
||||
|
||||
|
||||
Repository Locations
|
||||
--------------------
|
||||
|
||||
The client uses the following notation to specify a datastore repository
|
||||
on the backup server.
|
||||
|
||||
[[username@]server[:port]:]datastore
|
||||
|
||||
The default value for ``username`` is ``root@pam``. If no server is specified,
|
||||
the default is the local host (``localhost``).
|
||||
|
||||
You can specify a port if your backup server is only reachable on a different
|
||||
port (e.g. with NAT and port forwarding).
|
||||
|
||||
Note that if the server is an IPv6 address, you have to write it with
|
||||
square brackets (e.g. [fe80::01]).
|
||||
|
||||
You can pass the repository with the ``--repository`` command
|
||||
line option, or by setting the ``PBS_REPOSITORY`` environment
|
||||
variable.
|
||||
|
||||
Here some examples of valid repositories and the real values
|
||||
|
||||
================================ ============ ================== ===========
|
||||
Example User Host:Port Datastore
|
||||
================================ ============ ================== ===========
|
||||
mydatastore ``root@pam`` localhost:8007 mydatastore
|
||||
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
|
||||
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
|
||||
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
|
||||
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
|
||||
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
||||
================================ ============ ================== ===========
|
||||
|
||||
Environment Variables
|
||||
---------------------
|
||||
|
||||
``PBS_REPOSITORY``
|
||||
The default backup repository.
|
||||
|
||||
``PBS_PASSWORD``
|
||||
When set, this value is used for the password required for the
|
||||
backup server.
|
||||
|
||||
``PBS_ENCRYPTION_PASSWORD``
|
||||
When set, this value is used to access the secret encryption key (if
|
||||
protected by password).
|
||||
|
||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||
certificate (only used if the system CA certificates cannot
|
||||
validate the certificate).
|
||||
|
||||
|
||||
Output Format
|
||||
-------------
|
||||
|
||||
Most commands support the ``--output-format`` parameter. It accepts
|
||||
the following values:
|
||||
|
||||
:``text``: Text format (default). Structured data is rendered as a table.
|
||||
|
||||
:``json``: JSON (single line).
|
||||
|
||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||
|
||||
|
||||
Please use the following environment variables to modify output behavior:
|
||||
|
||||
``PROXMOX_OUTPUT_FORMAT``
|
||||
Defines the default output format.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_BORDER``
|
||||
If set (to any value), do not render table borders.
|
||||
|
||||
``PROXMOX_OUTPUT_NO_HEADER``
|
||||
If set (to any value), do not render table headers.
|
||||
|
||||
.. note:: The ``text`` format is designed to be human readable, and
|
||||
not meant to be parsed by automation tools. Please use the ``json``
|
||||
format if you need to process the output.
|
||||
|
||||
|
||||
.. _creating-backups:
|
||||
|
||||
Creating Backups
|
||||
----------------
|
||||
|
||||
This section explains how to create a backup from within the machine. This can
|
||||
be a physical host, a virtual machine, or a container. Such backups may contain file
|
||||
and image archives. There are no restrictions in this case.
|
||||
|
||||
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
||||
|
||||
For the following example you need to have a backup server set up, working
|
||||
credentials and need to know the repository name.
|
||||
In the following examples we use ``backup-server:store1``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup root.pxar:/ --repository backup-server:store1
|
||||
Starting backup: host/elsa/2019-12-03T09:35:01Z
|
||||
Client name: elsa
|
||||
skip mount point: "/boot/efi"
|
||||
skip mount point: "/dev"
|
||||
skip mount point: "/run"
|
||||
skip mount point: "/sys"
|
||||
Uploaded 12129 chunks in 87 seconds (564 MB/s).
|
||||
End Time: 2019-12-03T10:36:29+01:00
|
||||
|
||||
This will prompt you for a password and then uploads a file archive named
|
||||
``root.pxar`` containing all the files in the ``/`` directory.
|
||||
|
||||
.. Caution:: Please note that the proxmox-backup-client does not
|
||||
automatically include mount points. Instead, you will see a short
|
||||
``skip mount point`` notice for each of them. The idea is to
|
||||
create a separate file archive for each mounted disk. You can
|
||||
explicitly include them using the ``--include-dev`` option
|
||||
(i.e. ``--include-dev /boot/efi``). You can use this option
|
||||
multiple times for each mount point that should be included.
|
||||
|
||||
The ``--repository`` option can get quite long and is used by all
|
||||
commands. You can avoid having to enter this value by setting the
|
||||
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
|
||||
over multiple sessions, you should instead add the below line to your
|
||||
``.bashrc`` file.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# export PBS_REPOSITORY=backup-server:store1
|
||||
|
||||
After this you can execute all commands without specifying the ``--repository``
|
||||
option.
|
||||
|
||||
One single backup is allowed to contain more than one archive. For example, if
|
||||
you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2
|
||||
|
||||
This creates a backup of both disks.
|
||||
|
||||
The backup command takes a list of backup specifications, which
|
||||
include the archive name on the server, the type of the archive, and the
|
||||
archive source at the client. The format is:
|
||||
|
||||
<archive-name>.<type>:<source-path>
|
||||
|
||||
Common types are ``.pxar`` for file archives, and ``.img`` for block
|
||||
device images. To create a backup of a block device run the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup mydata.img:/dev/mylvm/mydata
|
||||
|
||||
|
||||
Excluding files/folders from a backup
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Sometimes it is desired to exclude certain files or folders from a backup archive.
|
||||
To tell the Proxmox Backup client when and how to ignore files and directories,
|
||||
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
|
||||
Whenever the backup client encounters such a file in a directory, it interprets
|
||||
each line as glob match patterns for files and directories that are to be excluded
|
||||
from the backup.
|
||||
|
||||
The file must contain a single glob pattern per line. Empty lines are ignored.
|
||||
The same is true for lines starting with ``#``, which indicates a comment.
|
||||
A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
|
||||
to an explicit inclusion. This makes it possible to exclude all entries in a
|
||||
directory except for a few single files/subdirectories.
|
||||
Lines ending in ``/`` match only on directories.
|
||||
The directory containing the ``.pxarexclude`` file is considered to be the root of
|
||||
the given patterns. It is only possible to match files in this directory and its subdirectories.
|
||||
|
||||
``\`` is used to escape special glob characters.
|
||||
``?`` matches any single character.
|
||||
``*`` matches any character, including an empty string.
|
||||
``**`` is used to match subdirectories. It can be used to, for example, exclude
|
||||
all files ending in ``.tmp`` within the directory or subdirectories with the
|
||||
following pattern ``**/*.tmp``.
|
||||
``[...]`` matches a single character from any of the provided characters within
|
||||
the brackets. ``[!...]`` does the complementary and matches any single character
|
||||
not contained within the brackets. It is also possible to specify ranges with two
|
||||
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
|
||||
alphabetic character and ``[0-9]`` matches any one single digit.
|
||||
|
||||
The order of the glob match patterns defines whether a file is included or
|
||||
excluded, that is to say later entries override previous ones.
|
||||
This is also true for match patterns encountered deeper down the directory tree,
|
||||
which can override a previous exclusion.
|
||||
Be aware that excluded directories will **not** be read by the backup client.
|
||||
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
|
||||
``.pxarexclude`` files are treated as regular files and will be included in the
|
||||
backup archive.
|
||||
|
||||
For example, consider the following directory structure:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ls -aR folder
|
||||
folder/:
|
||||
. .. .pxarexclude subfolder0 subfolder1
|
||||
|
||||
folder/subfolder0:
|
||||
. .. file0 file1 file2 file3 .pxarexclude
|
||||
|
||||
folder/subfolder1:
|
||||
. .. file0 file1 file2 file3
|
||||
|
||||
The different ``.pxarexclude`` files contain the following:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# cat folder/.pxarexclude
|
||||
/subfolder0/file1
|
||||
/subfolder1/*
|
||||
!/subfolder1/file2
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# cat folder/subfolder0/.pxarexclude
|
||||
file3
|
||||
|
||||
This would exclude ``file1`` and ``file3`` in ``subfolder0`` and all of
|
||||
``subfolder1`` except ``file2``.
|
||||
|
||||
Restoring this backup will result in:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
ls -aR restored
|
||||
restored/:
|
||||
. .. .pxarexclude subfolder0 subfolder1
|
||||
|
||||
restored/subfolder0:
|
||||
. .. file0 file2 .pxarexclude
|
||||
|
||||
restored/subfolder1:
|
||||
. .. file2
|
||||
|
||||
|
||||
Encryption
|
||||
----------
|
||||
|
||||
Proxmox Backup supports client-side encryption with AES-256 in GCM_
|
||||
mode. To set this up, you first need to create an encryption key:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client key create my-backup.key
|
||||
Encryption Key Password: **************
|
||||
|
||||
The key is password protected by default. If you do not need this
|
||||
extra protection, you can also create it without a password:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client key create /path/to/my-backup.key --kdf none
|
||||
|
||||
Having created this key, it is now possible to create an encrypted backup, by
|
||||
passing the ``--keyfile`` parameter, with the path to the key file.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup etc.pxar:/etc --keyfile /path/to/my-backup.key
|
||||
Password: *********
|
||||
Encryption Key Password: **************
|
||||
...
|
||||
|
||||
.. Note:: If you do not specify the name of the backup key, the key will be
|
||||
created in the default location
|
||||
``~/.config/proxmox-backup/encryption-key.json``. ``proxmox-backup-client``
|
||||
will also search this location by default, in case the ``--keyfile``
|
||||
parameter is not specified.
|
||||
|
||||
You can avoid entering the passwords by setting the environment
|
||||
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
|
||||
|
||||
|
||||
Using a master key to store and recover encryption keys
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You can also use ``proxmox-backup-client key`` to create an RSA public/private
|
||||
key pair, which can be used to store an encrypted version of the symmetric
|
||||
backup encryption key alongside each backup and recover it later.
|
||||
|
||||
To set up a master key:
|
||||
|
||||
1. Create an encryption key for the backup:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client key create
|
||||
creating default key at: "~/.config/proxmox-backup/encryption-key.json"
|
||||
Encryption Key Password: **********
|
||||
...
|
||||
|
||||
The resulting file will be saved to ``~/.config/proxmox-backup/encryption-key.json``.
|
||||
|
||||
2. Create an RSA public/private key pair:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client key create-master-key
|
||||
Master Key Password: *********
|
||||
...
|
||||
|
||||
This will create two files in your current directory, ``master-public.pem``
|
||||
and ``master-private.pem``.
|
||||
|
||||
3. Import the newly created ``master-public.pem`` public certificate, so that
|
||||
``proxmox-backup-client`` can find and use it upon backup.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client key import-master-pubkey /path/to/master-public.pem
|
||||
Imported public master key to "~/.config/proxmox-backup/master-public.pem"
|
||||
|
||||
4. With all these files in place, run a backup job:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup etc.pxar:/etc
|
||||
|
||||
The key will be stored in your backup, under the name ``rsa-encrypted.key``.
|
||||
|
||||
.. Note:: The ``--keyfile`` parameter can be excluded, if the encryption key
|
||||
is in the default path. If you specified another path upon creation, you
|
||||
must pass the ``--keyfile`` parameter.
|
||||
|
||||
5. To test that everything worked, you can restore the key from the backup:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client restore /path/to/backup/ rsa-encrypted.key /path/to/target
|
||||
|
||||
.. Note:: You should not need an encryption key to extract this file. However, if
|
||||
a key exists at the default location
|
||||
(``~/.config/proxmox-backup/encryption-key.json``) the program will prompt
|
||||
you for an encryption key password. Simply moving ``encryption-key.json``
|
||||
out of this directory will fix this issue.
|
||||
|
||||
6. Then, use the previously generated master key to decrypt the file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
|
||||
Enter pass phrase for ./master-private.pem: *********
|
||||
|
||||
7. The target file will now contain the encryption key information in plain
|
||||
text. The success of this can be confirmed by passing the resulting ``json``
|
||||
file, with the ``--keyfile`` parameter, when decrypting files from the backup.
|
||||
|
||||
.. warning:: Without their key, backed up files will be inaccessible. Thus, you should
|
||||
keep keys ordered and in a place that is separate from the contents being
|
||||
backed up. It can happen, for example, that you back up an entire system, using
|
||||
a key on that system. If the system then becomes inaccessible for any reason
|
||||
and needs to be restored, this will not be possible as the encryption key will be
|
||||
lost along with the broken system. In preparation for the worst case scenario,
|
||||
you should consider keeping a paper copy of this key locked away in
|
||||
a safe place.
|
||||
|
||||
|
||||
Restoring Data
|
||||
--------------
|
||||
|
||||
The regular creation of backups is a necessary step to avoiding data
|
||||
loss. More importantly, however, is the restoration. It is good practice to perform
|
||||
periodic recovery tests to ensure that you can access the data in
|
||||
case of problems.
|
||||
|
||||
First, you need to find the snapshot which you want to restore. The snapshot
|
||||
command provides a list of all the snapshots on the server:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client snapshots
|
||||
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
|
||||
│ snapshot │ size │ files │
|
||||
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
|
||||
│ host/elsa/2019-12-03T09:30:15Z │ 51788646825 │ root.pxar catalog.pcat1 index.json │
|
||||
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
||||
│ host/elsa/2019-12-03T09:35:01Z │ 51790622048 │ root.pxar catalog.pcat1 index.json │
|
||||
├────────────────────────────────┼─────────────┼────────────────────────────────────┤
|
||||
...
|
||||
|
||||
You can inspect the catalog to find specific files.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client catalog dump host/elsa/2019-12-03T09:35:01Z
|
||||
...
|
||||
d "./root.pxar.didx/etc/cifs-utils"
|
||||
l "./root.pxar.didx/etc/cifs-utils/idmap-plugin"
|
||||
d "./root.pxar.didx/etc/console-setup"
|
||||
...
|
||||
|
||||
The restore command lets you restore a single archive from the
|
||||
backup.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/
|
||||
|
||||
To get the contents of any archive, you can restore the ``index.json`` file in the
|
||||
repository to the target path '-'. This will dump the contents to the standard output.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z index.json -
|
||||
|
||||
|
||||
Interactive Restores
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If you only want to restore a few individual files, it is often easier
|
||||
to use the interactive recovery shell.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client catalog shell host/elsa/2019-12-03T09:35:01Z root.pxar
|
||||
Starting interactive shell
|
||||
pxar:/ > ls
|
||||
bin boot dev etc home lib lib32
|
||||
...
|
||||
|
||||
The interactive recovery shell is a minimal command line interface that
|
||||
utilizes the metadata stored in the catalog to quickly list, navigate and
|
||||
search files in a file archive.
|
||||
To restore files, you can select them individually or match them with a glob
|
||||
pattern.
|
||||
|
||||
Using the catalog for navigation reduces the overhead considerably because only
|
||||
the catalog needs to be downloaded and, optionally, decrypted.
|
||||
The actual chunks are only accessed if the metadata in the catalog is not enough
|
||||
or for the actual restore.
|
||||
|
||||
Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
|
||||
working directory and list directory contents in the archive.
|
||||
``pwd`` shows the full path of the current working directory with respect to the
|
||||
archive root.
|
||||
|
||||
Being able to quickly search the contents of the archive is a commonly needed feature.
|
||||
That's where the catalog is most valuable.
|
||||
For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
pxar:/ > find etc/**/*.txt --select
|
||||
"/etc/X11/rgb.txt"
|
||||
pxar:/ > list-selected
|
||||
etc/**/*.txt
|
||||
pxar:/ > restore-selected /target/path
|
||||
...
|
||||
|
||||
This will find and print all files ending in ``.txt`` located in ``etc/`` or a
|
||||
subdirectory and add the corresponding pattern to the list for subsequent restores.
|
||||
``list-selected`` shows these patterns and ``restore-selected`` finally restores
|
||||
all files in the archive matching the patterns to ``/target/path`` on the local
|
||||
host. This will scan the whole archive.
|
||||
|
||||
With ``restore /target/path`` you can restore the sub-archive given by the current
|
||||
working directory to the local target path ``/target/path`` on your host.
|
||||
By additionally passing a glob pattern with ``--pattern <glob>``, the restore is
|
||||
further limited to files matching the pattern.
|
||||
For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
pxar:/ > cd /etc/
|
||||
pxar:/etc/ > restore /target/ --pattern **/*.conf
|
||||
...
|
||||
|
||||
The above will scan trough all the directories below ``/etc`` and restore all
|
||||
files ending in ``.conf``.
|
||||
|
||||
.. todo:: Explain interactive restore in more detail
|
||||
|
||||
Mounting of Archives via FUSE
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The :term:`FUSE` implementation for the pxar archive allows you to mount a
|
||||
file archive as a read-only filesystem to a mountpoint on your host.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt/mountpoint
|
||||
# ls /mnt/mountpoint
|
||||
bin dev home lib32 libx32 media opt root sbin sys usr
|
||||
boot etc lib lib64 lost+found mnt proc run srv tmp var
|
||||
|
||||
This allows you to access the full contents of the archive in a seamless manner.
|
||||
|
||||
.. note:: As the FUSE connection needs to fetch and decrypt chunks from the
|
||||
backup server's datastore, this can cause some additional network and CPU
|
||||
load on your host, depending on the operations you perform on the mounted
|
||||
filesystem.
|
||||
|
||||
To unmount the filesystem use the ``umount`` command on the mountpoint:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# umount /mnt/mountpoint
|
||||
|
||||
Login and Logout
|
||||
----------------
|
||||
|
||||
The client tool prompts you to enter the logon password as soon as you
|
||||
want to access the backup server. The server checks your credentials
|
||||
and responds with a ticket that is valid for two hours. The client
|
||||
tool automatically stores that ticket and uses it for further requests
|
||||
to this server.
|
||||
|
||||
You can also manually trigger this login/logout using the login and
|
||||
logout commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client login
|
||||
Password: **********
|
||||
|
||||
To remove the ticket, issue a logout:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client logout
|
||||
|
||||
|
||||
.. _backup-pruning:
|
||||
|
||||
Pruning and Removing Backups
|
||||
----------------------------
|
||||
|
||||
You can manually delete a backup snapshot using the ``forget``
|
||||
command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client forget <snapshot>
|
||||
|
||||
|
||||
.. caution:: This command removes all archives in this backup
|
||||
snapshot. They will be inaccessible and unrecoverable.
|
||||
|
||||
|
||||
Although manual removal is sometimes required, the ``prune``
|
||||
command is normally used to systematically delete older backups. Prune lets
|
||||
you specify which backup snapshots you want to keep. The
|
||||
following retention options are available:
|
||||
|
||||
``--keep-last <N>``
|
||||
Keep the last ``<N>`` backup snapshots.
|
||||
|
||||
``--keep-hourly <N>``
|
||||
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||
backup for a single hour, only the latest is kept.
|
||||
|
||||
``--keep-daily <N>``
|
||||
Keep backups for the last ``<N>`` days. If there is more than one
|
||||
backup for a single day, only the latest is kept.
|
||||
|
||||
``--keep-weekly <N>``
|
||||
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||
backup for a single week, only the latest is kept.
|
||||
|
||||
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||
uses the `ISO week date`_ system and handles weeks at
|
||||
the end of the year correctly.
|
||||
|
||||
``--keep-monthly <N>``
|
||||
Keep backups for the last ``<N>`` months. If there is more than one
|
||||
backup for a single month, only the latest is kept.
|
||||
|
||||
``--keep-yearly <N>``
|
||||
Keep backups for the last ``<N>`` years. If there is more than one
|
||||
backup for a single year, only the latest is kept.
|
||||
|
||||
The retention options are processed in the order given above. Each option
|
||||
only covers backups within its time period. The next option does not take care
|
||||
of already covered backups. It will only consider older backups.
|
||||
|
||||
Unfinished and incomplete backups will be removed by the prune command unless
|
||||
they are newer than the last successful backup. In this case, the last failed
|
||||
backup is retained.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client prune <group> --keep-daily 7 --keep-weekly 4 --keep-monthly 3
|
||||
|
||||
|
||||
You can use the ``--dry-run`` option to test your settings. This only
|
||||
shows the list of existing snapshots and what actions prune would take.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client prune host/elsa --dry-run --keep-daily 1 --keep-weekly 3
|
||||
┌────────────────────────────────┬──────┐
|
||||
│ snapshot │ keep │
|
||||
╞════════════════════════════════╪══════╡
|
||||
│ host/elsa/2019-12-04T13:20:37Z │ 1 │
|
||||
├────────────────────────────────┼──────┤
|
||||
│ host/elsa/2019-12-03T09:35:01Z │ 0 │
|
||||
├────────────────────────────────┼──────┤
|
||||
│ host/elsa/2019-11-22T11:54:47Z │ 1 │
|
||||
├────────────────────────────────┼──────┤
|
||||
│ host/elsa/2019-11-21T12:36:25Z │ 0 │
|
||||
├────────────────────────────────┼──────┤
|
||||
│ host/elsa/2019-11-10T10:42:20Z │ 1 │
|
||||
└────────────────────────────────┴──────┘
|
||||
|
||||
.. note:: Neither the ``prune`` command nor the ``forget`` command free space
|
||||
in the chunk-store. The chunk-store still contains the data blocks. To free
|
||||
space you need to perform :ref:`garbage-collection`.
|
||||
|
||||
|
||||
.. _garbage-collection:
|
||||
|
||||
Garbage Collection
|
||||
------------------
|
||||
|
||||
The ``prune`` command removes only the backup index files, not the data
|
||||
from the datastore. This task is left to the garbage collection
|
||||
command. It is recommended to carry out garbage collection on a regular basis.
|
||||
|
||||
The garbage collection works in two phases. In the first phase, all
|
||||
data blocks that are still in use are marked. In the second phase,
|
||||
unused data blocks are removed.
|
||||
|
||||
.. note:: This command needs to read all existing backup index files
|
||||
and touches the complete chunk-store. This can take a long time
|
||||
depending on the number of chunks and the speed of the underlying
|
||||
disks.
|
||||
|
||||
.. note:: The garbage collection will only remove chunks that haven't been used
|
||||
for at least one day (exactly 24h 5m). This grace period is necessary because
|
||||
chunks in use are marked by touching the chunk which updates the ``atime``
|
||||
(access time) property. Filesystems are mounted with the ``relatime`` option
|
||||
by default. This results in a better performance by only updating the
|
||||
``atime`` property if the last access has been at least 24 hours ago. The
|
||||
downside is, that touching a chunk within these 24 hours will not always
|
||||
update its ``atime`` property.
|
||||
|
||||
Chunks in the grace period will be logged at the end of the garbage
|
||||
collection task as *Pending removals*.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client garbage-collect
|
||||
starting garbage collection on store store2
|
||||
Start GC phase1 (mark used chunks)
|
||||
Start GC phase2 (sweep unused chunks)
|
||||
percentage done: 1, chunk count: 219
|
||||
percentage done: 2, chunk count: 453
|
||||
...
|
||||
percentage done: 99, chunk count: 21188
|
||||
Removed bytes: 411368505
|
||||
Removed chunks: 203
|
||||
Original data bytes: 327160886391
|
||||
Disk bytes: 52767414743 (16 %)
|
||||
Disk chunks: 21221
|
||||
Average chunk size: 2486565
|
||||
TASK OK
|
||||
|
||||
|
||||
.. todo:: howto run garbage-collection at regular intervals (cron)
|
||||
|
||||
Benchmarking
|
||||
------------
|
||||
|
||||
The backup client also comes with a benchmarking tool. This tool measures
|
||||
various metrics relating to compression and encryption speeds. You can run a
|
||||
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client benchmark
|
||||
Uploaded 656 chunks in 5 seconds.
|
||||
Time per request: 7659 microseconds.
|
||||
TLS speed: 547.60 MB/s
|
||||
SHA256 speed: 585.76 MB/s
|
||||
Compression speed: 1923.96 MB/s
|
||||
Decompress speed: 7885.24 MB/s
|
||||
AES256/GCM speed: 3974.03 MB/s
|
||||
┌───────────────────────────────────┬─────────────────────┐
|
||||
│ Name │ Value │
|
||||
╞═══════════════════════════════════╪═════════════════════╡
|
||||
│ TLS (maximal backup upload speed) │ 547.60 MB/s (93%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ SHA256 checksum computation speed │ 585.76 MB/s (28%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ ZStd level 1 compression speed │ 1923.96 MB/s (89%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ ZStd level 1 decompression speed │ 7885.24 MB/s (98%) │
|
||||
├───────────────────────────────────┼─────────────────────┤
|
||||
│ AES256 GCM encryption speed │ 3974.03 MB/s (104%) │
|
||||
└───────────────────────────────────┴─────────────────────┘
|
||||
|
||||
.. note:: The percentages given in the output table correspond to a
|
||||
comparison against a Ryzen 7 2700X. The TLS test connects to the
|
||||
local host, so there is no network involved.
|
||||
|
||||
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
||||
rather than the default table format.
|
||||
|
||||
|
docs/calendarevents.rst (100, new file)
@@ -0,0 +1,100 @@
|
||||
|
||||
.. _calendar-events:
|
||||
|
||||
Calendar Events
|
||||
===============
|
||||
|
||||
Introduction and Format
|
||||
-----------------------
|
||||
|
||||
Certain tasks, for example pruning and garbage collection, need to be
|
||||
performed on a regular basis. Proxmox Backup Server uses a format inspired
|
||||
by the systemd Time and Date Specification (see `systemd.time manpage`_)
|
||||
called `calendar events` for its schedules.
|
||||
|
||||
`Calendar events` are expressions to specify one or more points in time.
|
||||
They are mostly compatible with systemd's calendar events.
|
||||
|
||||
The general format is as follows:
|
||||
|
||||
.. code-block:: console
|
||||
:caption: Calendar event
|
||||
|
||||
[WEEKDAY] [[YEARS-]MONTHS-DAYS] [HOURS:MINUTES[:SECONDS]]
|
||||
|
||||
Note that there either has to be at least a weekday, date or time part.
|
||||
If the weekday or date part is omitted, all (week)days are included.
|
||||
If the time part is omitted, the time 00:00:00 is implied.
|
||||
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')
|
||||
|
||||
Weekdays are specified with the abbreviated English version:
|
||||
`mon, tue, wed, thu, fri, sat, sun`.
|
||||
|
||||
Each field can contain multiple values in the following formats:
|
||||
|
||||
* comma-separated: e.g., 01,02,03
|
||||
* as a range: e.g., 01..10
|
||||
* as a repetition: e.g, 05/10 (means starting at 5 every 10)
|
||||
* and a combination of the above: e.g., 01,05..10,12/02
|
||||
* or a `*` for every possible value: e.g., \*:00
|
||||
|
||||
There are some special values that have specific meaning:
|
||||
|
||||
================================= ==============================
|
||||
Value Syntax
|
||||
================================= ==============================
|
||||
`minutely` `*-*-* *:*:00`
|
||||
`hourly` `*-*-* *:00:00`
|
||||
`daily` `*-*-* 00:00:00`
|
||||
`weekly` `mon *-*-* 00:00:00`
|
||||
`monthly` `*-*-01 00:00:00`
|
||||
`yearly` or `annually` `*-01-01 00:00:00`
|
||||
`quarterly` `*-01,04,07,10-01 00:00:00`
|
||||
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
|
||||
================================= ==============================
|
||||
|
||||
|
||||
Here is a table with some useful examples:
|
||||
|
||||
======================== ============================= ===================================
|
||||
Example Alternative Explanation
|
||||
======================== ============================= ===================================
|
||||
`mon,tue,wed,thu,fri` `mon..fri` Every working day at 00:00
|
||||
`sat,sun` `sat..sun` Only on weekends at 00:00
|
||||
`mon,wed,fri` -- Monday, Wednesday, Friday at 00:00
|
||||
`12:05` -- Every day at 12:05 PM
|
||||
`*:00/5` `0/1:0/5` Every five minutes
|
||||
`mon..wed *:30/10` `mon,tue,wed *:30/10` Monday, Tuesday, Wednesday 30, 40 and 50 minutes after every full hour
|
||||
`mon..fri 8..17,22:0/15` -- Every working day every 15 minutes between 8 AM and 6 PM and between 10 PM and 11 PM
|
||||
`fri 12..13:5/20` `fri 12,13:5/20` Friday at 12:05, 12:25, 12:45, 13:05, 13:25 and 13:45
|
||||
`12,14,16,18,20,22:5` `12/2:5` Every day starting at 12:05 until 22:05, every 2 hours
|
||||
`*:*` `0/1:0/1` Every minute (minimum interval)
|
||||
`*-05` -- On the 5th day of every Month
|
||||
`Sat *-1..7 15:00` -- First Saturday each Month at 15:00
|
||||
`2015-10-21` -- 21st October 2015 at 00:00
|
||||
======================== ============================= ===================================
|
||||
|
||||
|
||||
Differences to systemd
|
||||
----------------------
|
||||
|
||||
Not all features of systemd calendar events are implemented:
|
||||
|
||||
* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
|
||||
a specific point in time
|
||||
* no timezone: all schedules use the set timezone on the server
|
||||
* no sub-second resolution
|
||||
* no reverse day syntax (e.g. 2020-03~01)
|
||||
* no repetition of ranges (e.g. 1..10/2)
|
||||
|
||||
Notes on scheduling
|
||||
-------------------
|
||||
|
||||
In `Proxmox Backup`_ scheduling for most tasks is done in the
|
||||
`proxmox-backup-proxy`. This daemon checks all job schedules
|
||||
if they are due every minute. This means that even if
|
||||
`calendar events` can contain seconds, it will only be checked
|
||||
once a minute.
|
||||
|
||||
Also, all schedules will be checked against the timezone set
|
||||
in the `Proxmox Backup`_ server.
|
@@ -12,7 +12,7 @@ Command Line Tools
.. include:: proxmox-backup-manager/description.rst

``pxar``
~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~

.. include:: pxar/description.rst

@@ -10,7 +10,7 @@ Command Syntax
Catalog Shell Commands
~~~~~~~~~~~~~~~~~~~~~~

Those command are available when you start an intercative restore shell:
Those command are available when you start an interactive restore shell:

.. code-block:: console
docs/conf.py (58)
@ -74,7 +74,7 @@ rst_epilog = epilog_file.read()
|
||||
|
||||
# General information about the project.
|
||||
project = 'Proxmox Backup'
|
||||
copyright = '2019-2020, Proxmox Support Team'
|
||||
copyright = '2019-2020, Proxmox Server Solutions GmbH'
|
||||
author = 'Proxmox Support Team'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
@ -97,12 +97,10 @@ language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#
|
||||
# today = ''
|
||||
#
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#
|
||||
# today_fmt = '%B %d, %Y'
|
||||
today_fmt = '%A, %d %B %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
@ -148,7 +146,7 @@ pygments_style = 'sphinx'
|
||||
# keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = True
|
||||
todo_include_todos = not tags.has('release')
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
@ -156,13 +154,51 @@ todo_include_todos = True
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'sphinxdoc'
|
||||
html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
html_theme_options = {
|
||||
'fixed_sidebar': True,
|
||||
'sidebar_includehidden': False,
|
||||
'sidebar_collapse': False,
|
||||
'globaltoc_collapse': False,
|
||||
'show_relbar_bottom': True,
|
||||
'show_powered_by': False,
|
||||
|
||||
'extra_nav_links': {
|
||||
'Proxmox Homepage': 'https://proxmox.com',
|
||||
'PDF': 'proxmox-backup.pdf',
|
||||
},
|
||||
|
||||
'sidebar_width': '320px',
|
||||
'page_width': '1320px',
|
||||
# font styles
|
||||
'head_font_family': 'Lato, sans-serif',
|
||||
'caption_font_family': 'Lato, sans-serif',
|
||||
'caption_font_size': '20px',
|
||||
'font_family': 'Open Sans, sans-serif',
|
||||
}
|
||||
|
||||
# Alabaster theme recommends setting this fixed.
|
||||
# If you switch theme this needs to be removed, probably.
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'sidebar-header.html',
|
||||
'searchbox.html',
|
||||
'navigation.html',
|
||||
'relations.html',
|
||||
],
|
||||
|
||||
'index': [
|
||||
'sidebar-header.html',
|
||||
'searchbox.html',
|
||||
'index-sidebar.html',
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
@ -179,7 +215,7 @@ html_theme = 'sphinxdoc'
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#
|
||||
html_logo = 'images/proxmox-logo.svg'
|
||||
#html_logo = 'images/proxmox-logo.svg' # replaced by html_theme_options.logo
|
||||
|
||||
# The name of an image file (relative to this directory) to use as a favicon of
|
||||
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
@ -209,10 +245,6 @@ html_static_path = ['_static']
|
||||
#
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#
|
||||
@ -232,7 +264,7 @@ html_static_path = ['_static']
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#
|
||||
# html_show_sourcelink = True
|
||||
html_show_sourcelink = False
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#
|
||||
|
52
docs/custom.css
Normal file
52
docs/custom.css
Normal file
@ -0,0 +1,52 @@
|
||||
div.sphinxsidebar {
|
||||
height: calc(100% - 20px);
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
h1.logo-name {
|
||||
font-size: 24px;
|
||||
}
|
||||
|
||||
div.body img {
|
||||
width: 250px;
|
||||
}
|
||||
pre {
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
li a.current {
|
||||
font-weight: bold;
|
||||
border-bottom: 1px solid #000;
|
||||
}
|
||||
ul li.toctree-l1 {
|
||||
margin-top: 0.5em;
|
||||
}
|
||||
ul li.toctree-l1 > a {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form.search {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h1.logo-name {
|
||||
display: none;
|
||||
}
|
||||
@media screen and (max-width: 875px) {
|
||||
div.sphinxsidebar p.logo {
|
||||
display: initial;
|
||||
}
|
||||
div.sphinxsidebar h1.logo-name {
|
||||
display: block;
|
||||
}
|
||||
div.sphinxsidebar span {
|
||||
color: #AAA;
|
||||
}
|
||||
ul li.toctree-l1 > a {
|
||||
color: #FFF;
|
||||
}
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
.. Epilog (included at top of each file)
|
||||
|
||||
We use this file to define external links and commone replacement
|
||||
We use this file to define external links and common replacement
|
||||
patterns.
|
||||
|
||||
.. |VERSION| replace:: 1.0
|
||||
@ -13,7 +13,7 @@
|
||||
.. _Proxmox: https://www.proxmox.com
|
||||
.. _Proxmox Community Forum: https://forum.proxmox.com
|
||||
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
||||
// FIXME
|
||||
.. FIXME
|
||||
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
||||
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
||||
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
||||
@ -38,3 +38,6 @@
|
||||
.. _RFC3339: https://tools.ietf.org/html/rfc3339
|
||||
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
|
||||
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
|
||||
|
||||
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
|
||||
|
||||
|
71
docs/faq.rst
Normal file
71
docs/faq.rst
Normal file
@ -0,0 +1,71 @@
|
||||
FAQ
|
||||
===
|
||||
|
||||
What distribution is Proxmox Backup Server (PBS) based on?
|
||||
----------------------------------------------------------
|
||||
|
||||
Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
|
||||
|
||||
|
||||
Which platforms are supported as a backup source (client)?
|
||||
----------------------------------------------------------
|
||||
|
||||
The client tool works on most modern Linux systems, meaning you are not limited
|
||||
to Debian-based systems.
|
||||
|
||||
|
||||
Will Proxmox Backup Server run on a 32-bit processor?
|
||||
-----------------------------------------------------
|
||||
|
||||
Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
|
||||
future plans to support 32-bit processors.
|
||||
|
||||
|
||||
How long will my Proxmox Backup Server version be supported?
|
||||
------------------------------------------------------------
|
||||
|
||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
||||
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
||||
+=======================+====================+===============+============+====================+
|
||||
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
|
||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
||||
|
||||
|
||||
Can I copy or synchronize my datastore to another location?
|
||||
-----------------------------------------------------------
|
||||
|
||||
Proxmox Backup Server allows you to copy or synchronize datastores to other
|
||||
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
|
||||
given to a separate server, which has a datastore that can be synced to a local store.
|
||||
A *Sync Job* is the process which is used to pull the contents of a datastore from
|
||||
a *Remote* to a local datastore.
|
||||
|
||||
|
||||
Can Proxmox Backup Server verify data integrity of a backup archive?
|
||||
--------------------------------------------------------------------
|
||||
|
||||
Proxmox Backup Server uses a built-in SHA-256 checksum algorithm to ensure
|
||||
data integrity. Within each backup, a manifest file (index.json) is created,
|
||||
which contains a list of all the backup files, along with their sizes and
|
||||
checksums. This manifest file is used to verify the integrity of each backup.
|
||||
|
||||
|
||||
When backing up to remote servers, do I have to trust the remote server?
|
||||
------------------------------------------------------------------------
|
||||
|
||||
Proxmox Backup Server supports client-side encryption, meaning your data is
|
||||
encrypted before it reaches the server. Thus, in the event that an attacker
|
||||
gains access to the server, they will not be able to read the data.
|
||||
|
||||
.. note:: Encryption is not enabled by default. To set up encryption, see the
|
||||
`Encryption
|
||||
<https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
|
||||
of the Proxmox Backup Server Administration Guide.
|
||||
|
||||
|
||||
Is the backup incremental/deduplicated?
|
||||
---------------------------------------
|
||||
|
||||
With Proxmox Backup Server, backups are sent incrementally and data is
|
||||
deduplicated on the server.
|
||||
This minimizes both the storage consumed and the network impact.
|
@ -51,14 +51,3 @@ Glossary
|
||||
A remote Proxmox Backup Server installation and credentials for a user on it.
|
||||
You can pull datastores from a remote to a local datastore in order to
|
||||
have redundant backups.
|
||||
|
||||
Schedule
|
||||
|
||||
Certain tasks, for example pruning and garbage collection, need to be
|
||||
performed on a regular basis. Proxmox Backup Server uses a subset of the
|
||||
`systemd Time and Date Specification
|
||||
<https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
|
||||
The subset currently supports time of day specifications and weekdays, in
|
||||
addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
|
||||
There is no support for specifying timezones, the tasks are run in the
|
||||
timezone configured on the server.
|
||||
|
135
docs/gui.rst
Normal file
135
docs/gui.rst
Normal file
@ -0,0 +1,135 @@
|
||||
Graphical User Interface
|
||||
========================
|
||||
|
||||
Proxmox Backup Server offers an integrated, web-based interface to manage the
|
||||
server. This means that you can carry out all administration tasks through your
|
||||
web browser, and that you don't have to worry about installing extra management
|
||||
tools. The web interface also provides a built-in console, so if you prefer the
|
||||
command line or need some extra control, you have this option.
|
||||
|
||||
The web interface can be accessed via https://youripaddress:8007. The default
|
||||
login is `root`, and the password is the one specified during the installation
|
||||
process.
|
||||
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* Simple management interface for Proxmox Backup Server
|
||||
* Monitoring of tasks, logs and resource usage
|
||||
* Management of users, permissions, datastores, etc.
|
||||
* Secure HTML5 console
|
||||
* Support for multiple authentication sources
|
||||
* Support for multiple languages
|
||||
* Based on ExtJS 6.x JavaScript framework
|
||||
|
||||
|
||||
Login
|
||||
-----
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-login-window.png
|
||||
:width: 250
|
||||
:align: right
|
||||
:alt: PBS login window
|
||||
|
||||
When you connect to the web interface, you will first see the login window.
|
||||
Proxmox Backup Server supports various languages and authentication back ends
|
||||
(*Realms*), both of which can be selected here.
|
||||
|
||||
.. note:: For convenience, you can save the username on the client side, by
|
||||
selecting the "Save User name" checkbox at the bottom of the window.
|
||||
|
||||
|
||||
GUI Overview
|
||||
------------
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-dashboard.png
|
||||
:width: 250
|
||||
:align: right
|
||||
:alt: PBS GUI Dashboard
|
||||
|
||||
The Proxmox Backup Server web interface consists of 3 main sections:
|
||||
|
||||
* **Header**: At the top. This shows version information, and contains buttons to view
|
||||
documentation, monitor running tasks, and log out.
|
||||
* **Sidebar**: On the left. This contains the configuration options for
|
||||
the server.
|
||||
* **Configuration Panel**: In the center. This contains the control interface for the
|
||||
configuration options in the *Sidebar*.
|
||||
|
||||
|
||||
Sidebar
|
||||
-------
|
||||
|
||||
In the sidebar, on the left side of the page, you can see various items relating
|
||||
to specific management activities.
|
||||
|
||||
|
||||
Dashboard
|
||||
^^^^^^^^^
|
||||
|
||||
The Dashboard shows a summary of activity and resource usage on the server.
|
||||
Specifically, this displays hardware usage, a summary of
|
||||
previous and currently running tasks, and subscription information.
|
||||
|
||||
|
||||
Configuration
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
The Configuration section contains some system configuration options, such as
|
||||
time and network configuration. It also contains the following subsections:
|
||||
|
||||
* **User Management**: Add users and manage accounts
|
||||
* **Permissions**: Manage permissions for various users
|
||||
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
||||
* **Sync Jobs**: Manage and run sync jobs to remotes
|
||||
* **Subscription**: Upload a subscription key and view subscription status
|
||||
|
||||
|
||||
Administration
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
|
||||
:width: 250
|
||||
:align: right
|
||||
:alt: Administration: Server Status overview
|
||||
|
||||
The Administration section contains a top panel, with further administration
|
||||
tasks and information. These are:
|
||||
|
||||
* **ServerStatus**: Provides access to the console, power options, and various
|
||||
resource usage statistics
|
||||
* **Services**: Manage and monitor system services
|
||||
* **Updates**: An interface for upgrading packages
|
||||
* **Syslog**: View log messages from the server
|
||||
* **Tasks**: Task history with multiple filter options
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-disks.png
|
||||
:width: 250
|
||||
:align: right
|
||||
:alt: Administration: Disks
|
||||
|
||||
The administration menu item also contains a disk management subsection:
|
||||
|
||||
* **Disks**: View information on available disks
|
||||
|
||||
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
||||
* **ZFS**: Create and view information on *ZFS* disks
|
||||
|
||||
|
||||
Datastore
|
||||
^^^^^^^^^
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-datastore.png
|
||||
:width: 250
|
||||
:align: right
|
||||
:alt: Datastore Configuration
|
||||
|
||||
The Datastore section provides an interface for creating and managing
|
||||
datastores. It contains a subsection for each datastore on the system, in
|
||||
which you can use the top panel to view:
|
||||
|
||||
* **Content**: Information on the datastore's backup groups and their respective
|
||||
contents
|
||||
* **Statistics**: Usage statistics for the datastore
|
||||
* **Permissions**: View and manage permissions for the datastore
|
BIN
docs/images/screenshots/pbs-gui-administration-serverstatus.png
Normal file
BIN
docs/images/screenshots/pbs-gui-administration-serverstatus.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 127 KiB |
BIN
docs/images/screenshots/pbs-gui-dashboard.png
Normal file
BIN
docs/images/screenshots/pbs-gui-dashboard.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 119 KiB |
BIN
docs/images/screenshots/pbs-gui-login-window.png
Normal file
BIN
docs/images/screenshots/pbs-gui-login-window.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 16 KiB |
@ -2,8 +2,8 @@
|
||||
|
||||
Welcome to the Proxmox Backup documentation!
|
||||
============================================
|
||||
|
||||
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
| Version |version| -- |today|
|
||||
|
||||
Permission is granted to copy, distribute and/or modify this document under the
|
||||
terms of the GNU Free Documentation License, Version 1.3 or any later version
|
||||
@ -22,8 +22,18 @@ in the section entitled "GNU Free Documentation License".
|
||||
|
||||
introduction.rst
|
||||
installation.rst
|
||||
administration-guide.rst
|
||||
terminology.rst
|
||||
gui.rst
|
||||
storage.rst
|
||||
network-management.rst
|
||||
user-management.rst
|
||||
managing-remotes.rst
|
||||
maintenance.rst
|
||||
backup-client.rst
|
||||
pve-integration.rst
|
||||
pxar-tool.rst
|
||||
sysadmin.rst
|
||||
faq.rst
|
||||
|
||||
.. raw:: latex
|
||||
|
||||
@ -36,6 +46,7 @@ in the section entitled "GNU Free Documentation License".
|
||||
command-syntax.rst
|
||||
file-formats.rst
|
||||
backup-protocol.rst
|
||||
calendarevents.rst
|
||||
glossary.rst
|
||||
GFDL.rst
|
||||
|
||||
@ -43,10 +54,10 @@ in the section entitled "GNU Free Documentation License".
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
:caption: Developer Appendix
|
||||
|
||||
todos.rst
|
||||
|
||||
|
||||
* :ref:`genindex`
|
||||
|
||||
.. # * :ref:`genindex`
|
||||
|
@ -5,6 +5,8 @@ Installation
|
||||
can either be installed with a graphical installer or on top of
|
||||
Debian_ from the provided package repository.
|
||||
|
||||
.. include:: system-requirements.rst
|
||||
|
||||
.. include:: package-repositories.rst
|
||||
|
||||
Server installation
|
||||
|
@ -104,7 +104,7 @@ Software Stack
|
||||
|
||||
Proxmox Backup Server consists of multiple components:
|
||||
|
||||
* A server-daemon providing, among other things, a RESTfull API, super-fast
|
||||
* A server-daemon providing, among other things, a RESTful API, super-fast
|
||||
asynchronous tasks, lightweight usage statistic collection, scheduling
|
||||
events, strict separation of privileged and unprivileged execution
|
||||
environments
|
||||
@ -127,6 +127,7 @@ language.
|
||||
|
||||
.. todo:: further explain the software stack
|
||||
|
||||
|
||||
Getting Help
|
||||
------------
|
||||
|
||||
@ -178,5 +179,29 @@ along with this program. If not, see AGPL3_.
|
||||
History
|
||||
-------
|
||||
|
||||
.. todo:: Add development History of the product
|
||||
Backup is, and always was, a central aspect of IT administration.
|
||||
The need to recover from data loss is fundamental and increases with
|
||||
virtualization.
|
||||
|
||||
Not surprisingly, we shipped a backup tool with Proxmox VE from the
|
||||
beginning. The tool is called ``vzdump`` and is able to make
|
||||
consistent snapshots of running LXC containers and KVM virtual
|
||||
machines.
|
||||
|
||||
But ``vzdump`` only allowed for full backups. While this is perfect
|
||||
for small backups, it becomes a burden for users with large VMs. Both
|
||||
backup time and space usage were too large for this case, especially
|
||||
when users want to keep many backups of the same VMs. We needed
|
||||
deduplication and incremental backups to solve those problems.
|
||||
|
||||
Back in October 2018 development started. We had been looking into
|
||||
several technologies and frameworks and finally decided to use
|
||||
:term:`Rust` as implementation language to provide high speed and
|
||||
memory efficiency. The 2018 edition of Rust seemed promising and
|
||||
useful for our requirements.
|
||||
|
||||
In July 2020 we released the first beta version of Proxmox Backup
|
||||
Server, followed by a first stable version in November 2020. With the
|
||||
support of incremental, fully deduplicated backups, Proxmox Backup
|
||||
significantly reduces the network load and saves valuable storage
|
||||
space.
|
||||
|
@ -220,7 +220,7 @@ and you can install it using `apt-get`:
|
||||
# apt-get install zfs-zed
|
||||
|
||||
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
||||
favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
||||
favorite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -312,6 +312,8 @@ You can disable compression at any time with:
|
||||
|
||||
Only new blocks will be affected by this change.
|
||||
|
||||
.. _local_zfs_special_device:
|
||||
|
||||
ZFS Special Device
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
13
docs/maintenance.rst
Normal file
13
docs/maintenance.rst
Normal file
@ -0,0 +1,13 @@
|
||||
Maintenance Tasks
|
||||
=================
|
||||
|
||||
Garbage Collection
|
||||
------------------
|
||||
|
||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
||||
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
||||
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
|
||||
collection on an entire datastore and the ``status`` subcommand to see
|
||||
attributes relating to the :ref:`garbage collection <garbage-collection>`.
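
For example, assuming a datastore named ``store1``, a manual run and a
follow-up status query could look like this (a minimal sketch):

.. code-block:: console

# proxmox-backup-manager garbage-collection start store1
# proxmox-backup-manager garbage-collection status store1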
|
||||
|
||||
.. todo:: Add section on verification
|
82
docs/managing-remotes.rst
Normal file
82
docs/managing-remotes.rst
Normal file
@ -0,0 +1,82 @@
|
||||
Managing Remotes
|
||||
================
|
||||
|
||||
.. _backup_remote:
|
||||
|
||||
:term:`Remote`
|
||||
--------------
|
||||
|
||||
A remote refers to a separate Proxmox Backup Server installation and a user on that
|
||||
installation, from which you can `sync` datastores to a local datastore with a
|
||||
`Sync Job`. You can configure remotes in the web interface, under **Configuration
|
||||
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
|
||||
configuration information for remotes is stored in the file
|
||||
``/etc/proxmox-backup/remote.cfg``.
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-remote-add.png
|
||||
:align: right
|
||||
:alt: Add a remote
|
||||
|
||||
To add a remote, you need its hostname or IP, a userid and password on the
|
||||
remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
||||
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager cert info |grep Fingerprint
|
||||
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
Using the information specified above, you can add a remote from the **Remotes**
|
||||
configuration panel, or by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example --userid sync@pam --password 'SECRET' --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
|
||||
``proxmox-backup-manager remote`` to manage your remotes:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager remote update pbs2 --host pbs2.example
|
||||
# proxmox-backup-manager remote list
|
||||
┌──────┬──────────────┬──────────┬───────────────────────────────────────────┬─────────┐
|
||||
│ name │ host │ userid │ fingerprint │ comment │
|
||||
╞══════╪══════════════╪══════════╪═══════════════════════════════════════════╪═════════╡
|
||||
│ pbs2 │ pbs2.example │ sync@pam │64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe │ │
|
||||
└──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
|
||||
# proxmox-backup-manager remote remove pbs2
|
||||
|
||||
|
||||
.. _syncjobs:
|
||||
|
||||
Sync Jobs
|
||||
---------
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-syncjob-add.png
|
||||
:align: right
|
||||
:alt: Add a Sync Job
|
||||
|
||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
|
||||
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
|
||||
The configuration information for sync jobs is stored at
|
||||
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
||||
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
||||
either start it manually on the GUI or provide it with a schedule (see
|
||||
:ref:`calendar-events`) to run regularly.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager sync-job create pbs2-local --remote pbs2 --remote-store local --store local --schedule 'Wed 02:30'
|
||||
# proxmox-backup-manager sync-job update pbs2-local --comment 'offsite'
|
||||
# proxmox-backup-manager sync-job list
|
||||
┌────────────┬───────┬────────┬──────────────┬───────────┬─────────┐
|
||||
│ id │ store │ remote │ remote-store │ schedule │ comment │
|
||||
╞════════════╪═══════╪════════╪══════════════╪═══════════╪═════════╡
|
||||
│ pbs2-local │ local │ pbs2 │ local │ Wed 02:30 │ offsite │
|
||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||
# proxmox-backup-manager sync-job remove pbs2-local
|
||||
|
||||
|
88
docs/network-management.rst
Normal file
88
docs/network-management.rst
Normal file
@ -0,0 +1,88 @@
|
||||
Network Management
|
||||
==================
|
||||
|
||||
Proxmox Backup Server provides both a web interface and a command line tool for
|
||||
network configuration. You can find the configuration options in the web
|
||||
interface under the **Network Interfaces** section of the **Configuration** menu
|
||||
tree item. The command line tool is accessed via the ``network`` subcommand.
|
||||
These interfaces allow you to carry out some basic network management tasks,
|
||||
such as adding, configuring, and removing network interfaces.
|
||||
|
||||
.. note:: Any changes made to the network configuration are not
|
||||
applied until you click on **Apply Configuration** or enter the ``network
|
||||
reload`` command. This allows you to make many changes at once. It also allows
|
||||
you to ensure that your changes are correct before applying them, as making a
|
||||
mistake here can render the server inaccessible over the network.
|
||||
|
||||
To get a list of available interfaces, use the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network list
|
||||
┌───────┬────────┬───────────┬────────┬─────────────┬──────────────┬──────────────┐
|
||||
│ name │ type │ autostart │ method │ address │ gateway │ ports/slaves │
|
||||
╞═══════╪════════╪═══════════╪════════╪═════════════╪══════════════╪══════════════╡
|
||||
│ bond0 │ bond │ 1 │ static │ x.x.x.x/x │ x.x.x.x │ ens18 ens19 │
|
||||
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
|
||||
│ ens18 │ eth │ 1 │ manual │ │ │ │
|
||||
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
|
||||
│ ens19 │ eth │ 1 │ manual │ │ │ │
|
||||
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-network-create-bond.png
|
||||
:align: right
|
||||
:alt: Add a network interface
|
||||
|
||||
To add a new network interface, use the ``create`` subcommand with the relevant
|
||||
parameters. For example, you may want to set up a bond, for the purpose of
|
||||
network redundancy. The following command shows a template for creating the bond shown
|
||||
in the list above:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
|
||||
|
||||
You can make changes to the configuration of a network interface with the
|
||||
``update`` subcommand:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network update bond0 --cidr y.y.y.y/y
|
||||
|
||||
You can also remove a network interface:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network remove bond0
|
||||
|
||||
The pending changes for the network configuration file will appear at the bottom of the
|
||||
web interface. You can also view these changes by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network changes
|
||||
|
||||
If you would like to cancel all changes at this point, you can either click on
|
||||
the **Revert** button or use the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network revert
|
||||
|
||||
If you are happy with the changes and would like to write them into the
|
||||
configuration file, select **Apply Configuration**. The corresponding command
|
||||
is:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network reload
|
||||
|
||||
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
||||
command, from the package ``ifupdown2``. This package is included within the
|
||||
Proxmox Backup Server installation; however, you may have to install it yourself
|
||||
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
|
||||
|
||||
You can also configure DNS settings, from the **DNS** section
|
||||
of **Configuration** or by using the ``dns`` subcommand of
|
||||
``proxmox-backup-manager``.
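
As a minimal sketch, the current DNS configuration can be queried from the
command line like this; changing the settings works analogously with the
``update`` subcommand:

.. code-block:: console

# proxmox-backup-manager dns get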
|
||||
|
49
docs/pve-integration.rst
Normal file
49
docs/pve-integration.rst
Normal file
@ -0,0 +1,49 @@
|
||||
.. _pve-integration:
|
||||
|
||||
`Proxmox VE`_ Integration
|
||||
-------------------------
|
||||
|
||||
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
||||
node. The following example uses ``store2`` as storage name, and
|
||||
assumes the server address is ``localhost``, and you want to connect
|
||||
as ``user1@pbs``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pvesm add pbs store2 --server localhost --datastore store2
|
||||
# pvesm set store2 --username user1@pbs --password <secret>
|
||||
|
||||
.. note:: If you would rather not pass your password as plain text, you can pass
|
||||
the ``--password`` parameter without any arguments. This will cause the
|
||||
program to prompt you for a password upon entering the command.
|
||||
|
||||
If your backup server uses a self signed certificate, you need to add
|
||||
the certificate fingerprint to the configuration. You can get the
|
||||
fingerprint by running the following command on the backup server:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager cert info | grep Fingerprint
|
||||
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
Please add that fingerprint to your configuration to establish a trust
|
||||
relationship:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
After that you should be able to see storage status with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pvesm status --storage store2
|
||||
Name Type Status Total Used Available %
|
||||
store2 pbs active 3905109820 1336687816 2568422004 34.23%
|
||||
|
||||
Having added the PBS datastore to `Proxmox VE`_, you can backup VMs and
|
||||
containers in the same way you would for any other storage device within the
|
||||
environment (see `PVE Admin Guide: Backup and Restore
|
||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
|
||||
|
||||
|
5
docs/pxar-tool.rst
Normal file
5
docs/pxar-tool.rst
Normal file
@ -0,0 +1,5 @@
|
||||
pxar Command Line Tool
|
||||
======================
|
||||
|
||||
.. include:: pxar/description.rst
|
||||
|
244
docs/storage.rst
Normal file
244
docs/storage.rst
Normal file
@ -0,0 +1,244 @@
|
||||
Storage
|
||||
=======
|
||||
|
||||
Disk Management
|
||||
---------------
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-disks.png
|
||||
:align: right
|
||||
:alt: List of disks
|
||||
|
||||
Proxmox Backup Server comes with a set of disk utilities, which are
|
||||
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
||||
disks, create various filesystems, and get information about the disks.
|
||||
|
||||
To view the disks connected to the system, navigate to **Administration ->
|
||||
Disks** in the web interface or use the ``list`` subcommand of
|
||||
``disk``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk list
|
||||
┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
|
||||
│ name │ used │ gpt │ disk-type │ size │ model │ wearout │ status │
|
||||
╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
|
||||
│ sda │ lvm │ 1 │ hdd │ 34359738368 │ QEMU_HARDDISK │ - │ passed │
|
||||
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
|
||||
│ sdb │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
|
||||
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
|
||||
│ sdc │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
|
||||
└──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘
|
||||
|
||||
To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk initialize sdX
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-disks-dir-create.png
|
||||
:align: right
|
||||
:alt: Create a directory
|
||||
|
||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
||||
create``, or by navigating to **Administration -> Disks -> Directory** in the
|
||||
web interface and creating one from there. The following command creates an
|
||||
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||
automatically create a datastore on the disk (in this case ``sdd``). This will
|
||||
create a datastore at the location ``/mnt/datastore/store1``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
|
||||
:align: right
|
||||
:alt: Create ZFS
|
||||
|
||||
You can also create a ``zpool`` with various raid levels from **Administration
|
||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||
mounts it on the root directory (default):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
|
||||
|
||||
.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
|
||||
create a datastore from the disk.
|
||||
|
||||
You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
|
||||
filesystems and zpools respectively.
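
For example (a minimal sketch, assuming the filesystem and zpool created
above):

.. code-block:: console

# proxmox-backup-manager disk fs list
# proxmox-backup-manager disk zpool list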
|
||||
|
||||
Proxmox Backup Server uses the package smartmontools. This is a set of tools
|
||||
used to monitor and control the S.M.A.R.T. system for local hard disks. If a
|
||||
disk supports S.M.A.R.T. capability, and you have this enabled, you can
|
||||
display S.M.A.R.T. attributes from the web interface or by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk smart-attributes sdX
|
||||
|
||||
.. note:: This functionality may also be accessed directly through the use of
|
||||
the ``smartctl`` command, which comes as part of the smartmontools package
|
||||
(see ``man smartctl`` for more details).
|
||||
|
||||
|
||||
.. _datastore_intro:
|
||||
|
||||
:term:`DataStore`
|
||||
-----------------
|
||||
|
||||
A datastore refers to a location at which backups are stored. The current
|
||||
implementation uses a directory inside a standard Unix file system (``ext4``,
|
||||
``xfs`` or ``zfs``) to store the backup data.
|
||||
|
||||
Datastores are identified by a simple *ID*. You can configure this
|
||||
when setting up the datastore. The configuration information for datastores
|
||||
is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
|
||||
|
||||
.. note:: The `File Layout`_ requires the file system to support at least *65538*
|
||||
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
||||
directory entries. This requirement excludes certain filesystems and
|
||||
filesystem configuration from being supported for a datastore. For example,
|
||||
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
||||
|
||||
|
||||
Datastore Configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-datastore.png
|
||||
:align: right
|
||||
:alt: Datastore Overview
|
||||
|
||||
You can configure multiple datastores, but at least one needs to be
|
||||
configured. The datastore is identified by a simple *name* and points to a
|
||||
directory on the filesystem. Each datastore also has associated retention
|
||||
settings, defining how many backup snapshots to keep for each interval of ``hourly``,
|
||||
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||
number of backups to keep in that store. :ref:`backup-pruning` and
|
||||
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
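
A minimal example, assuming a datastore named ``store1`` already exists,
could set retention and schedules from the command line like this (the same
options are available in the datastore edit dialog of the GUI):

.. code-block:: console

# proxmox-backup-manager datastore update store1 --keep-last 7 --keep-daily 14 --prune-schedule daily --gc-schedule 'sat 03:00'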
|
||||
|
||||
|
||||
Creating a Datastore
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
|
||||
:align: right
|
||||
:alt: Create a datastore
|
||||
|
||||
You can create a new datastore from the web GUI, by navigating to **Datastore** in
|
||||
the menu tree and clicking **Create**. Here:
|
||||
|
||||
* *Name* refers to the name of the datastore
|
||||
* *Backing Path* is the path to the directory upon which you want to create the
|
||||
datastore
|
||||
* *GC Schedule* refers to the time and intervals at which garbage collection
|
||||
runs
|
||||
* *Prune Schedule* refers to the frequency at which pruning takes place
|
||||
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`backup-pruning`).
|
||||
|
||||
Alternatively you can create a new datastore from the command line. The
|
||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
|
||||
|
||||
|
||||
Managing Datastores
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To list existing datastores from the command line run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore list
|
||||
┌────────┬──────────────────────┬─────────────────────────────┐
|
||||
│ name │ path │ comment │
|
||||
╞════════╪══════════════════════╪═════════════════════════════╡
|
||||
│ store1 │ /backup/disk1/store1 │ This is my default storage. │
|
||||
└────────┴──────────────────────┴─────────────────────────────┘
|
||||
|
||||
You can change the garbage collection and prune settings of a datastore, by
|
||||
editing the datastore from the GUI or by using the ``update`` subcommand. For
|
||||
example, the below command changes the garbage collection schedule using the
|
||||
``update`` subcommand and prints the properties of the datastore with the
|
||||
``show`` subcommand:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore update store1 --gc-schedule 'Tue 04:27'
|
||||
# proxmox-backup-manager datastore show store1
|
||||
┌────────────────┬─────────────────────────────┐
|
||||
│ Name │ Value │
|
||||
╞════════════════╪═════════════════════════════╡
|
||||
│ name │ store1 │
|
||||
├────────────────┼─────────────────────────────┤
|
||||
│ path │ /backup/disk1/store1 │
|
||||
├────────────────┼─────────────────────────────┤
|
||||
│ comment │ This is my default storage. │
|
||||
├────────────────┼─────────────────────────────┤
|
||||
│ gc-schedule │ Tue 04:27 │
|
||||
├────────────────┼─────────────────────────────┤
|
||||
│ keep-last │ 7 │
|
||||
├────────────────┼─────────────────────────────┤
|
||||
│ prune-schedule │ daily │
|
||||
└────────────────┴─────────────────────────────┘
|
||||
|
||||
Finally, it is possible to remove the datastore configuration:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore remove store1
|
||||
|
||||
.. note:: The above command removes only the datastore configuration. It does
|
||||
not delete any data from the underlying directory.
|
||||
|
||||
|
||||
File Layout
|
||||
^^^^^^^^^^^
|
||||
|
||||
After creating a datastore, the following default layout will appear:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ls -arilh /backup/disk1/store1
|
||||
276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
|
||||
276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks
|
||||
|
||||
`.lock` is an empty file used for process locking.
|
||||
|
||||
The `.chunks` directory contains subdirectories named with hexadecimal values from `0000` to `ffff`. These
|
||||
directories store the chunked data after a backup operation has been executed.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ls -arilh /backup/disk1/store1/.chunks
|
||||
545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
|
||||
545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
|
||||
415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
|
||||
415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
|
||||
353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
|
||||
344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
|
||||
144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
|
||||
144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
|
||||
144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
|
||||
...
|
||||
403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
|
||||
403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
|
||||
403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
|
||||
402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
|
||||
402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
|
||||
402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
|
||||
276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
|
||||
276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
|
||||
276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
|
||||
276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
|
||||
276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
|
||||
276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
|
||||
276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
|
||||
276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
|
||||
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
||||
|
||||
|
57
docs/system-requirements.rst
Normal file
57
docs/system-requirements.rst
Normal file
@ -0,0 +1,57 @@
|
||||
System Requirements
|
||||
-------------------
|
||||
|
||||
We recommend using high quality server hardware when running Proxmox Backup in
|
||||
production. To further decrease the impact of a failed host, you can set up
|
||||
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
|
||||
from other Proxmox Backup Server instances.
|
||||
|
||||
Minimum Server Requirements, for Evaluation
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These minimum requirements are for evaluation purposes only and should not be
|
||||
used in production.
|
||||
|
||||
* CPU: 64bit (*x86-64* or *AMD64*), 2+ Cores
|
||||
|
||||
* Memory (RAM): 2 GB RAM
|
||||
|
||||
* Hard drive: more than 8GB of space.
|
||||
|
||||
* Network card (NIC)
|
||||
|
||||
|
||||
Recommended Server System Requirements
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* CPU: Modern AMD or Intel 64-bit based CPU, with at least 4 cores
|
||||
|
||||
* Memory: minimum 4 GiB for the OS, filesystem cache and Proxmox Backup Server
|
||||
daemons. Add at least another GiB per TiB storage space.
|
||||
|
||||
* OS storage:
|
||||
|
||||
* 32 GiB, or more, free storage space
|
||||
* Use a hardware RAID with battery protected write cache (*BBU*) or a
|
||||
redundant ZFS setup (ZFS is not compatible with a hardware RAID
|
||||
controller).
|
||||
|
||||
* Backup storage:
|
||||
|
||||
* Use only SSDs, for best results
|
||||
* If HDDs are used: Using a metadata cache is highly recommended, for example,
|
||||
add a ZFS :ref:`special device mirror <local_zfs_special_device>`.
|
||||
|
||||
* Redundant Multi-GBit/s network interface cards (NICs)
|
||||
|
||||
|
||||
Supported Web Browsers for Accessing the Web Interface
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To access the server's web-based user interface, we recommend using one of the
|
||||
following browsers:
|
||||
|
||||
* Firefox, a release from the current year, or the latest Extended Support Release
|
||||
* Chrome, a release from the current year
|
||||
* Microsoft's currently supported version of Edge
|
||||
* Safari, a release from the current year
|
118
docs/terminology.rst
Normal file
118
docs/terminology.rst
Normal file
@ -0,0 +1,118 @@
|
||||
Terminology
|
||||
===========
|
||||
|
||||
Backup Content
|
||||
--------------
|
||||
|
||||
When doing deduplication, there are different strategies to get
|
||||
optimal results in terms of performance and/or deduplication rates.
|
||||
Depending on the type of data, it can be split into *fixed* or *variable*
|
||||
sized chunks.
|
||||
|
||||
Fixed sized chunking requires minimal CPU power, and is used to
|
||||
backup virtual machine images.
|
||||
|
||||
Variable sized chunking needs more CPU power, but is essential to get
|
||||
good deduplication rates for file archives.
|
||||
|
||||
The Proxmox Backup Server supports both strategies.
|
||||
|
||||
|
||||
Image Archives: ``<name>.img``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This is used for virtual machine images and other large binary
|
||||
data. Content is split into fixed-sized chunks.
|
||||
|
||||
|
||||
File Archives: ``<name>.pxar``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
|
||||
|
||||
A file archive stores a full directory tree. Content is stored using
|
||||
the :ref:`pxar-format`, split into variable-sized chunks. The format
|
||||
is optimized to achieve good deduplication rates.
|
||||
|
||||
|
||||
Binary Data (BLOBs)
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This type is used to store smaller (< 16MB) binary data such as
|
||||
configuration files. Larger files should be stored as an image archive.
|
||||
|
||||
.. caution:: Please do not store all files as BLOBs. Instead, use the
|
||||
file archive to store whole directory trees.
|
||||
|
||||
|
||||
Catalog File: ``catalog.pcat1``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The catalog file is an index for file archives. It contains
|
||||
the list of files and is used to speed up search operations.
|
||||
|
||||
|
||||
The Manifest: ``index.json``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The manifest contains the list of all backup files, their
|
||||
sizes and checksums. It is used to verify the consistency of a
|
||||
backup.
|
||||
|
||||
|
||||
Backup Type
|
||||
-----------
|
||||
|
||||
The backup server groups backups by *type*, where *type* is one of:
|
||||
|
||||
``vm``
|
||||
This type is used for :term:`virtual machine`\ s. Typically
|
||||
consists of the virtual machine's configuration file and an image archive
|
||||
for each disk.
|
||||
|
||||
``ct``
|
||||
This type is used for :term:`container`\ s. Consists of the container's
|
||||
configuration and a single file archive for the filesystem content.
|
||||
|
||||
``host``
|
||||
This type is used for backups created from within the backed up machine.
|
||||
Typically this would be a physical host but could also be a virtual machine
|
||||
or container. Such backups may contain file and image archives; there are no restrictions in this regard.
|
||||
|
||||
|
||||
Backup ID
|
||||
---------
|
||||
|
||||
A unique ID. Usually the virtual machine or container ID. ``host``
|
||||
type backups normally use the hostname.
|
||||
|
||||
|
||||
Backup Time
|
||||
-----------
|
||||
|
||||
The time when the backup was made.
|
||||
|
||||
|
||||
Backup Group
|
||||
------------
|
||||
|
||||
The tuple ``<type>/<ID>`` is called a backup group. Such a group
|
||||
may contain one or more backup snapshots.
|
||||
|
||||
|
||||
Backup Snapshot
|
||||
---------------
|
||||
|
||||
The triplet ``<type>/<ID>/<time>`` is called a backup snapshot. It
|
||||
uniquely identifies a specific backup within a datastore.
|
||||
|
||||
.. code-block:: console
|
||||
:caption: Backup Snapshot Examples
|
||||
|
||||
vm/104/2019-10-09T08:01:06Z
|
||||
host/elsa/2019-11-08T09:48:14Z
|
||||
|
||||
As you can see, the time format is RFC3339_ with Coordinated
|
||||
Universal Time (UTC_, identified by the trailing *Z*).
|
||||
|
||||
|
186
docs/user-management.rst
Normal file
186
docs/user-management.rst
Normal file
@ -0,0 +1,186 @@
|
||||
.. _user_mgmt:
|
||||
|
||||
User Management
|
||||
===============
|
||||
|
||||
|
||||
User Configuration
|
||||
------------------
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-user-management.png
|
||||
:align: right
|
||||
:alt: User management
|
||||
|
||||
Proxmox Backup Server supports several authentication realms, and you need to
|
||||
choose the realm when you add a new user. Possible realms are:
|
||||
|
||||
:pam: Linux PAM standard authentication. Use this if you want to
|
||||
authenticate as a Linux system user (users need to exist on the
|
||||
system).
|
||||
|
||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||
``/etc/proxmox-backup/shadow.json``.
|
||||
|
||||
After installation, there is a single user ``root@pam``, which
|
||||
corresponds to the Unix superuser. User configuration information is stored in the file
|
||||
``/etc/proxmox-backup/user.cfg``. You can use the
|
||||
``proxmox-backup-manager`` command line tool to list or manipulate
|
||||
users:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user list
|
||||
┌─────────────┬────────┬────────┬───────────┬──────────┬────────────────┬────────────────────┐
|
||||
│ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
|
||||
╞═════════════╪════════╪════════╪═══════════╪══════════╪════════════════╪════════════════════╡
|
||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||
└─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
||||
:align: right
|
||||
:alt: Add a new user
|
||||
|
||||
The superuser has full administration rights on everything, so you
|
||||
normally want to add other users with less privileges. You can create a new
|
||||
user with the ``user create`` subcommand or through the web interface, under
|
||||
**Configuration -> User Management**. The ``create`` subcommand lets you specify
|
||||
many options like ``--email`` or ``--password``. You can update or change any
|
||||
user properties using the ``update`` subcommand later (**Edit** in the GUI):
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user create john@pbs --email john@example.com
|
||||
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
|
||||
# proxmox-backup-manager user update john@pbs --comment "An example user."
|
||||
|
||||
.. todo:: Mention how to set password without passing plaintext password as cli argument.
|
||||
|
||||
|
||||
The resulting user list looks like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user list
|
||||
┌──────────┬────────┬────────┬───────────┬──────────┬──────────────────┬──────────────────┐
|
||||
│ userid │ enable │ expire │ firstname │ lastname │ email │ comment │
|
||||
╞══════════╪════════╪════════╪═══════════╪══════════╪══════════════════╪══════════════════╡
|
||||
│ john@pbs │ 1 │ │ John │ Smith │ john@example.com │ An example user. │
|
||||
├──────────┼────────┼────────┼───────────┼──────────┼──────────────────┼──────────────────┤
|
||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||
|
||||
Newly created users do not have any permissions. Please read the next
|
||||
section to learn how to set access permissions.
|
||||
|
||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user update john@pbs --enable 0
|
||||
|
||||
Or completely remove the user with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user remove john@pbs
|
||||
|
||||
|
||||
.. _user_acl:
|
||||
|
||||
Access Control
|
||||
--------------
|
||||
|
||||
By default, new users do not have any permissions. Instead, you need to
|
||||
specify what is allowed and what is not. You can do this by assigning
|
||||
roles to users on specific objects like datastores or remotes. The
|
||||
following roles exist:
|
||||
|
||||
**NoAccess**
|
||||
Disable Access - nothing is allowed.
|
||||
|
||||
**Admin**
|
||||
Can do anything.
|
||||
|
||||
**Audit**
|
||||
Can view things, but is not allowed to change settings.
|
||||
|
||||
**DatastoreAdmin**
|
||||
Can do anything on datastores.
|
||||
|
||||
**DatastoreAudit**
|
||||
Can view datastore settings and list content. But
|
||||
is not allowed to read the actual data.
|
||||
|
||||
**DatastoreReader**
|
||||
Can Inspect datastore content and can do restores.
|
||||
|
||||
**DatastoreBackup**
|
||||
Can backup and restore owned backups.
|
||||
|
||||
**DatastorePowerUser**
|
||||
Can backup, restore, and prune owned backups.
|
||||
|
||||
**RemoteAdmin**
|
||||
Can do anything on remotes.
|
||||
|
||||
**RemoteAudit**
|
||||
Can view remote settings.
|
||||
|
||||
**RemoteSyncOperator**
|
||||
Is allowed to read data from a remote.
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-permissions-add.png
|
||||
:align: right
|
||||
:alt: Add permissions for user
|
||||
|
||||
Access permission information is stored in ``/etc/proxmox-backup/acl.cfg``. The
|
||||
file contains 5 fields, separated using a colon (':') as a delimiter. A typical
|
||||
entry takes the form:
|
||||
|
||||
``acl:1:/datastore:john@pbs:DatastoreBackup``
|
||||
|
||||
The data represented in each field is as follows:
|
||||
|
||||
#. ``acl`` identifier
|
||||
#. A ``1`` or ``0``, representing whether propagation is enabled or disabled,
|
||||
respectively
|
||||
#. The object on which the permission is set. This can be a specific object
|
||||
(single datastore, remote, etc.) or a top level object, which with
|
||||
propagation enabled, represents all children of the object also.
|
||||
#. The user for which the permission is set
|
||||
#. The role being set
|
||||
|
||||
You can manage datastore permissions from **Configuration -> Permissions** in the
|
||||
web interface. Likewise, you can use the ``acl`` subcommand to manage and
|
||||
monitor user permissions from the command line. For example, the command below
|
||||
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
|
||||
``store1``, located at ``/backup/disk1/store1``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
|
||||
|
||||
You can monitor the roles of each user using the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager acl list
|
||||
┌──────────┬──────────────────┬───────────┬────────────────┐
|
||||
│ ugid │ path │ propagate │ roleid │
|
||||
╞══════════╪══════════════════╪═══════════╪════════════════╡
|
||||
│ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
|
||||
└──────────┴──────────────────┴───────────┴────────────────┘
|
||||
|
||||
A single user can be assigned multiple permission sets for different datastores.
|
||||
|
||||
.. Note::
|
||||
Naming convention is important here. For datastores on the host,
|
||||
you must use the convention ``/datastore/{storename}``. For example, to set
|
||||
permissions for a datastore mounted at ``/mnt/backup/disk4/store2``, you would use
|
||||
``/datastore/store2`` for the path. For remote stores, use the convention
|
||||
``/remote/{remote}/{storename}``, where ``{remote}`` signifies the name of the
|
||||
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
||||
the remote.
|
||||
|
||||
|
@ -2,8 +2,6 @@ use std::io::Write;
|
||||
|
||||
use anyhow::{Error};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
|
||||
use proxmox_backup::api2::types::Userid;
|
||||
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
||||
|
||||
@ -34,9 +32,9 @@ async fn run() -> Result<(), Error> {
|
||||
.interactive(true)
|
||||
.ticket_cache(true);
|
||||
|
||||
let client = HttpClient::new(host, username, options)?;
|
||||
let client = HttpClient::new(host, 8007, username, options)?;
|
||||
|
||||
let backup_time = "2019-06-28T10:49:48Z".parse::<DateTime<Utc>>()?;
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||
|
||||
let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
|
||||
.await?;
|
||||
|
@@ -14,11 +14,11 @@ async fn upload_speed() -> Result<f64, Error> {
         .interactive(true)
         .ticket_cache(true);
 
-    let client = HttpClient::new(host, username, options)?;
+    let client = HttpClient::new(host, 8007, username, options)?;
 
-    let backup_time = chrono::Utc::now();
+    let backup_time = proxmox::tools::time::epoch_i64();
 
-    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;
 
     println!("start upload speed test");
     let res = client.upload_speedtest(true).await?;
@@ -175,7 +175,7 @@ pub fn update_acl(
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
 
-    let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
     let (mut tree, expected_digest) = acl::config()?;
 
@@ -14,7 +14,7 @@ use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};
     type: Array,
     items: {
         type: Object,
-        description: "User name with description.",
+        description: "Role with description and privileges.",
         properties: {
             roleid: {
                 type: Role,
@@ -8,6 +8,7 @@ use proxmox::tools::fs::open_file_locked;
 use crate::api2::types::*;
 use crate::config::user;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
+use crate::config::cached_user_info::CachedUserInfo;
 
 pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
     .format(&PASSWORD_FORMAT)
@@ -25,10 +26,11 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
         items: { type: user::User },
     },
     access: {
-        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+        permission: &Permission::Anybody,
+        description: "Returns all or just the logged-in user, depending on privileges.",
     },
 )]
-/// List all users
+/// List users
 pub fn list_users(
     _param: Value,
     _info: &ApiMethod,
@@ -37,11 +39,21 @@ pub fn list_users(
 
     let (config, digest) = user::config()?;
 
-    let list = config.convert_to_typed_array("user")?;
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+    let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
+
+    let filter_by_privs = |user: &user::User| {
+        top_level_allowed || user.userid == userid
+    };
+
+    let list:Vec<user::User> = config.convert_to_typed_array("user")?;
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
 
-    Ok(list)
+    Ok(list.into_iter().filter(filter_by_privs).collect())
 }
 
 #[api(
@@ -88,7 +100,7 @@ pub fn list_users(
 /// Create new user.
 pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
 
-    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
     let user: user::User = serde_json::from_value(param)?;
 
@@ -124,7 +136,10 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
        type: user::User,
     },
     access: {
-        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
+            &Permission::UserParam("userid"),
+        ]),
     },
 )]
 /// Read user configuration data.
@@ -177,7 +192,10 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
         },
     },
     access: {
-        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
     },
 )]
 /// Update user configuration.
@@ -193,7 +211,7 @@ pub fn update_user(
     digest: Option<String>,
 ) -> Result<(), Error> {
 
-    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
     let (mut config, expected_digest) = user::config()?;
 
@@ -258,13 +276,16 @@ pub fn update_user(
         },
     },
     access: {
-        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
     },
 )]
 /// Remove a user from the configuration file.
 pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
 
-    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
     let (mut config, expected_digest) = user::config()?;
 
@ -172,7 +172,7 @@ fn list_groups(
|
||||
let result_item = GroupListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
last_backup: info.backup_dir.backup_time().timestamp(),
|
||||
last_backup: info.backup_dir.backup_time(),
|
||||
backup_count: list.len() as u64,
|
||||
files: info.files.clone(),
|
||||
owner: Some(owner),
|
||||
@ -230,7 +230,7 @@ pub fn list_snapshot_files(
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
|
||||
@ -280,7 +280,7 @@ fn delete_snapshot(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
@ -403,7 +403,7 @@ pub fn list_snapshots (
|
||||
let result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
backup_time: info.backup_dir.backup_time().timestamp(),
|
||||
backup_time: info.backup_dir.backup_time(),
|
||||
comment,
|
||||
verification,
|
||||
files,
|
||||
@ -490,7 +490,7 @@ pub fn verify(
|
||||
match (backup_type, backup_id, backup_time) {
|
||||
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
|
||||
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
|
||||
let dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
backup_dir = Some(dir);
|
||||
}
|
||||
(Some(backup_type), Some(backup_id), None) => {
|
||||
@ -518,7 +518,14 @@ pub fn verify(
|
||||
|
||||
let failed_dirs = if let Some(backup_dir) = backup_dir {
|
||||
let mut res = Vec::new();
|
||||
if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
|
||||
if !verify_backup_dir(
|
||||
datastore,
|
||||
&backup_dir,
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
worker.clone(),
|
||||
worker.upid().clone(),
|
||||
)? {
|
||||
res.push(backup_dir.to_string());
|
||||
}
|
||||
res
|
||||
@ -530,10 +537,11 @@ pub fn verify(
|
||||
corrupt_chunks,
|
||||
None,
|
||||
worker.clone(),
|
||||
worker.upid(),
|
||||
)?;
|
||||
failed_dirs
|
||||
} else {
|
||||
verify_all_backups(datastore, worker.clone())?
|
||||
verify_all_backups(datastore, worker.clone(), worker.upid())?
|
||||
};
|
||||
if failed_dirs.len() > 0 {
|
||||
worker.log("Failed to verify following snapshots:");
|
||||
@ -673,7 +681,7 @@ fn prune(
|
||||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
}
|
||||
@ -697,7 +705,7 @@ fn prune(
|
||||
if keep_all { keep = true; }
|
||||
|
||||
let backup_time = info.backup_dir.backup_time();
|
||||
let timestamp = BackupDir::backup_time_to_string(backup_time);
|
||||
let timestamp = info.backup_dir.backup_time_string();
|
||||
let group = info.backup_dir.group();
|
||||
|
||||
|
||||
@ -714,7 +722,7 @@ fn prune(
|
||||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
|
||||
@ -770,7 +778,7 @@ fn start_garbage_collection(
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
worker.log(format!("starting garbage collection on store {}", store));
|
||||
datastore.garbage_collection(&worker)
|
||||
datastore.garbage_collection(&*worker, worker.upid())
|
||||
},
|
||||
)?;
|
||||
|
||||
@ -897,7 +905,7 @@ fn download_file(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -970,7 +978,7 @@ fn download_file_decoded(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1083,7 +1091,7 @@ fn upload_backup_log(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
|
||||
@ -1097,7 +1105,7 @@ fn upload_backup_log(
|
||||
}
|
||||
|
||||
println!("Upload backup log to {}/{}/{}/{}/{}", store,
|
||||
backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
|
||||
backup_type, backup_id, backup_dir.backup_time_string(), file_name);
|
||||
|
||||
let data = req_body
|
||||
.map_err(Error::from)
|
||||
@ -1159,7 +1167,7 @@ fn catalog(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1276,7 +1284,7 @@ fn pxar_file_download(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1417,7 +1425,7 @@ fn get_notes(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1470,7 +1478,7 @@ fn set_notes(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1484,6 +1492,51 @@ fn set_notes(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
},
|
||||
"new-owner": {
|
||||
type: Userid,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
|
||||
},
|
||||
)]
|
||||
/// Change owner of a backup group
|
||||
fn set_backup_owner(
|
||||
store: String,
|
||||
backup_type: String,
|
||||
backup_id: String,
|
||||
new_owner: Userid,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let backup_group = BackupGroup::new(backup_type, backup_id);
|
||||
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
if !user_info.is_active_user(&new_owner) {
|
||||
bail!("user '{}' is inactive or non-existent", new_owner);
|
||||
}
|
||||
|
||||
datastore.set_owner(&backup_group, &new_owner, true)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
@ -1491,6 +1544,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.get(&API_METHOD_CATALOG)
|
||||
),
|
||||
(
|
||||
"change-owner",
|
||||
&Router::new()
|
||||
.post(&API_METHOD_SET_BACKUP_OWNER)
|
||||
),
|
||||
(
|
||||
"download",
|
||||
&Router::new()
|
||||
|
@ -113,8 +113,29 @@ async move {
|
||||
bail!("backup owner check failed ({} != {})", userid, owner);
|
||||
}
|
||||
|
||||
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
|
||||
let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
|
||||
let last_backup = {
|
||||
let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
|
||||
if let Some(info) = info {
|
||||
let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
|
||||
let verify = manifest.unprotected["verify_state"].clone();
|
||||
match serde_json::from_value::<SnapshotVerifyState>(verify) {
|
||||
Ok(verify) => {
|
||||
match verify.state {
|
||||
VerifyState::Ok => Some(info),
|
||||
VerifyState::Failed => None,
|
||||
}
|
||||
},
|
||||
Err(_) => {
|
||||
// no verify state found, treat as valid
|
||||
Some(info)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
|
||||
|
||||
let _last_guard = if let Some(last) = &last_backup {
|
||||
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
|
||||
@ -159,6 +180,7 @@ async move {
|
||||
let window_size = 32*1024*1024; // max = (1 << 31) - 2
|
||||
http.http2_initial_stream_window_size(window_size);
|
||||
http.http2_initial_connection_window_size(window_size);
|
||||
http.http2_max_frame_size(4*1024*1024);
|
||||
|
||||
http.serve_connection(conn, service)
|
||||
.map_err(Error::from)
|
||||
@ -178,7 +200,7 @@ async move {
|
||||
};
|
||||
if benchmark {
|
||||
env.log("benchmark finished successfully");
|
||||
env.remove_backup()?;
|
||||
tools::runtime::block_in_place(|| env.remove_backup())?;
|
||||
return Ok(());
|
||||
}
|
||||
match (res, env.ensure_finished()) {
|
||||
@ -200,7 +222,7 @@ async move {
|
||||
(Err(err), Err(_)) => {
|
||||
env.log(format!("backup failed: {}", err));
|
||||
env.log("removing failed backup");
|
||||
env.remove_backup()?;
|
||||
tools::runtime::block_in_place(|| env.remove_backup())?;
|
||||
Err(err)
|
||||
},
|
||||
}
|
||||
@ -354,7 +376,7 @@ fn create_fixed_index(
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
bail!("cannot reuse index - no previous backup exists");
|
||||
bail!("cannot reuse index - no valid previous backup exists");
|
||||
}
|
||||
};
|
||||
|
||||
@ -669,7 +691,7 @@ fn download_previous(
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => bail!("no previous backup"),
|
||||
None => bail!("no valid previous backup"),
|
||||
};
|
||||
|
||||
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
|
||||
|
@ -66,13 +66,16 @@ struct FixedWriterState {
|
||||
incremental: bool,
|
||||
}
|
||||
|
||||
// key=digest, value=length
|
||||
type KnownChunksMap = HashMap<[u8;32], u32>;
|
||||
|
||||
struct SharedBackupState {
|
||||
finished: bool,
|
||||
uid_counter: usize,
|
||||
file_counter: usize, // successfully uploaded files
|
||||
dynamic_writers: HashMap<usize, DynamicWriterState>,
|
||||
fixed_writers: HashMap<usize, FixedWriterState>,
|
||||
known_chunks: HashMap<[u8;32], u32>,
|
||||
known_chunks: KnownChunksMap,
|
||||
backup_size: u64, // sums up size of all files
|
||||
backup_stat: UploadStatistic,
|
||||
}
|
||||
|
@ -61,12 +61,15 @@ impl Future for UploadChunk {
|
||||
let (is_duplicate, compressed_size) = match proxmox::try_block! {
|
||||
let mut chunk = DataBlob::from_raw(raw_data)?;
|
||||
|
||||
tools::runtime::block_in_place(|| {
|
||||
chunk.verify_unencrypted(this.size as usize, &this.digest)?;
|
||||
|
||||
// always comput CRC at server side
|
||||
chunk.set_crc(chunk.compute_crc());
|
||||
|
||||
this.store.insert_chunk(&chunk, &this.digest)
|
||||
})
|
||||
|
||||
} {
|
||||
Ok(res) => res,
|
||||
Err(err) => break err,
|
||||
|
@ -9,8 +9,9 @@ use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
|
||||
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -22,7 +23,7 @@ use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||
items: { type: datastore::DataStoreConfig },
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List all datastores
|
||||
@ -33,11 +34,18 @@ pub fn list_datastores(
|
||||
|
||||
let (config, digest) = datastore::config()?;
|
||||
|
||||
let list = config.convert_to_typed_array("datastore")?;
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(list)
|
||||
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
||||
let filter_by_privs = |store: &DataStoreConfig| {
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
|
||||
(user_privs & PRIV_DATASTORE_AUDIT) != 0
|
||||
};
|
||||
|
||||
Ok(list.into_iter().filter(filter_by_privs).collect())
|
||||
}
|
||||
|
||||
|
||||
@ -67,6 +75,10 @@ pub fn list_datastores(
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -94,13 +106,13 @@ pub fn list_datastores(
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_MODIFY, false),
|
||||
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
|
||||
},
|
||||
)]
|
||||
/// Create new datastore config.
|
||||
pub fn create_datastore(param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
|
||||
|
||||
@ -119,6 +131,10 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
crate::config::jobstate::create_state_file("prune", &datastore.name)?;
|
||||
crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
|
||||
crate::config::jobstate::create_state_file("verify", &datastore.name)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -163,6 +179,8 @@ pub enum DeletableProperty {
|
||||
gc_schedule,
|
||||
/// Delete the prune job schedule.
|
||||
prune_schedule,
|
||||
/// Delete the verify schedule property
|
||||
verify_schedule,
|
||||
/// Delete the keep-last property
|
||||
keep_last,
|
||||
/// Delete the keep-hourly property
|
||||
@ -196,6 +214,10 @@ pub enum DeletableProperty {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -244,6 +266,7 @@ pub fn update_datastore(
|
||||
comment: Option<String>,
|
||||
gc_schedule: Option<String>,
|
||||
prune_schedule: Option<String>,
|
||||
verify_schedule: Option<String>,
|
||||
keep_last: Option<u64>,
|
||||
keep_hourly: Option<u64>,
|
||||
keep_daily: Option<u64>,
|
||||
@ -254,7 +277,7 @@ pub fn update_datastore(
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
// pass/compare digest
|
||||
let (mut config, expected_digest) = datastore::config()?;
|
||||
@ -272,6 +295,7 @@ pub fn update_datastore(
|
||||
DeletableProperty::comment => { data.comment = None; },
|
||||
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
|
||||
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
|
||||
DeletableProperty::verify_schedule => { data.verify_schedule = None; },
|
||||
DeletableProperty::keep_last => { data.keep_last = None; },
|
||||
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
|
||||
DeletableProperty::keep_daily => { data.keep_daily = None; },
|
||||
@ -291,8 +315,23 @@ pub fn update_datastore(
|
||||
}
|
||||
}
|
||||
|
||||
if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
|
||||
if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
|
||||
let mut gc_schedule_changed = false;
|
||||
if gc_schedule.is_some() {
|
||||
gc_schedule_changed = data.gc_schedule != gc_schedule;
|
||||
data.gc_schedule = gc_schedule;
|
||||
}
|
||||
|
||||
let mut prune_schedule_changed = false;
|
||||
if prune_schedule.is_some() {
|
||||
prune_schedule_changed = data.prune_schedule != prune_schedule;
|
||||
data.prune_schedule = prune_schedule;
|
||||
}
|
||||
|
||||
let mut verify_schedule_changed = false;
|
||||
if verify_schedule.is_some() {
|
||||
verify_schedule_changed = data.verify_schedule != verify_schedule;
|
||||
data.verify_schedule = verify_schedule;
|
||||
}
|
||||
|
||||
if keep_last.is_some() { data.keep_last = keep_last; }
|
||||
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
|
||||
@ -305,6 +344,20 @@ pub fn update_datastore(
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
// we want to reset the statefiles, to avoid an immediate action in some cases
|
||||
// (e.g. going from monthly to weekly in the second week of the month)
|
||||
if gc_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("garbage_collection", &name)?;
|
||||
}
|
||||
|
||||
if prune_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("prune", &name)?;
|
||||
}
|
||||
|
||||
if verify_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("verify", &name)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -322,13 +375,13 @@ pub fn update_datastore(
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
|
||||
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
|
||||
},
|
||||
)]
|
||||
/// Remove a datastore configuration.
|
||||
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = datastore::config()?;
|
||||
|
||||
@ -344,6 +397,11 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
// ignore errors
|
||||
let _ = crate::config::jobstate::remove_state_file("prune", &name);
|
||||
let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
|
||||
let _ = crate::config::jobstate::remove_state_file("verify", &name);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -60,6 +60,12 @@ pub fn list_remotes(
|
||||
host: {
|
||||
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||
},
|
||||
port: {
|
||||
description: "The (optional) port.",
|
||||
type: u16,
|
||||
optional: true,
|
||||
default: 8007,
|
||||
},
|
||||
userid: {
|
||||
type: Userid,
|
||||
},
|
||||
@ -79,7 +85,7 @@ pub fn list_remotes(
|
||||
/// Create new remote.
|
||||
pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let mut data = param.clone();
|
||||
data["password"] = Value::from(base64::encode(password.as_bytes()));
|
||||
@ -136,6 +142,8 @@ pub enum DeletableProperty {
|
||||
comment,
|
||||
/// Delete the fingerprint property.
|
||||
fingerprint,
|
||||
/// Delete the port property.
|
||||
port,
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -153,6 +161,11 @@ pub enum DeletableProperty {
|
||||
optional: true,
|
||||
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||
},
|
||||
port: {
|
||||
description: "The (optional) port.",
|
||||
type: u16,
|
||||
optional: true,
|
||||
},
|
||||
userid: {
|
||||
optional: true,
|
||||
type: Userid,
|
||||
@ -188,6 +201,7 @@ pub fn update_remote(
|
||||
name: String,
|
||||
comment: Option<String>,
|
||||
host: Option<String>,
|
||||
port: Option<u16>,
|
||||
userid: Option<Userid>,
|
||||
password: Option<String>,
|
||||
fingerprint: Option<String>,
|
||||
@ -195,7 +209,7 @@ pub fn update_remote(
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = remote::config()?;
|
||||
|
||||
@ -211,6 +225,7 @@ pub fn update_remote(
|
||||
match delete_prop {
|
||||
DeletableProperty::comment => { data.comment = None; },
|
||||
DeletableProperty::fingerprint => { data.fingerprint = None; },
|
||||
DeletableProperty::port => { data.port = None; },
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -224,6 +239,7 @@ pub fn update_remote(
|
||||
}
|
||||
}
|
||||
if let Some(host) = host { data.host = host; }
|
||||
if port.is_some() { data.port = port; }
|
||||
if let Some(userid) = userid { data.userid = userid; }
|
||||
if let Some(password) = password { data.password = password; }
|
||||
|
||||
@ -256,7 +272,7 @@ pub fn update_remote(
|
||||
/// Remove a remote from the configuration file.
|
||||
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = remote::config()?;
|
||||
|
||||
|
@ -69,7 +69,7 @@ pub fn list_sync_jobs(
|
||||
/// Create a new sync job.
|
||||
pub fn create_sync_job(param: Value) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
|
||||
|
||||
@ -187,7 +187,7 @@ pub fn update_sync_job(
|
||||
digest: Option<String>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
// pass/compare digest
|
||||
let (mut config, expected_digest) = sync::config()?;
|
||||
@ -250,7 +250,7 @@ pub fn update_sync_job(
|
||||
/// Remove a sync job configuration
|
||||
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = sync::config()?;
|
||||
|
||||
|
@ -25,6 +25,8 @@ use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::*;
|
||||
|
||||
use crate::tools::systemd;
|
||||
|
||||
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
|
||||
.schema();
|
||||
@ -355,6 +357,11 @@ pub fn create_zpool(
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
worker.log(output);
|
||||
|
||||
if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
|
||||
let import_unit = format!("zfs-import@{}.service", systemd::escape_unit(&name, false));
|
||||
systemd::enable_unit(&import_unit)?;
|
||||
}
|
||||
|
||||
if let Some(compression) = compression {
|
||||
let mut command = std::process::Command::new("zfs");
|
||||
command.args(&["set", &format!("compression={}", compression), &name]);
|
||||
|
@ -198,6 +198,14 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
slaves: {
|
||||
schema: NETWORK_INTERFACE_LIST_SCHEMA,
|
||||
optional: true,
|
||||
@ -224,6 +232,8 @@ pub fn create_interface(
|
||||
bridge_ports: Option<String>,
|
||||
bridge_vlan_aware: Option<bool>,
|
||||
bond_mode: Option<LinuxBondMode>,
|
||||
bond_primary: Option<String>,
|
||||
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
slaves: Option<String>,
|
||||
param: Value,
|
||||
) -> Result<(), Error> {
|
||||
@ -231,7 +241,7 @@ pub fn create_interface(
|
||||
let interface_type = crate::tools::required_string_param(¶m, "type")?;
|
||||
let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
|
||||
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, _digest) = network::config()?;
|
||||
|
||||
@ -284,7 +294,23 @@ pub fn create_interface(
|
||||
if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
|
||||
}
|
||||
NetworkInterfaceType::Bond => {
|
||||
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
|
||||
if let Some(mode) = bond_mode {
|
||||
interface.bond_mode = bond_mode;
|
||||
if bond_primary.is_some() {
|
||||
if mode != LinuxBondMode::active_backup {
|
||||
bail!("bond-primary is only valid with Active/Backup mode");
|
||||
}
|
||||
interface.bond_primary = bond_primary;
|
||||
}
|
||||
if bond_xmit_hash_policy.is_some() {
|
||||
if mode != LinuxBondMode::ieee802_3ad &&
|
||||
mode != LinuxBondMode::balance_xor
|
||||
{
|
||||
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
|
||||
}
|
||||
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
|
||||
}
|
||||
}
|
||||
if let Some(slaves) = slaves {
|
||||
let slaves = split_interface_list(&slaves)?;
|
||||
interface.set_bond_slaves(slaves)?;
|
||||
@ -343,6 +369,11 @@ pub enum DeletableProperty {
|
||||
bridge_vlan_aware,
|
||||
/// Delete bond-slaves (set to 'none')
|
||||
slaves,
|
||||
/// Delete bond-primary
|
||||
#[serde(rename = "bond-primary")]
|
||||
bond_primary,
|
||||
/// Delete bond transmit hash policy
|
||||
bond_xmit_hash_policy,
|
||||
}
|
||||
|
||||
|
||||
@ -420,6 +451,14 @@ pub enum DeletableProperty {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
slaves: {
|
||||
schema: NETWORK_INTERFACE_LIST_SCHEMA,
|
||||
optional: true,
|
||||
@ -458,13 +497,15 @@ pub fn update_interface(
|
||||
bridge_ports: Option<String>,
|
||||
bridge_vlan_aware: Option<bool>,
|
||||
bond_mode: Option<LinuxBondMode>,
|
||||
bond_primary: Option<String>,
|
||||
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
slaves: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
digest: Option<String>,
|
||||
param: Value,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = network::config()?;
|
||||
|
||||
@ -501,6 +542,8 @@ pub fn update_interface(
|
||||
DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
|
||||
DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
|
||||
DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
|
||||
DeletableProperty::bond_primary => { interface.bond_primary = None; }
|
||||
DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -518,7 +561,23 @@ pub fn update_interface(
|
||||
let slaves = split_interface_list(&slaves)?;
|
||||
interface.set_bond_slaves(slaves)?;
|
||||
}
|
||||
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
|
||||
if let Some(mode) = bond_mode {
|
||||
interface.bond_mode = bond_mode;
|
||||
if bond_primary.is_some() {
|
||||
if mode != LinuxBondMode::active_backup {
|
||||
bail!("bond-primary is only valid with Active/Backup mode");
|
||||
}
|
||||
interface.bond_primary = bond_primary;
|
||||
}
|
||||
if bond_xmit_hash_policy.is_some() {
|
||||
if mode != LinuxBondMode::ieee802_3ad &&
|
||||
mode != LinuxBondMode::balance_xor
|
||||
{
|
||||
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
|
||||
}
|
||||
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(cidr) = cidr {
|
||||
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
|
||||
@ -587,7 +646,7 @@ pub fn update_interface(
|
||||
/// Remove network interface configuration.
|
||||
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||
let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||
|
||||
let (mut config, expected_digest) = network::config()?;
|
||||
|
||||
|
@ -1,10 +1,10 @@
|
||||
use anyhow::Error;
|
||||
use serde_json::{Value, json};
|
||||
|
||||
use proxmox::api::{api, Router};
|
||||
use proxmox::api::{api, Permission, Router};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::config::acl::PRIV_SYS_AUDIT;
|
||||
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
|
||||
|
||||
pub fn create_value_from_rrd(
|
||||
@ -15,7 +15,7 @@ pub fn create_value_from_rrd(
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let mut result = Vec::new();
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
|
||||
for name in list {
|
||||
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
|
||||
@ -57,6 +57,9 @@ pub fn create_value_from_rrd(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Read node stats
|
||||
fn get_node_stats(
|
||||
|
@ -1,11 +1,12 @@
|
||||
use anyhow::{Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, Router, Permission};
|
||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||
|
||||
use crate::tools;
|
||||
use crate::config::acl::PRIV_SYS_AUDIT;
|
||||
use crate::api2::types::NODE_SCHEMA;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::types::{NODE_SCHEMA, Userid};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -28,7 +29,7 @@ use crate::api2::types::NODE_SCHEMA;
|
||||
},
|
||||
serverid: {
|
||||
type: String,
|
||||
description: "The unique server ID.",
|
||||
description: "The unique server ID, if permitted to access.",
|
||||
},
|
||||
url: {
|
||||
type: String,
|
||||
@ -37,17 +38,28 @@ use crate::api2::types::NODE_SCHEMA;
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// Read subscription info.
|
||||
fn get_subscription(_param: Value) -> Result<Value, Error> {
|
||||
fn get_subscription(
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &[]);
|
||||
let server_id = if (user_privs & PRIV_SYS_AUDIT) != 0 {
|
||||
tools::get_hardware_address()?
|
||||
} else {
|
||||
"hidden".to_string()
|
||||
};
|
||||
|
||||
let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
|
||||
Ok(json!({
|
||||
"status": "NotFound",
|
||||
"message": "There is no subscription key",
|
||||
"serverid": tools::get_hardware_address()?,
|
||||
"serverid": server_id,
|
||||
"url": url,
|
||||
}))
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ use proxmox::{identity, list_subdirs_api_method, sortable};
|
||||
|
||||
use crate::tools;
|
||||
use crate::api2::types::*;
|
||||
use crate::server::{self, UPID, TaskState};
|
||||
use crate::server::{self, UPID, TaskState, TaskListInfoIterator};
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
|
||||
@ -303,6 +303,7 @@ pub fn list_tasks(
|
||||
limit: u64,
|
||||
errors: bool,
|
||||
running: bool,
|
||||
userfilter: Option<String>,
|
||||
param: Value,
|
||||
mut rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TaskListItem>, Error> {
|
||||
@ -315,57 +316,55 @@ pub fn list_tasks(
|
||||
|
||||
let store = param["store"].as_str();
|
||||
|
||||
let userfilter = param["userfilter"].as_str();
|
||||
let list = TaskListInfoIterator::new(running)?;
|
||||
|
||||
let list = server::read_task_list()?;
|
||||
let result: Vec<TaskListItem> = list
|
||||
.take_while(|info| !info.is_err())
|
||||
.filter_map(|info| {
|
||||
let info = match info {
|
||||
Ok(info) => info,
|
||||
Err(_) => return None,
|
||||
};
|
||||
|
||||
let mut result = vec![];
|
||||
if !list_all && info.upid.userid != userid { return None; }
|
||||
|
||||
let mut count = 0;
|
||||
|
||||
for info in list {
|
||||
if !list_all && info.upid.userid != userid { continue; }
|
||||
|
||||
|
||||
if let Some(userid) = userfilter {
|
||||
if !info.upid.userid.as_str().contains(userid) { continue; }
|
||||
if let Some(userid) = &userfilter {
|
||||
if !info.upid.userid.as_str().contains(userid) { return None; }
|
||||
}
|
||||
|
||||
if let Some(store) = store {
|
||||
// Note: useful to select all tasks spawned by proxmox-backup-client
|
||||
let worker_id = match &info.upid.worker_id {
|
||||
Some(w) => w,
|
||||
None => continue, // skip
|
||||
None => return None, // skip
|
||||
};
|
||||
|
||||
if info.upid.worker_type == "backup" || info.upid.worker_type == "restore" ||
|
||||
info.upid.worker_type == "prune"
|
||||
{
|
||||
let prefix = format!("{}_", store);
|
||||
if !worker_id.starts_with(&prefix) { continue; }
|
||||
if !worker_id.starts_with(&prefix) { return None; }
|
||||
} else if info.upid.worker_type == "garbage_collection" {
|
||||
if worker_id != store { continue; }
|
||||
if worker_id != store { return None; }
|
||||
} else {
|
||||
continue; // skip
|
||||
return None; // skip
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref state) = info.state {
|
||||
if running { continue; }
|
||||
match state {
|
||||
crate::server::TaskState::OK { .. } if errors => continue,
|
||||
match info.state {
|
||||
Some(_) if running => return None,
|
||||
Some(crate::server::TaskState::OK { .. }) if errors => return None,
|
||||
_ => {},
|
||||
}
|
||||
}
|
||||
|
||||
if (count as u64) < start {
|
||||
count += 1;
|
||||
continue;
|
||||
} else {
|
||||
count += 1;
|
||||
}
|
||||
Some(info.into())
|
||||
}).skip(start as usize)
|
||||
.take(limit as usize)
|
||||
.collect();
|
||||
|
||||
if (result.len() as u64) < limit { result.push(info.into()); };
|
||||
let mut count = result.len() + start as usize;
|
||||
if result.len() > 0 && result.len() >= limit as usize { // we have a 'virtual' entry as long as we have any new
|
||||
count += 1;
|
||||
}
|
||||
|
||||
rpcenv["total"] = Value::from(count);
|
||||
|
@ -1,4 +1,3 @@
|
||||
use chrono::prelude::*;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
@ -57,10 +56,11 @@ fn read_etc_localtime() -> Result<String, Error> {
|
||||
)]
|
||||
/// Read server time and time zone settings.
|
||||
fn get_time(_param: Value) -> Result<Value, Error> {
|
||||
let datetime = Local::now();
|
||||
let offset = datetime.offset();
|
||||
let time = datetime.timestamp();
|
||||
let localtime = time + (offset.fix().local_minus_utc() as i64);
|
||||
let time = proxmox::tools::time::epoch_i64();
|
||||
let tm = proxmox::tools::time::localtime(time)?;
|
||||
let offset = tm.tm_gmtoff;
|
||||
|
||||
let localtime = time + offset;
|
||||
|
||||
Ok(json!({
|
||||
"timezone": read_etc_localtime()?,
|
||||
|
@ -55,12 +55,13 @@ pub async fn get_pull_parameters(
|
||||
.password(Some(remote.password.clone()))
|
||||
.fingerprint(remote.fingerprint.clone());
|
||||
|
||||
let client = HttpClient::new(&remote.host, &remote.userid, options)?;
|
||||
let src_repo = BackupRepository::new(Some(remote.userid.clone()), Some(remote.host.clone()), remote.port, remote_store.to_string());
|
||||
|
||||
let client = HttpClient::new(&src_repo.host(), src_repo.port(), &src_repo.user(), options)?;
|
||||
let _auth_info = client.login() // make sure we can auth
|
||||
.await
|
||||
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
|
||||
|
||||
let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());
|
||||
|
||||
Ok((client, src_repo, tgt_store))
|
||||
}
|
||||
@ -176,7 +177,13 @@ async fn pull (
|
||||
|
||||
worker.log(format!("sync datastore '{}' start", store));
|
||||
|
||||
pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid).await?;
|
||||
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
|
||||
let future = select!{
|
||||
success = pull_future.fuse() => success,
|
||||
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
|
||||
};
|
||||
|
||||
let _ = future?;
|
||||
|
||||
worker.log(format!("sync datastore '{}' end", store));
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
//use chrono::{Local, TimeZone};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::header::{self, HeaderValue, UPGRADE};
|
||||
@ -15,7 +14,7 @@ use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
use crate::server::{WorkerTask, H2Service};
|
||||
use crate::tools;
|
||||
use crate::config::acl::PRIV_DATASTORE_READ;
|
||||
use crate::config::acl::{PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::helpers;
|
||||
|
||||
@ -59,7 +58,15 @@ fn upgrade_to_backup_reader_protocol(
|
||||
let store = tools::required_string_param(¶m, "store")?.to_owned();
|
||||
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
|
||||
let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let priv_read = privs & PRIV_DATASTORE_READ != 0;
|
||||
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
|
||||
|
||||
// priv_backup needs owner check further down below!
|
||||
if !priv_read && !priv_backup {
|
||||
bail!("no permissions on /datastore/{}", store);
|
||||
}
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
@ -83,12 +90,19 @@ fn upgrade_to_backup_reader_protocol(
|
||||
|
||||
let env_type = rpcenv.env_type();
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
if !priv_read {
|
||||
let owner = datastore.get_owner(backup_dir.group())?;
|
||||
if owner != userid {
|
||||
bail!("backup owner check failed!");
|
||||
}
|
||||
}
|
||||
|
||||
let path = datastore.base_path();
|
||||
|
||||
//let files = BackupInfo::list_files(&path, &backup_dir)?;
|
||||
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
|
||||
|
||||
WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
|
||||
let mut env = ReaderEnvironment::new(
|
||||
@ -121,6 +135,7 @@ fn upgrade_to_backup_reader_protocol(
|
||||
let window_size = 32*1024*1024; // max = (1 << 31) - 2
|
||||
http.http2_initial_stream_window_size(window_size);
|
||||
http.http2_initial_connection_window_size(window_size);
|
||||
http.http2_max_frame_size(4*1024*1024);
|
||||
|
||||
http.serve_connection(conn, service)
|
||||
.map_err(Error::from)
|
||||
@ -195,6 +210,27 @@ fn download_file(
|
||||
|
||||
env.log(format!("download {:?}", path.clone()));
|
||||
|
||||
let index: Option<Box<dyn IndexFile + Send>> = match archive_type(&file_name)? {
|
||||
ArchiveType::FixedIndex => {
|
||||
let index = env.datastore.open_fixed_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
ArchiveType::DynamicIndex => {
|
||||
let index = env.datastore.open_dynamic_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
_ => { None }
|
||||
};
|
||||
|
||||
if let Some(index) = index {
|
||||
env.log(format!("register chunks in '{}' as downloadable.", file_name));
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
env.register_chunk(info.digest);
|
||||
}
|
||||
}
|
||||
|
||||
helpers::create_download_response(path).await
|
||||
}.boxed()
|
||||
}
|
||||
@ -224,13 +260,17 @@ fn download_chunk(
|
||||
let digest_str = tools::required_string_param(¶m, "digest")?;
|
||||
let digest = proxmox::tools::hex_to_digest(digest_str)?;
|
||||
|
||||
if !env.check_chunk_access(digest) {
|
||||
env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str));
|
||||
return Err(http_err!(UNAUTHORIZED, "download chunk {} not allowed", digest_str));
|
||||
}
|
||||
|
||||
let (path, _) = env.datastore.chunk_path(&digest);
|
||||
let path2 = path.clone();
|
||||
|
||||
env.debug(format!("download chunk {:?}", path));
|
||||
|
||||
let data = tokio::fs::read(path)
|
||||
.await
|
||||
let data = tools::runtime::block_in_place(|| std::fs::read(path))
|
||||
.map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
|
||||
|
||||
let body = Body::from(data);
|
||||
@ -287,7 +327,7 @@ fn download_chunk_old(
|
||||
|
||||
pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&speedtest),
|
||||
&ObjectSchema::new("Test 4M block download speed.", &[])
|
||||
&ObjectSchema::new("Test 1M block download speed.", &[])
|
||||
);
|
||||
|
||||
fn speedtest(
|
||||
|
@ -1,5 +1,5 @@
|
||||
//use anyhow::{bail, format_err, Error};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc,RwLock};
|
||||
use std::collections::HashSet;
|
||||
|
||||
use serde_json::{json, Value};
|
||||
|
||||
@ -23,7 +23,7 @@ pub struct ReaderEnvironment {
|
||||
pub worker: Arc<WorkerTask>,
|
||||
pub datastore: Arc<DataStore>,
|
||||
pub backup_dir: BackupDir,
|
||||
// state: Arc<Mutex<SharedBackupState>>
|
||||
allowed_chunks: Arc<RwLock<HashSet<[u8;32]>>>,
|
||||
}
|
||||
|
||||
impl ReaderEnvironment {
|
||||
@ -45,7 +45,7 @@ impl ReaderEnvironment {
|
||||
debug: false,
|
||||
formatter: &JSON_FORMATTER,
|
||||
backup_dir,
|
||||
//state: Arc::new(Mutex::new(state)),
|
||||
allowed_chunks: Arc::new(RwLock::new(HashSet::new())),
|
||||
}
|
||||
}
|
||||
|
||||
@ -57,6 +57,15 @@ impl ReaderEnvironment {
|
||||
if self.debug { self.worker.log(msg); }
|
||||
}
|
||||
|
||||
|
||||
pub fn register_chunk(&self, digest: [u8;32]) {
|
||||
let mut allowed_chunks = self.allowed_chunks.write().unwrap();
|
||||
allowed_chunks.insert(digest);
|
||||
}
|
||||
|
||||
pub fn check_chunk_access(&self, digest: [u8;32]) -> bool {
|
||||
self.allowed_chunks.read().unwrap().contains(&digest)
|
||||
}
|
||||
}
|
||||
|
||||
impl RpcEnvironment for ReaderEnvironment {
|
||||
|
@ -17,13 +17,13 @@ use crate::api2::types::{
|
||||
RRDMode,
|
||||
RRDTimeFrameResolution,
|
||||
TaskListItem,
|
||||
TaskStateType,
|
||||
Userid,
|
||||
};
|
||||
|
||||
use crate::server;
|
||||
use crate::backup::{DataStore};
|
||||
use crate::config::datastore;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::tools::statistics::{linear_regression};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::acl::{
|
||||
@ -110,7 +110,7 @@ fn datastore_status(
|
||||
});
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
let rrd_resolution = RRDTimeFrameResolution::Month;
|
||||
let rrd_mode = RRDMode::Average;
|
||||
|
||||
@ -183,10 +183,23 @@ fn datastore_status(
|
||||
input: {
|
||||
properties: {
|
||||
since: {
|
||||
type: u64,
|
||||
type: i64,
|
||||
description: "Only list tasks since this UNIX epoch.",
|
||||
optional: true,
|
||||
},
|
||||
typefilter: {
|
||||
optional: true,
|
||||
type: String,
|
||||
description: "Only list tasks, whose type contains this string.",
|
||||
},
|
||||
statusfilter: {
|
||||
optional: true,
|
||||
type: Array,
|
||||
description: "Only list tasks which have any one of the listed status.",
|
||||
items: {
|
||||
type: TaskStateType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
@ -201,6 +214,9 @@ fn datastore_status(
|
||||
)]
|
||||
/// List tasks.
|
||||
pub fn list_tasks(
|
||||
since: Option<i64>,
|
||||
typefilter: Option<String>,
|
||||
statusfilter: Option<Vec<TaskStateType>>,
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TaskListItem>, Error> {
|
||||
@ -210,13 +226,49 @@ pub fn list_tasks(
|
||||
let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
|
||||
|
||||
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
|
||||
let since = since.unwrap_or_else(|| 0);
|
||||
|
||||
// TODO: replace with call that gets all task since 'since' epoch
|
||||
let list: Vec<TaskListItem> = server::read_task_list()?
|
||||
.into_iter()
|
||||
.map(TaskListItem::from)
|
||||
.filter(|entry| list_all || entry.user == userid)
|
||||
.collect();
|
||||
let list: Vec<TaskListItem> = server::TaskListInfoIterator::new(false)?
|
||||
.take_while(|info| {
|
||||
match info {
|
||||
Ok(info) => info.upid.starttime > since,
|
||||
Err(_) => false
|
||||
}
|
||||
})
|
||||
.filter_map(|info| {
|
||||
match info {
|
||||
Ok(info) => {
|
||||
if list_all || info.upid.userid == userid {
|
||||
if let Some(filter) = &typefilter {
|
||||
if !info.upid.worker_type.contains(filter) {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(filters) = &statusfilter {
|
||||
if let Some(state) = &info.state {
|
||||
let statetype = match state {
|
||||
server::TaskState::OK { .. } => TaskStateType::OK,
|
||||
server::TaskState::Unknown { .. } => TaskStateType::Unknown,
|
||||
server::TaskState::Error { .. } => TaskStateType::Error,
|
||||
server::TaskState::Warning { .. } => TaskStateType::Warning,
|
||||
};
|
||||
|
||||
if !filters.contains(&statetype) {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(Ok(TaskListItem::from(info)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
Err(err) => Some(Err(err))
|
||||
}
|
||||
})
|
||||
.collect::<Result<Vec<TaskListItem>, Error>>()?;
|
||||
|
||||
Ok(list.into())
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, schema::*};
|
||||
use proxmox::const_regex;
|
||||
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
|
||||
use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
|
||||
|
||||
use crate::backup::CryptMode;
|
||||
use crate::server::UPID;
|
||||
@ -30,7 +30,7 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
|
||||
});
|
||||
|
||||
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
|
||||
macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }
|
||||
macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
|
||||
|
||||
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
|
||||
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
|
||||
@ -63,9 +63,9 @@ const_regex!{
|
||||
|
||||
pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
|
||||
|
||||
pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
|
||||
pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
|
||||
|
||||
pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
|
||||
pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
|
||||
|
||||
pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
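The DNS_NAME_OR_IP_REGEX change above is not cosmetic: without the non-capturing group, `^` binds only to the DNS alternative and `$` only to the IP alternative, so partial matches slip through. A small sketch of the difference using the regex crate (patterns simplified to two literals):

fn anchor_demo() {
    let loose = regex::Regex::new(r"^foo|bar$").unwrap();
    let strict = regex::Regex::new(r"^(?:foo|bar)$").unwrap();
    // "fooX" still matches the loose pattern, because only `foo` is anchored at the start
    assert!(loose.is_match("fooX"));
    assert!(!strict.is_match("fooX"));
}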
|
||||
|
||||
@ -302,6 +302,11 @@ pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.schema();
|
||||
|
||||
pub const VERIFY_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.schema();
|
||||
|
||||
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
@ -380,13 +385,24 @@ pub struct GroupListItem {
|
||||
pub owner: Option<Userid>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// Result of a verify operation.
|
||||
pub enum VerifyState {
|
||||
/// Verification was successful
|
||||
Ok,
|
||||
/// Verification reported one or more errors
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
upid: {
|
||||
schema: UPID_SCHEMA
|
||||
},
|
||||
state: {
|
||||
type: String
|
||||
type: VerifyState
|
||||
},
|
||||
},
|
||||
)]
|
||||
@ -395,8 +411,8 @@ pub struct GroupListItem {
|
||||
pub struct SnapshotVerifyState {
|
||||
/// UPID of the verify task
|
||||
pub upid: UPID,
|
||||
/// State of the verification. "failed" or "ok"
|
||||
pub state: String,
|
||||
/// State of the verification. Enum.
|
||||
pub state: VerifyState,
|
||||
}
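With the enum in place, the verify result stored in the manifest's unprotected section serializes as "ok" or "failed" thanks to rename_all = "lowercase". A small illustrative sketch with a stand-in enum (not the actual SnapshotVerifyState type):

#[derive(serde::Serialize)]
#[serde(rename_all = "lowercase")]
enum VerifyStateSketch {
    Ok,
    Failed,
}

fn manifest_entry(state: VerifyStateSketch, upid: &str) -> serde_json::Value {
    // produces e.g. {"state":"ok","upid":"..."}
    serde_json::json!({ "state": state, "upid": upid })
}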
|
||||
|
||||
#[api(
|
||||
@ -646,6 +662,20 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum TaskStateType {
|
||||
/// Ok
|
||||
OK,
|
||||
/// Warning
|
||||
Warning,
|
||||
/// Error
|
||||
Error,
|
||||
/// Unknown
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
@ -688,7 +718,7 @@ pub enum LinuxBondMode {
|
||||
/// Broadcast policy
|
||||
broadcast = 3,
|
||||
/// IEEE 802.3ad Dynamic link aggregation
|
||||
//#[serde(rename = "802.3ad")]
|
||||
#[serde(rename = "802.3ad")]
|
||||
ieee802_3ad = 4,
|
||||
/// Adaptive transmit load balancing
|
||||
balance_tlb = 5,
|
||||
@ -696,6 +726,23 @@ pub enum LinuxBondMode {
|
||||
balance_alb = 6,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[allow(non_camel_case_types)]
|
||||
#[repr(u8)]
|
||||
/// Bond Transmit Hash Policy for LACP (802.3ad)
|
||||
pub enum BondXmitHashPolicy {
|
||||
/// Layer 2
|
||||
layer2 = 0,
|
||||
/// Layer 2+3
|
||||
#[serde(rename = "layer2+3")]
|
||||
layer2_3 = 1,
|
||||
/// Layer 3+4
|
||||
#[serde(rename = "layer3+4")]
|
||||
layer3_4 = 2,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
@ -801,7 +848,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
bond_mode: {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@ -858,6 +913,10 @@ pub struct Interface {
|
||||
pub slaves: Option<Vec<String>>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub bond_mode: Option<LinuxBondMode>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
#[serde(rename = "bond-primary")]
|
||||
pub bond_primary: Option<String>,
|
||||
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
}
|
||||
|
||||
// Regression tests
|
||||
|
@ -131,13 +131,13 @@ impl std::ops::Deref for Username {
|
||||
|
||||
impl Borrow<UsernameRef> for Username {
|
||||
fn borrow(&self) -> &UsernameRef {
|
||||
UsernameRef::new(self.as_str())
|
||||
UsernameRef::new(self.0.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<UsernameRef> for Username {
|
||||
fn as_ref(&self) -> &UsernameRef {
|
||||
UsernameRef::new(self.as_str())
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
@ -204,13 +204,13 @@ impl std::ops::Deref for Realm {
|
||||
|
||||
impl Borrow<RealmRef> for Realm {
|
||||
fn borrow(&self) -> &RealmRef {
|
||||
RealmRef::new(self.as_str())
|
||||
RealmRef::new(self.0.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<RealmRef> for Realm {
|
||||
fn as_ref(&self) -> &RealmRef {
|
||||
RealmRef::new(self.as_str())
|
||||
self.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
@ -397,10 +397,7 @@ impl TryFrom<String> for Userid {
|
||||
|
||||
impl PartialEq<str> for Userid {
|
||||
fn eq(&self, rhs: &str) -> bool {
|
||||
rhs.len() > self.name_len + 2 // make sure range access below is allowed
|
||||
&& rhs.starts_with(self.name().as_str())
|
||||
&& rhs.as_bytes()[self.name_len] == b'@'
|
||||
&& &rhs[(self.name_len + 1)..] == self.realm().as_str()
|
||||
self.data == *rhs
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6,7 +6,6 @@ use std::process::{Command, Stdio};
|
||||
use std::io::Write;
|
||||
use std::ffi::{CString, CStr};
|
||||
|
||||
use base64;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::json;
|
||||
|
||||
@ -25,8 +24,7 @@ impl ProxmoxAuthenticator for PAM {
|
||||
let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
|
||||
auth.get_handler().set_credentials(username.as_str(), password);
|
||||
auth.authenticate()?;
|
||||
return Ok(());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
|
||||
@ -99,7 +97,7 @@ pub fn encrypt_pw(password: &str) -> Result<String, Error> {
|
||||
|
||||
pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
|
||||
let verify = crypt(password.as_bytes(), enc_password)?;
|
||||
if &verify != enc_password {
|
||||
if verify != enc_password {
|
||||
bail!("invalid credentials");
|
||||
}
|
||||
Ok(())
|
||||
|
@ -11,7 +11,6 @@ use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn compute_csrf_secret_digest(
|
||||
timestamp: i64,
|
||||
@ -32,7 +31,7 @@ pub fn assemble_csrf_prevention_token(
|
||||
userid: &Userid,
|
||||
) -> String {
|
||||
|
||||
let epoch = epoch_now_u64().unwrap() as i64;
|
||||
let epoch = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let digest = compute_csrf_secret_digest(epoch, secret, userid);
|
||||
|
||||
@ -69,7 +68,7 @@ pub fn verify_csrf_prevention_token(
|
||||
bail!("invalid signature.");
|
||||
}
|
||||
|
||||
let now = epoch_now_u64()? as i64;
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let age = now - ttime;
|
||||
if age < min_age {
|
||||
|
@ -4,8 +4,6 @@ use anyhow::{bail, format_err, Error};
|
||||
use regex::Regex;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
|
||||
|
||||
use std::path::{PathBuf, Path};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
@ -105,8 +103,7 @@ impl BackupGroup {
|
||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
|
||||
let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
|
||||
list.push(BackupInfo { backup_dir, files });
|
||||
@ -116,7 +113,7 @@ impl BackupGroup {
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
||||
|
||||
let mut last = None;
|
||||
|
||||
@ -142,11 +139,11 @@ impl BackupGroup {
|
||||
}
|
||||
}
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
if let Some(last_dt) = last {
|
||||
if dt > last_dt { last = Some(dt); }
|
||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||
if let Some(last_timestamp) = last {
|
||||
if timestamp > last_timestamp { last = Some(timestamp); }
|
||||
} else {
|
||||
last = Some(dt);
|
||||
last = Some(timestamp);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -203,45 +200,63 @@ pub struct BackupDir {
|
||||
/// Backup group
|
||||
group: BackupGroup,
|
||||
/// Backup timestamp
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
// backup_time as rfc3339
|
||||
backup_time_string: String
|
||||
}
|
||||
|
||||
impl BackupDir {
|
||||
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
|
||||
where
|
||||
T: Into<String>,
|
||||
U: Into<String>,
|
||||
{
|
||||
// Note: makes sure that nanoseconds is 0
|
||||
Self {
|
||||
group: BackupGroup::new(backup_type.into(), backup_id.into()),
|
||||
backup_time: Utc.timestamp(timestamp, 0),
|
||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||
BackupDir::with_group(group, backup_time)
|
||||
}
|
||||
|
||||
pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
|
||||
where
|
||||
T: Into<String>,
|
||||
U: Into<String>,
|
||||
V: Into<String>,
|
||||
{
|
||||
let backup_time_string = backup_time_string.into();
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
|
||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||
Ok(Self { group, backup_time, backup_time_string })
|
||||
}
|
||||
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
|
||||
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
|
||||
|
||||
pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
|
||||
let backup_time_string = Self::backup_time_to_string(backup_time)?;
|
||||
Ok(Self { group, backup_time, backup_time_string })
|
||||
}
|
||||
|
||||
pub fn group(&self) -> &BackupGroup {
|
||||
&self.group
|
||||
}
|
||||
|
||||
pub fn backup_time(&self) -> DateTime<Utc> {
|
||||
pub fn backup_time(&self) -> i64 {
|
||||
self.backup_time
|
||||
}
|
||||
|
||||
pub fn backup_time_string(&self) -> &str {
|
||||
&self.backup_time_string
|
||||
}
|
||||
|
||||
pub fn relative_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = self.group.group_path();
|
||||
|
||||
relative_path.push(Self::backup_time_to_string(self.backup_time));
|
||||
relative_path.push(self.backup_time_string.clone());
|
||||
|
||||
relative_path
|
||||
}
|
||||
|
||||
pub fn backup_time_to_string(backup_time: DateTime<Utc>) -> String {
|
||||
backup_time.to_rfc3339_opts(SecondsFormat::Secs, true)
|
||||
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
|
||||
// fixme: can this fail? (avoid unwrap)
|
||||
proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
|
||||
}
|
||||
}
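The constructors above keep the epoch value and its RFC 3339 rendering in sync. A minimal sketch of that round trip, relying only on the proxmox time helpers already used in the hunk (their exact signatures are assumed from the calls shown):

fn roundtrip(epoch: i64) -> Result<(), anyhow::Error> {
    let rfc3339 = proxmox::tools::time::epoch_to_rfc3339_utc(epoch)?;
    let parsed = proxmox::tools::time::parse_rfc3339(&rfc3339)?;
    assert_eq!(parsed, epoch);
    Ok(())
}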
|
||||
|
||||
@ -255,9 +270,11 @@ impl std::str::FromStr for BackupDir {
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
BackupDir::with_rfc3339(
|
||||
cap.get(1).unwrap().as_str(),
|
||||
cap.get(2).unwrap().as_str(),
|
||||
cap.get(3).unwrap().as_str(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -265,14 +282,7 @@ impl std::fmt::Display for BackupDir {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.group.backup_type();
|
||||
let id = self.group.backup_id();
|
||||
let time = Self::backup_time_to_string(self.backup_time);
|
||||
write!(f, "{}/{}/{}", backup_type, id, time)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<(BackupGroup, i64)> for BackupDir {
|
||||
fn from((group, timestamp): (BackupGroup, i64)) -> Self {
|
||||
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
|
||||
write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
|
||||
}
|
||||
}
|
||||
|
||||
@ -330,13 +340,12 @@ impl BackupInfo {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
||||
tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time_string, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
|
||||
let backup_dir = BackupDir::with_rfc3339(backup_type, backup_id, backup_time_string)?;
|
||||
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
let files = list_backup_files(l2_fd, backup_time_string)?;
|
||||
|
||||
list.push(BackupInfo { backup_dir, files });
|
||||
|
||||
|
@ -5,7 +5,6 @@ use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::offset::{TimeZone, Local};
|
||||
|
||||
use pathpatterns::{MatchList, MatchType};
|
||||
use proxmox::tools::io::ReadExt;
|
||||
@ -533,17 +532,17 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
self.dump_dir(&path, pos)?;
|
||||
}
|
||||
CatalogEntryType::File => {
|
||||
let dt = Local
|
||||
.timestamp_opt(mtime as i64, 0)
|
||||
.single() // chrono docs say timestamp_opt can only be None or Single!
|
||||
.unwrap_or_else(|| Local.timestamp(0, 0));
|
||||
let mut mtime_string = mtime.to_string();
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
|
||||
mtime_string = s;
|
||||
}
|
||||
|
||||
println!(
|
||||
"{} {:?} {} {}",
|
||||
etype,
|
||||
path,
|
||||
size,
|
||||
dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
|
||||
mtime_string,
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
|
@ -11,7 +11,7 @@ use crate::tools;
|
||||
use crate::api2::types::GarbageCollectionStatus;
|
||||
|
||||
use super::DataBlob;
|
||||
use crate::server::WorkerTask;
|
||||
use crate::task::TaskState;
|
||||
|
||||
/// File system based chunk store
|
||||
pub struct ChunkStore {
|
||||
@ -104,12 +104,11 @@ impl ChunkStore {
|
||||
}
|
||||
let percentage = (i*100)/(64*1024);
|
||||
if percentage != last_percentage {
|
||||
eprintln!("{}%", percentage);
|
||||
// eprintln!("ChunkStore::create {}%", percentage);
|
||||
last_percentage = percentage;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Self::open(name, base)
|
||||
}
|
||||
|
||||
@ -279,7 +278,7 @@ impl ChunkStore {
|
||||
oldest_writer: i64,
|
||||
phase1_start_time: i64,
|
||||
status: &mut GarbageCollectionStatus,
|
||||
worker: &WorkerTask,
|
||||
worker: &dyn TaskState,
|
||||
) -> Result<(), Error> {
|
||||
use nix::sys::stat::fstatat;
|
||||
use nix::unistd::{unlinkat, UnlinkatFlags};
|
||||
@ -298,10 +297,15 @@ impl ChunkStore {
|
||||
for (entry, percentage, bad) in self.get_chunk_iterator()? {
|
||||
if last_percentage != percentage {
|
||||
last_percentage = percentage;
|
||||
worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"percentage done: phase2 {}% (processed {} chunks)",
|
||||
percentage,
|
||||
chunk_count,
|
||||
);
|
||||
}
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
worker.check_abort()?;
|
||||
tools::fail_on_shutdown()?;
|
||||
|
||||
let (dirfd, entry) = match entry {
|
||||
@ -335,12 +339,13 @@ impl ChunkStore {
|
||||
Ok(_) => {
|
||||
match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
|
||||
Err(err) =>
|
||||
worker.warn(format!(
|
||||
crate::task_warn!(
|
||||
worker,
|
||||
"unlinking corrupt chunk {:?} failed on store '{}' - {}",
|
||||
filename,
|
||||
self.name,
|
||||
err,
|
||||
)),
|
||||
),
|
||||
Ok(_) => {
|
||||
status.removed_bad += 1;
|
||||
status.removed_bytes += stat.st_size as u64;
|
||||
@ -352,11 +357,12 @@ impl ChunkStore {
|
||||
},
|
||||
Err(err) => {
|
||||
// some other error, warn user and keep .bad file around too
|
||||
worker.warn(format!(
|
||||
crate::task_warn!(
|
||||
worker,
|
||||
"error during stat on '{:?}' - {}",
|
||||
orig_filename,
|
||||
err,
|
||||
));
|
||||
);
|
||||
}
|
||||
}
|
||||
} else if stat.st_atime < min_atime {
|
||||
|
@ -10,7 +10,6 @@
|
||||
use std::io::Write;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::pkcs5::pbkdf2_hmac;
|
||||
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
|
||||
@ -216,10 +215,10 @@ impl CryptConfig {
|
||||
pub fn generate_rsa_encoded_key(
|
||||
&self,
|
||||
rsa: openssl::rsa::Rsa<openssl::pkey::Public>,
|
||||
created: DateTime<Local>,
|
||||
created: i64,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
|
||||
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
|
||||
|
||||
|
@ -72,7 +72,7 @@ impl DataBlob {
|
||||
}
|
||||
|
||||
// verify the CRC32 checksum
|
||||
fn verify_crc(&self) -> Result<(), Error> {
|
||||
pub fn verify_crc(&self) -> Result<(), Error> {
|
||||
let expected_crc = self.compute_crc();
|
||||
if expected_crc != self.crc() {
|
||||
bail!("Data blob has wrong CRC checksum.");
|
||||
@ -198,7 +198,10 @@ impl DataBlob {
|
||||
Ok(data)
|
||||
} else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
let data_start = std::mem::size_of::<DataBlobHeader>();
|
||||
let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
|
||||
let mut reader = &self.raw_data[data_start..];
|
||||
let data = zstd::stream::decode_all(&mut reader)?;
|
||||
// zstd::block::decompress is about 10% slower
|
||||
// let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
|
||||
if let Some(digest) = digest {
|
||||
Self::verify_digest(&data, None, digest)?;
|
||||
}
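The switch to zstd::stream::decode_all above removes the need to know the decompressed size (MAX_BLOB_SIZE) up front. A minimal round-trip sketch with the zstd crate (payload and compression level are illustrative):

fn zstd_roundtrip() -> std::io::Result<()> {
    let payload = b"proxmox backup chunk payload";
    let compressed = zstd::stream::encode_all(&payload[..], 0)?;
    let decompressed = zstd::stream::decode_all(&compressed[..])?;
    assert_eq!(&decompressed[..], &payload[..]);
    Ok(())
}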
|
||||
@ -268,6 +271,12 @@ impl DataBlob {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether the chunk is encrypted
|
||||
pub fn is_encrypted(&self) -> bool {
|
||||
let magic = self.magic();
|
||||
magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0
|
||||
}
|
||||
|
||||
/// Verify digest and data length for unencrypted chunks.
|
||||
///
|
||||
/// To do that, we need to decompress data first. Please note that
|
||||
|
@ -6,7 +6,6 @@ use std::convert::TryFrom;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
@ -19,11 +18,12 @@ use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
|
||||
use super::index::*;
|
||||
use super::{DataBlob, ArchiveType, archive_type};
|
||||
use crate::config::datastore;
|
||||
use crate::server::WorkerTask;
|
||||
use crate::task::TaskState;
|
||||
use crate::tools;
|
||||
use crate::tools::format::HumanByte;
|
||||
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
|
||||
use crate::api2::types::{GarbageCollectionStatus, Userid};
|
||||
use crate::server::UPID;
|
||||
|
||||
lazy_static! {
|
||||
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
|
||||
@ -71,6 +71,10 @@ impl DataStore {
|
||||
|
||||
let path = store_config["path"].as_str().unwrap();
|
||||
|
||||
Self::open_with_path(store_name, Path::new(path))
|
||||
}
|
||||
|
||||
pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {
|
||||
let chunk_store = ChunkStore::open(store_name, path)?;
|
||||
|
||||
let gc_status = GarbageCollectionStatus::default();
|
||||
@ -242,7 +246,7 @@ impl DataStore {
|
||||
/// Returns the time of the last successful backup
|
||||
///
|
||||
/// Or None if there is no backup in the group (or the group dir does not exist).
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
|
||||
let base_path = self.base_path();
|
||||
let mut group_path = base_path.clone();
|
||||
group_path.push(backup_group.group_path());
|
||||
@ -408,25 +412,34 @@ impl DataStore {
|
||||
index: I,
|
||||
file_name: &Path, // only used for error reporting
|
||||
status: &mut GarbageCollectionStatus,
|
||||
worker: &WorkerTask,
|
||||
worker: &dyn TaskState,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
status.index_file_count += 1;
|
||||
status.index_data_bytes += index.index_bytes();
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
worker.fail_on_abort()?;
|
||||
worker.check_abort()?;
|
||||
tools::fail_on_shutdown()?;
|
||||
let digest = index.index_digest(pos).unwrap();
|
||||
if let Err(err) = self.chunk_store.touch_chunk(digest) {
|
||||
worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
|
||||
proxmox::tools::digest_to_hex(digest), file_name, err));
|
||||
crate::task_warn!(
|
||||
worker,
|
||||
"warning: unable to access chunk {}, required by {:?} - {}",
|
||||
proxmox::tools::digest_to_hex(digest),
|
||||
file_name,
|
||||
err,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {
|
||||
fn mark_used_chunks(
|
||||
&self,
|
||||
status: &mut GarbageCollectionStatus,
|
||||
worker: &dyn TaskState,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let image_list = self.list_images()?;
|
||||
|
||||
@ -438,7 +451,7 @@ impl DataStore {
|
||||
|
||||
for path in image_list {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
worker.check_abort()?;
|
||||
tools::fail_on_shutdown()?;
|
||||
|
||||
if let Ok(archive_type) = archive_type(&path) {
|
||||
@ -454,8 +467,13 @@ impl DataStore {
|
||||
|
||||
let percentage = done*100/image_count;
|
||||
if percentage > last_percentage {
|
||||
worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
|
||||
percentage, done, image_count));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"percentage done: phase1 {}% ({} of {} index files)",
|
||||
percentage,
|
||||
done,
|
||||
image_count,
|
||||
);
|
||||
last_percentage = percentage;
|
||||
}
|
||||
}
|
||||
@ -471,46 +489,72 @@ impl DataStore {
|
||||
if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
|
||||
}
|
||||
|
||||
pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {
|
||||
pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
|
||||
|
||||
if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
|
||||
|
||||
// avoid running GC if an old daemon process still has a
|
||||
// running backup writer, which is not safe as we have no "oldest
|
||||
// writer" information and thus no safe atime cutoff
|
||||
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
|
||||
|
||||
let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
|
||||
let phase1_start_time = proxmox::tools::time::epoch_i64();
|
||||
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
|
||||
|
||||
let mut gc_status = GarbageCollectionStatus::default();
|
||||
gc_status.upid = Some(worker.to_string());
|
||||
gc_status.upid = Some(upid.to_string());
|
||||
|
||||
worker.log("Start GC phase1 (mark used chunks)");
|
||||
crate::task_log!(worker, "Start GC phase1 (mark used chunks)");
|
||||
|
||||
self.mark_used_chunks(&mut gc_status, &worker)?;
|
||||
self.mark_used_chunks(&mut gc_status, worker)?;
|
||||
|
||||
worker.log("Start GC phase2 (sweep unused chunks)");
|
||||
self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;
|
||||
crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
|
||||
self.chunk_store.sweep_unused_chunks(
|
||||
oldest_writer,
|
||||
phase1_start_time,
|
||||
&mut gc_status,
|
||||
worker,
|
||||
)?;
|
||||
|
||||
worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
|
||||
worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"Removed garbage: {}",
|
||||
HumanByte::from(gc_status.removed_bytes),
|
||||
);
|
||||
crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
|
||||
if gc_status.pending_bytes > 0 {
|
||||
worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"Pending removals: {} (in {} chunks)",
|
||||
HumanByte::from(gc_status.pending_bytes),
|
||||
gc_status.pending_chunks,
|
||||
);
|
||||
}
|
||||
if gc_status.removed_bad > 0 {
|
||||
worker.log(&format!("Removed bad files: {}", gc_status.removed_bad));
|
||||
crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
|
||||
}
|
||||
|
||||
worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"Original data usage: {}",
|
||||
HumanByte::from(gc_status.index_data_bytes),
|
||||
);
|
||||
|
||||
if gc_status.index_data_bytes > 0 {
|
||||
let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
|
||||
worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
|
||||
crate::task_log!(
|
||||
worker,
|
||||
"On-Disk usage: {} ({:.2}%)",
|
||||
HumanByte::from(gc_status.disk_bytes),
|
||||
comp_per,
|
||||
);
|
||||
}
|
||||
|
||||
worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));
|
||||
crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
|
||||
|
||||
if gc_status.disk_chunks > 0 {
|
||||
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
|
||||
worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
|
||||
crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
|
||||
}
|
||||
|
||||
*self.last_gc_status.lock().unwrap() = gc_status;
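The hunks above replace the concrete WorkerTask with a &dyn TaskState plus the task_log!/task_warn! macros, so garbage collection can be driven by anything that implements the trait. A self-contained sketch of that pattern with a hypothetical logger trait (not the actual TaskState definition):

trait LogSink {
    fn log(&self, msg: String);
}

struct StdoutSink;

impl LogSink for StdoutSink {
    fn log(&self, msg: String) {
        println!("{}", msg);
    }
}

// the callee depends only on the trait object, not on a concrete worker type
fn report_removed(worker: &dyn LogSink, removed_chunks: u64) {
    worker.log(format!("Removed chunks: {}", removed_chunks));
}

fn run() {
    report_removed(&StdoutSink, 42);
}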
|
||||
|
@ -21,14 +21,14 @@ use super::read_chunk::ReadChunk;
|
||||
use super::Chunker;
|
||||
use super::IndexFile;
|
||||
use super::{DataBlob, DataChunkBuilder};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
/// Header format definition for dynamic index files (`.didx`)
|
||||
#[repr(C)]
|
||||
pub struct DynamicIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
reserved: [u8; 4032], // overall size is one page (4096 bytes)
|
||||
@ -77,7 +77,7 @@ pub struct DynamicIndexReader {
|
||||
pub size: usize,
|
||||
index: Mmap<DynamicEntry>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
@ -90,12 +90,6 @@ impl DynamicIndexReader {
|
||||
}
|
||||
|
||||
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
||||
if let Err(err) =
|
||||
nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockSharedNonblock)
|
||||
{
|
||||
bail!("unable to get shared lock - {}", err);
|
||||
}
|
||||
|
||||
// FIXME: This is NOT OUR job! Check the callers of this method and remove this!
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
@ -107,7 +101,7 @@ impl DynamicIndexReader {
|
||||
bail!("got unknown magic number");
|
||||
}
|
||||
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let rawfd = file.as_raw_fd();
|
||||
|
||||
@ -480,7 +474,7 @@ pub struct DynamicIndexWriter {
|
||||
tmp_filename: PathBuf,
|
||||
csum: Option<openssl::sha::Sha256>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
impl Drop for DynamicIndexWriter {
|
||||
@ -506,13 +500,13 @@ impl DynamicIndexWriter {
|
||||
|
||||
let mut writer = BufWriter::with_capacity(1024 * 1024, file);
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
let mut header = DynamicIndexHeader::zeroed();
|
||||
header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
// header.index_csum = [0u8; 32];
|
||||
writer.write_all(header.as_bytes())?;
|
||||
|
@ -4,9 +4,8 @@ use std::io::{Seek, SeekFrom};
|
||||
use super::chunk_stat::*;
|
||||
use super::chunk_store::*;
|
||||
use super::{IndexFile, ChunkReadInfo};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
use chrono::{Local, TimeZone};
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
@ -23,7 +22,7 @@ use proxmox::tools::Uuid;
|
||||
pub struct FixedIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(digest1||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
pub size: u64,
|
||||
@ -41,7 +40,7 @@ pub struct FixedIndexReader {
|
||||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
@ -66,12 +65,6 @@ impl FixedIndexReader {
|
||||
}
|
||||
|
||||
pub fn new(mut file: std::fs::File) -> Result<Self, Error> {
|
||||
if let Err(err) =
|
||||
nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockSharedNonblock)
|
||||
{
|
||||
bail!("unable to get shared lock - {}", err);
|
||||
}
|
||||
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let header_size = std::mem::size_of::<FixedIndexHeader>();
|
||||
@ -82,7 +75,7 @@ impl FixedIndexReader {
|
||||
}
|
||||
|
||||
let size = u64::from_le(header.size);
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = i64::from_le(header.ctime);
|
||||
let chunk_size = u64::from_le(header.chunk_size);
|
||||
|
||||
let index_length = ((size + chunk_size - 1) / chunk_size) as usize;
|
||||
@ -148,10 +141,13 @@ impl FixedIndexReader {
|
||||
pub fn print_info(&self) {
|
||||
println!("Size: {}", self.size);
|
||||
println!("ChunkSize: {}", self.chunk_size);
|
||||
println!(
|
||||
"CTime: {}",
|
||||
Local.timestamp(self.ctime as i64, 0).format("%c")
|
||||
);
|
||||
|
||||
let mut ctime_str = self.ctime.to_string();
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%c",self.ctime) {
|
||||
ctime_str = s;
|
||||
}
|
||||
|
||||
println!("CTime: {}", ctime_str);
|
||||
println!("UUID: {:?}", self.uuid);
|
||||
}
|
||||
}
|
||||
@ -228,7 +224,7 @@ pub struct FixedIndexWriter {
|
||||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
// `index` is mmap()ed which cannot be thread-local so should be sendable
|
||||
@ -271,7 +267,7 @@ impl FixedIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
@ -279,7 +275,7 @@ impl FixedIndexWriter {
|
||||
let header = unsafe { &mut *(buffer.as_ptr() as *mut FixedIndexHeader) };
|
||||
|
||||
header.magic = super::FIXED_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.size = u64::to_le(size as u64);
|
||||
header.chunk_size = u64::to_le(chunk_size as u64);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
|
@ -1,7 +1,6 @@
|
||||
use anyhow::{bail, format_err, Context, Error};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
@ -61,10 +60,10 @@ impl KeyDerivationConfig {
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct KeyConfig {
|
||||
pub kdf: Option<KeyDerivationConfig>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub created: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub modified: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub created: i64,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub modified: i64,
|
||||
#[serde(with = "proxmox::tools::serde::bytes_as_base64")]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
@ -136,7 +135,7 @@ pub fn encrypt_key_with_passphrase(
|
||||
enc_data.extend_from_slice(&tag);
|
||||
enc_data.extend_from_slice(&encrypted_key);
|
||||
|
||||
let created = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
Ok(KeyConfig {
|
||||
kdf: Some(kdf),
|
||||
@ -149,7 +148,7 @@ pub fn encrypt_key_with_passphrase(
|
||||
pub fn load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
do_load_and_decrypt_key(path, passphrase)
|
||||
.with_context(|| format!("failed to load decryption key from {:?}", path))
|
||||
}
|
||||
@ -157,14 +156,14 @@ pub fn load_and_decrypt_key(
|
||||
fn do_load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
decrypt_key(&file_get_contents(&path)?, passphrase)
|
||||
}
|
||||
|
||||
pub fn decrypt_key(
|
||||
mut keydata: &[u8],
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
|
||||
|
||||
let raw_data = key_config.data;
|
||||
|
@ -103,7 +103,7 @@ impl BackupManifest {
|
||||
Self {
|
||||
backup_type: snapshot.group().backup_type().into(),
|
||||
backup_id: snapshot.group().backup_id().into(),
|
||||
backup_time: snapshot.backup_time().timestamp(),
|
||||
backup_time: snapshot.backup_time(),
|
||||
files: Vec::new(),
|
||||
unprotected: json!({}),
|
||||
signature: None,
|
||||
|
@ -2,18 +2,16 @@ use anyhow::{Error};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use chrono::{DateTime, Timelike, Datelike, Local};
|
||||
|
||||
use super::{BackupDir, BackupInfo};
|
||||
use super::BackupInfo;
|
||||
|
||||
enum PruneMark { Keep, KeepPartial, Remove }
|
||||
|
||||
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||
mark: &mut HashMap<PathBuf, PruneMark>,
|
||||
list: &Vec<BackupInfo>,
|
||||
keep: usize,
|
||||
select_id: F,
|
||||
) {
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut include_hash = HashSet::new();
|
||||
|
||||
@ -21,8 +19,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(PruneMark::Keep) = mark.get(&backup_id) {
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
already_included.insert(sel_id);
|
||||
}
|
||||
}
|
||||
@ -30,8 +27,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(_) = mark.get(&backup_id) { continue; }
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
|
||||
if already_included.contains(&sel_id) { continue; }
|
||||
|
||||
@ -43,6 +39,8 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
mark.insert(backup_id, PruneMark::Remove);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_incomplete_snapshots(
|
||||
@ -182,44 +180,43 @@ pub fn compute_prune_info(
|
||||
remove_incomplete_snapshots(&mut mark, &list);
|
||||
|
||||
if let Some(keep_last) = options.keep_last {
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |_local_time, info| {
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |info| {
|
||||
Ok(info.backup_dir.backup_time_string().to_owned())
|
||||
})?;
|
||||
}
|
||||
|
||||
use proxmox::tools::time::strftime_local;
|
||||
|
||||
if let Some(keep_hourly) = options.keep_hourly {
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}/{}", local_time.year(), local_time.month(),
|
||||
local_time.day(), local_time.hour())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_daily) = options.keep_daily {
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_weekly) = options.keep_weekly {
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
|
||||
let iso_week = local_time.iso_week();
|
||||
let week = iso_week.week();
|
||||
// Note: This year number might not match the calendar year number.
|
||||
let iso_week_year = iso_week.year();
|
||||
format!("{}/{}", iso_week_year, week)
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
||||
// Note: Use iso-week year/week here. This year number
|
||||
// might not match the calendar year number.
|
||||
strftime_local("%G/%V", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_monthly) = options.keep_monthly {
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.month())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
||||
strftime_local("%Y/%m", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_yearly) = options.keep_yearly {
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.year())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
||||
strftime_local("%Y", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
let prune_info: Vec<(BackupInfo, bool)> = list.into_iter()
|
||||
|
@ -5,13 +5,24 @@ use std::time::Instant;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
use crate::api2::types::*;
|
||||
|
||||
use super::{
|
||||
DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
|
||||
use crate::{
|
||||
api2::types::*,
|
||||
backup::{
|
||||
DataStore,
|
||||
DataBlob,
|
||||
BackupGroup,
|
||||
BackupDir,
|
||||
BackupInfo,
|
||||
IndexFile,
|
||||
CryptMode,
|
||||
FileInfo, ArchiveType, archive_type,
|
||||
FileInfo,
|
||||
ArchiveType,
|
||||
archive_type,
|
||||
},
|
||||
server::UPID,
|
||||
task::TaskState,
|
||||
task_log,
|
||||
tools::ParallelHandler,
|
||||
};
|
||||
|
||||
fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
||||
@ -42,7 +53,7 @@ fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInf
|
||||
fn rename_corrupted_chunk(
|
||||
datastore: Arc<DataStore>,
|
||||
digest: &[u8;32],
|
||||
worker: Arc<WorkerTask>,
|
||||
worker: &dyn TaskState,
|
||||
) {
|
||||
let (path, digest_str) = datastore.chunk_path(digest);
|
||||
|
||||
@ -55,132 +66,111 @@ fn rename_corrupted_chunk(
|
||||
|
||||
match std::fs::rename(&path, &new_path) {
|
||||
Ok(_) => {
|
||||
worker.log(format!("corrupted chunk renamed to {:?}", &new_path));
|
||||
task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
|
||||
},
|
||||
Err(err) => {
|
||||
match err.kind() {
|
||||
std::io::ErrorKind::NotFound => { /* ignored */ },
|
||||
_ => worker.log(format!("could not rename corrupted chunk {:?} - {}", &path, err))
|
||||
_ => task_log!(worker, "could not rename corrupted chunk {:?} - {}", &path, err)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// We use a separate thread to read/load chunks, so that we can do
|
||||
// load and verify in parallel to increase performance.
|
||||
fn chunk_reader_thread(
|
||||
datastore: Arc<DataStore>,
|
||||
index: Box<dyn IndexFile + Send>,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
errors: Arc<AtomicUsize>,
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
|
||||
|
||||
let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
|
||||
|
||||
std::thread::spawn(move|| {
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.range.end - info.range.start;
|
||||
|
||||
if verified_chunks.lock().unwrap().contains(&info.digest) {
|
||||
continue; // already verified
|
||||
}
|
||||
|
||||
if corrupt_chunks.lock().unwrap().contains(&info.digest) {
|
||||
let digest_str = proxmox::tools::digest_to_hex(&info.digest);
|
||||
worker.log(format!("chunk {} was marked as corrupt", digest_str));
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
continue;
|
||||
}
|
||||
|
||||
match datastore.load_chunk(&info.digest) {
|
||||
Err(err) => {
|
||||
corrupt_chunks.lock().unwrap().insert(info.digest);
|
||||
worker.log(format!("can't verify chunk, load failed - {}", err));
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore.clone(), &info.digest, worker.clone());
|
||||
continue;
|
||||
}
|
||||
Ok(chunk) => {
|
||||
if sender.send((chunk, info.digest, size)).is_err() {
|
||||
break; // receiver gone - simply stop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
receiver
|
||||
}
|
||||
|
||||
fn verify_index_chunks(
|
||||
datastore: Arc<DataStore>,
|
||||
index: Box<dyn IndexFile + Send>,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||
crypt_mode: CryptMode,
|
||||
worker: Arc<WorkerTask>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let errors = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let start_time = Instant::now();
|
||||
|
||||
let chunk_channel = chunk_reader_thread(
|
||||
datastore.clone(),
|
||||
index,
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
errors.clone(),
|
||||
worker.clone(),
|
||||
);
|
||||
|
||||
let mut read_bytes = 0;
|
||||
let mut decoded_bytes = 0;
|
||||
|
||||
loop {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
crate::tools::fail_on_shutdown()?;
|
||||
|
||||
let (chunk, digest, size) = match chunk_channel.recv() {
|
||||
Ok(tuple) => tuple,
|
||||
Err(std::sync::mpsc::RecvError) => break,
|
||||
};
|
||||
|
||||
read_bytes += chunk.raw_size();
|
||||
decoded_bytes += size;
|
||||
let worker2 = Arc::clone(&worker);
|
||||
let datastore2 = Arc::clone(&datastore);
|
||||
let corrupt_chunks2 = Arc::clone(&corrupt_chunks);
|
||||
let verified_chunks2 = Arc::clone(&verified_chunks);
|
||||
let errors2 = Arc::clone(&errors);
|
||||
|
||||
let decoder_pool = ParallelHandler::new(
|
||||
"verify chunk decoder", 4,
|
||||
move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
|
||||
let chunk_crypt_mode = match chunk.crypt_mode() {
|
||||
Err(err) => {
|
||||
corrupt_chunks.lock().unwrap().insert(digest);
|
||||
worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
continue;
|
||||
corrupt_chunks2.lock().unwrap().insert(digest);
|
||||
task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
|
||||
errors2.fetch_add(1, Ordering::SeqCst);
|
||||
return Ok(());
|
||||
},
|
||||
Ok(mode) => mode,
|
||||
};
|
||||
|
||||
if chunk_crypt_mode != crypt_mode {
|
||||
worker.log(format!(
|
||||
task_log!(
|
||||
worker2,
|
||||
"chunk CryptMode {:?} does not match index CryptMode {:?}",
|
||||
chunk_crypt_mode,
|
||||
crypt_mode
|
||||
));
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
);
|
||||
errors2.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
|
||||
corrupt_chunks.lock().unwrap().insert(digest);
|
||||
worker.log(format!("{}", err));
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore.clone(), &digest, worker.clone());
|
||||
corrupt_chunks2.lock().unwrap().insert(digest);
|
||||
task_log!(worker2, "{}", err);
|
||||
errors2.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
|
||||
} else {
|
||||
verified_chunks.lock().unwrap().insert(digest);
|
||||
verified_chunks2.lock().unwrap().insert(digest);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
);
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
|
||||
worker.check_abort()?;
|
||||
crate::tools::fail_on_shutdown()?;
|
||||
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.size();
|
||||
|
||||
if verified_chunks.lock().unwrap().contains(&info.digest) {
|
||||
continue; // already verified
|
||||
}
|
||||
|
||||
if corrupt_chunks.lock().unwrap().contains(&info.digest) {
|
||||
let digest_str = proxmox::tools::digest_to_hex(&info.digest);
|
||||
task_log!(worker, "chunk {} was marked as corrupt", digest_str);
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
continue;
|
||||
}
|
||||
|
||||
match datastore.load_chunk(&info.digest) {
|
||||
Err(err) => {
|
||||
corrupt_chunks.lock().unwrap().insert(info.digest);
|
||||
task_log!(worker, "can't verify chunk, load failed - {}", err);
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore.clone(), &info.digest, &worker);
|
||||
continue;
|
||||
}
|
||||
Ok(chunk) => {
|
||||
read_bytes += chunk.raw_size();
|
||||
decoder_pool.send((chunk, info.digest, size))?;
|
||||
decoded_bytes += size;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
decoder_pool.complete()?;
|
||||
|
||||
let elapsed = start_time.elapsed().as_secs_f64();
|
||||
|
||||
@ -192,8 +182,16 @@ fn verify_index_chunks(
|
||||
|
||||
let error_count = errors.load(Ordering::SeqCst);
|
||||
|
||||
worker.log(format!(" verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
|
||||
read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
|
||||
task_log!(
|
||||
worker,
|
||||
" verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
|
||||
read_bytes_mib,
|
||||
decoded_bytes_mib,
|
||||
elapsed,
|
||||
read_speed,
|
||||
decode_speed,
|
||||
error_count,
|
||||
);
|
||||
|
||||
if errors.load(Ordering::SeqCst) > 0 {
|
||||
bail!("chunks could not be verified");
|
||||
@ -208,7 +206,7 @@ fn verify_fixed_index(
|
||||
info: &FileInfo,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut path = backup_dir.relative_path();
|
||||
@ -225,7 +223,14 @@ fn verify_fixed_index(
|
||||
bail!("wrong index checksum");
|
||||
}
|
||||
|
||||
verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
|
||||
verify_index_chunks(
|
||||
datastore,
|
||||
Box::new(index),
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
info.chunk_crypt_mode(),
|
||||
worker,
|
||||
)
|
||||
}
|
||||
|
||||
fn verify_dynamic_index(
|
||||
@ -234,7 +239,7 @@ fn verify_dynamic_index(
|
||||
info: &FileInfo,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut path = backup_dir.relative_path();
|
||||
@ -251,7 +256,14 @@ fn verify_dynamic_index(
|
||||
bail!("wrong index checksum");
|
||||
}
|
||||
|
||||
verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
|
||||
verify_index_chunks(
|
||||
datastore,
|
||||
Box::new(index),
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
info.chunk_crypt_mode(),
|
||||
worker,
|
||||
)
|
||||
}
|
||||
|
||||
/// Verify a single backup snapshot
|
||||
@ -268,25 +280,32 @@ pub fn verify_backup_dir(
|
||||
backup_dir: &BackupDir,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: UPID,
|
||||
) -> Result<bool, Error> {
|
||||
|
||||
let mut manifest = match datastore.load_manifest(&backup_dir) {
|
||||
Ok((manifest, _)) => manifest,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
|
||||
task_log!(
|
||||
worker,
|
||||
"verify {}:{} - manifest load error: {}",
|
||||
datastore.name(),
|
||||
backup_dir,
|
||||
err,
|
||||
);
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify {}:{}", datastore.name(), backup_dir));
|
||||
task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);
|
||||
|
||||
let mut error_count = 0;
|
||||
|
||||
let mut verify_result = "ok";
|
||||
let mut verify_result = VerifyState::Ok;
|
||||
for info in manifest.files() {
|
||||
let result = proxmox::try_block!({
|
||||
worker.log(format!(" check {}", info.filename));
|
||||
task_log!(worker, " check {}", info.filename);
|
||||
match archive_type(&info.filename)? {
|
||||
ArchiveType::FixedIndex =>
|
||||
verify_fixed_index(
|
||||
@ -310,26 +329,32 @@ pub fn verify_backup_dir(
|
||||
}
|
||||
});
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
worker.check_abort()?;
|
||||
crate::tools::fail_on_shutdown()?;
|
||||
|
||||
if let Err(err) = result {
|
||||
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
|
||||
task_log!(
|
||||
worker,
|
||||
"verify {}:{}/{} failed: {}",
|
||||
datastore.name(),
|
||||
backup_dir,
|
||||
info.filename,
|
||||
err,
|
||||
);
|
||||
error_count += 1;
|
||||
verify_result = "failed";
|
||||
verify_result = VerifyState::Failed;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
let verify_state = SnapshotVerifyState {
|
||||
state: verify_result.to_string(),
|
||||
upid: worker.upid().clone(),
|
||||
state: verify_result,
|
||||
upid,
|
||||
};
|
||||
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
||||
datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
|
||||
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
||||
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
|
||||
|
||||
@ -346,19 +371,26 @@ pub fn verify_backup_group(
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
progress: Option<(usize, usize)>, // (done, snapshot_count)
|
||||
worker: Arc<WorkerTask>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: &UPID,
|
||||
) -> Result<(usize, Vec<String>), Error> {
|
||||
|
||||
let mut errors = Vec::new();
|
||||
let mut list = match group.list_backups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
|
||||
task_log!(
|
||||
worker,
|
||||
"verify group {}:{} - unable to list backups: {}",
|
||||
datastore.name(),
|
||||
group,
|
||||
err,
|
||||
);
|
||||
return Ok((0, errors));
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify group {}:{}", datastore.name(), group));
|
||||
task_log!(worker, "verify group {}:{}", datastore.name(), group);
|
||||
|
||||
let (done, snapshot_count) = progress.unwrap_or((0, list.len()));
|
||||
|
||||
@ -366,13 +398,26 @@ pub fn verify_backup_group(
|
||||
BackupInfo::sort_list(&mut list, false); // newest first
|
||||
for info in list {
|
||||
count += 1;
|
||||
if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
|
||||
if !verify_backup_dir(
|
||||
datastore.clone(),
|
||||
&info.backup_dir,
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
worker.clone(),
|
||||
upid.clone(),
|
||||
)? {
|
||||
errors.push(info.backup_dir.to_string());
|
||||
}
|
||||
if snapshot_count != 0 {
|
||||
let pos = done + count;
|
||||
let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
|
||||
worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
|
||||
task_log!(
|
||||
worker,
|
||||
"percentage done: {:.2}% ({} of {} snapshots)",
|
||||
percentage,
|
||||
pos,
|
||||
snapshot_count,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -386,8 +431,11 @@ pub fn verify_backup_group(
|
||||
/// Returns
|
||||
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
|
||||
|
||||
pub fn verify_all_backups(
|
||||
datastore: Arc<DataStore>,
|
||||
worker: Arc<dyn TaskState + Send + Sync>,
|
||||
upid: &UPID,
|
||||
) -> Result<Vec<String>, Error> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||
@ -396,7 +444,12 @@ pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) ->
|
||||
.filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
|
||||
.collect::<Vec<BackupGroup>>(),
|
||||
Err(err) => {
|
||||
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
||||
task_log!(
|
||||
worker,
|
||||
"verify datastore {} - unable to list backups: {}",
|
||||
datastore.name(),
|
||||
err,
|
||||
);
|
||||
return Ok(errors);
|
||||
}
|
||||
};
|
||||
@ -414,7 +467,7 @@ pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) ->
|
||||
// start with 64 chunks since we assume there are few corrupt ones
|
||||
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
|
||||
|
||||
worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
|
||||
task_log!(worker, "verify datastore {} ({} snapshots)", datastore.name(), snapshot_count);
|
||||
|
||||
let mut done = 0;
|
||||
for group in list {
|
||||
@ -425,6 +478,7 @@ pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) ->
|
||||
corrupt_chunks.clone(),
|
||||
Some((done, snapshot_count)),
|
||||
worker.clone(),
|
||||
upid,
|
||||
)?;
|
||||
errors.append(&mut group_errors);
|
||||
|
||||
|
@ -8,7 +8,6 @@ use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, DateTime, Utc, TimeZone};
|
||||
use futures::future::FutureExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
use serde_json::{json, Value};
|
||||
@ -16,11 +15,20 @@ use tokio::sync::mpsc;
|
||||
use xdg::BaseDirectories;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
use proxmox::{
|
||||
tools::{
|
||||
time::{strftime_local, epoch_i64},
|
||||
fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
|
||||
},
|
||||
api::{
|
||||
api,
|
||||
ApiHandler,
|
||||
ApiMethod,
|
||||
RpcEnvironment,
|
||||
schema::*,
|
||||
cli::*,
|
||||
},
|
||||
};
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
@ -28,6 +36,7 @@ use proxmox_backup::api2::types::*;
|
||||
use proxmox_backup::api2::version;
|
||||
use proxmox_backup::client::*;
|
||||
use proxmox_backup::pxar::catalog::*;
|
||||
use proxmox_backup::config::user::complete_user_name;
|
||||
use proxmox_backup::backup::{
|
||||
archive_type,
|
||||
decrypt_key,
|
||||
@ -184,7 +193,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
|
||||
result
|
||||
}
|
||||
|
||||
fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {
|
||||
fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {
|
||||
|
||||
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
||||
|
||||
@ -203,7 +212,7 @@ fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {
|
||||
.fingerprint_cache(true)
|
||||
.ticket_cache(true);
|
||||
|
||||
HttpClient::new(server, userid, options)
|
||||
HttpClient::new(server, port, userid, options)
|
||||
}
|
||||
|
||||
async fn view_task_result(
|
||||
@ -246,7 +255,7 @@ pub async fn api_datastore_latest_snapshot(
|
||||
client: &HttpClient,
|
||||
store: &str,
|
||||
group: BackupGroup,
|
||||
) -> Result<(String, String, DateTime<Utc>), Error> {
|
||||
) -> Result<(String, String, i64), Error> {
|
||||
|
||||
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
|
||||
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
|
||||
@ -257,7 +266,7 @@ pub async fn api_datastore_latest_snapshot(
|
||||
|
||||
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
|
||||
|
||||
let backup_time = Utc.timestamp(list[0].backup_time, 0);
|
||||
let backup_time = list[0].backup_time;
|
||||
|
||||
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
|
||||
}
|
||||
@ -357,7 +366,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
|
||||
|
||||
@ -373,7 +382,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -404,6 +413,45 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
group: {
|
||||
type: String,
|
||||
description: "Backup group.",
|
||||
},
|
||||
"new-owner": {
|
||||
type: Userid,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Change owner of a backup group
|
||||
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
param.as_object_mut().unwrap().remove("repository");
|
||||
|
||||
let group: BackupGroup = group.parse()?;
|
||||
|
||||
param["backup-type"] = group.backup_type().into();
|
||||
param["backup-id"] = group.backup_id().into();
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
|
||||
client.post(&path, Some(param)).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -430,7 +478,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
|
||||
Some(path.parse()?)
|
||||
@ -444,7 +492,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -495,14 +543,14 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
|
||||
|
||||
let result = client.delete(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
@ -525,7 +573,7 @@ async fn api_login(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
client.login().await?;
|
||||
|
||||
record_repository(&repo);
|
||||
@ -582,7 +630,7 @@ async fn api_version(param: Value) -> Result<(), Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m);
|
||||
if let Ok(repo) = repo {
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
match client.get("api2/json/version", None).await {
|
||||
Ok(mut result) => version_info["server"] = result["data"].take(),
|
||||
@ -632,14 +680,14 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
|
||||
|
||||
let mut result = client.get(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
@ -676,7 +724,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
|
||||
|
||||
@ -986,18 +1034,18 @@ async fn create_backup(
|
||||
}
|
||||
}
|
||||
|
||||
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
|
||||
let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
|
||||
|
||||
println!("Client name: {}", proxmox::tools::nodename());
|
||||
|
||||
let start_time = Local::now();
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
let (crypt_config, rsa_encrypted_key) = match keydata {
|
||||
None => (None, None),
|
||||
@ -1035,7 +1083,7 @@ async fn create_backup(
|
||||
None
|
||||
};
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
|
||||
let mut catalog = None;
|
||||
@ -1150,11 +1198,11 @@ async fn create_backup(
|
||||
|
||||
client.finish().await?;
|
||||
|
||||
let end_time = Local::now();
|
||||
let elapsed = end_time.signed_duration_since(start_time);
|
||||
println!("Duration: {}", elapsed);
|
||||
let end_time = std::time::Instant::now();
|
||||
let elapsed = end_time.duration_since(start_time);
|
||||
println!("Duration: {:.2}s", elapsed.as_secs_f64());
|
||||
|
||||
println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("End Time: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
@ -1291,7 +1339,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?;
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
@ -1464,7 +1512,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
|
||||
let snapshot = tools::required_string_param(¶m, "snapshot")?;
|
||||
let snapshot: BackupDir = snapshot.parse()?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let (keydata, crypt_mode) = keyfile_parameters(¶m)?;
|
||||
|
||||
@ -1492,7 +1540,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
|
||||
let args = json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
});
|
||||
|
||||
let body = hyper::Body::from(raw_data);
|
||||
@ -1535,7 +1583,7 @@ fn prune<'a>(
|
||||
async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
|
||||
|
||||
@ -1560,7 +1608,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -1618,7 +1666,7 @@ async fn status(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
|
||||
|
||||
@ -1663,7 +1711,7 @@ async fn try_get(repo: &BackupRepository, url: &str) -> Value {
|
||||
.fingerprint_cache(true)
|
||||
.ticket_cache(true);
|
||||
|
||||
let client = match HttpClient::new(repo.host(), repo.user(), options) {
|
||||
let client = match HttpClient::new(repo.host(), repo.port(), repo.user(), options) {
|
||||
Ok(v) => v,
|
||||
_ => return Value::Null,
|
||||
};
|
||||
@ -1752,11 +1800,12 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
|
||||
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
|
||||
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
|
||||
{
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
|
||||
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
@ -1787,7 +1836,7 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
|
||||
let query = tools::json_object_to_query(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
})).unwrap();
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
|
||||
@ -1808,17 +1857,29 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
|
||||
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||
complete_server_file_name(arg, param)
|
||||
.iter()
|
||||
.map(|v| tools::format::strip_server_file_expenstion(&v))
|
||||
.map(|v| tools::format::strip_server_file_extension(&v))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||
complete_server_file_name(arg, param)
|
||||
.iter()
|
||||
.filter_map(|v| {
|
||||
let name = tools::format::strip_server_file_expenstion(&v);
|
||||
if name.ends_with(".pxar") {
|
||||
Some(name)
|
||||
.filter_map(|name| {
|
||||
if name.ends_with(".pxar.didx") {
|
||||
Some(tools::format::strip_server_file_extension(name))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||
complete_server_file_name(arg, param)
|
||||
.iter()
|
||||
.filter_map(|name| {
|
||||
if name.ends_with(".img.fidx") {
|
||||
Some(tools::format::strip_server_file_extension(name))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -1946,6 +2007,12 @@ fn main() {
|
||||
let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
|
||||
.completion_cb("repository", complete_repository);
|
||||
|
||||
let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
|
||||
.arg_param(&["group", "new-owner"])
|
||||
.completion_cb("group", complete_backup_group)
|
||||
.completion_cb("new-owner", complete_user_name)
|
||||
.completion_cb("repository", complete_repository);
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("backup", backup_cmd_def)
|
||||
.insert("upload-log", upload_log_cmd_def)
|
||||
@ -1961,10 +2028,13 @@ fn main() {
|
||||
.insert("status", status_cmd_def)
|
||||
.insert("key", key::cli())
|
||||
.insert("mount", mount_cmd_def())
|
||||
.insert("map", map_cmd_def())
|
||||
.insert("unmap", unmap_cmd_def())
|
||||
.insert("catalog", catalog_mgmt_cli())
|
||||
.insert("task", task_mgmt_cli())
|
||||
.insert("version", version_cmd_def)
|
||||
.insert("benchmark", benchmark_cmd_def);
|
||||
.insert("benchmark", benchmark_cmd_def)
|
||||
.insert("change-owner", change_owner_cmd_def);
|
||||
|
||||
let rpcenv = CliEnvironment::new();
|
||||
run_cli_command(cmd_def, rpcenv, Some(|future| {
|
||||
|
@ -62,10 +62,10 @@ fn connect() -> Result<HttpClient, Error> {
|
||||
let ticket = Ticket::new("PBS", Userid::root_userid())?
|
||||
.sign(private_auth_key(), None)?;
|
||||
options = options.password(Some(ticket));
|
||||
HttpClient::new("localhost", Userid::root_userid(), options)?
|
||||
HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
|
||||
} else {
|
||||
options = options.ticket_cache(true).interactive(true);
|
||||
HttpClient::new("localhost", Userid::root_userid(), options)?
|
||||
HttpClient::new("localhost", 8007, Userid::root_userid(), options)?
|
||||
};
|
||||
|
||||
Ok(client)
|
||||
@ -410,6 +410,7 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String
|
||||
|
||||
let client = HttpClient::new(
|
||||
&remote.host,
|
||||
remote.port.unwrap_or(8007),
|
||||
&remote.userid,
|
||||
options,
|
||||
)?;
|
||||
|
@ -1,4 +1,4 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
@ -13,7 +13,7 @@ use proxmox_backup::api2::types::Userid;
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::buildcfg;
|
||||
use proxmox_backup::server;
|
||||
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
|
||||
use proxmox_backup::tools::daemon;
|
||||
use proxmox_backup::server::{ApiConfig, rest::*};
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
|
||||
@ -69,7 +69,7 @@ async fn run() -> Result<(), Error> {
|
||||
let key_path = configdir!("/proxy.key");
|
||||
let cert_path = configdir!("/proxy.pem");
|
||||
|
||||
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
let mut acceptor = SslAcceptor::mozilla_intermediate_v5(SslMethod::tls()).unwrap();
|
||||
acceptor.set_private_key_file(key_path, SslFiletype::PEM)
|
||||
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
|
||||
acceptor.set_certificate_chain_file(cert_path)
|
||||
@ -144,10 +144,11 @@ fn start_task_scheduler() {
|
||||
tokio::spawn(task.map(|_| ()));
|
||||
}
|
||||
|
||||
use std::time:: {Instant, Duration};
|
||||
use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
|
||||
|
||||
fn next_minute() -> Result<Instant, Error> {
|
||||
let epoch_now = epoch_now()?;
|
||||
let now = SystemTime::now();
|
||||
let epoch_now = now.duration_since(UNIX_EPOCH)?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
Ok(Instant::now() + epoch_next - epoch_now)
|
||||
}
|
||||
@ -195,45 +196,21 @@ async fn schedule_tasks() -> Result<(), Error> {
|
||||
|
||||
schedule_datastore_garbage_collection().await;
|
||||
schedule_datastore_prune().await;
|
||||
schedule_datastore_verification().await;
|
||||
schedule_datastore_sync_jobs().await;
|
||||
schedule_task_log_rotate().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
|
||||
|
||||
let list = proxmox_backup::server::read_task_list()?;
|
||||
|
||||
let mut last: Option<&server::UPID> = None;
|
||||
|
||||
for entry in list.iter() {
|
||||
if entry.upid.worker_type == worker_type {
|
||||
if let Some(ref id) = entry.upid.worker_id {
|
||||
if id == worker_id {
|
||||
match last {
|
||||
Some(ref upid) => {
|
||||
if upid.starttime < entry.upid.starttime {
|
||||
last = Some(&entry.upid)
|
||||
}
|
||||
}
|
||||
None => {
|
||||
last = Some(&entry.upid)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(last.cloned())
|
||||
}
|
||||
|
||||
|
||||
async fn schedule_datastore_garbage_collection() {
|
||||
|
||||
use proxmox_backup::backup::DataStore;
|
||||
use proxmox_backup::server::{UPID, WorkerTask};
|
||||
use proxmox_backup::config::datastore::{self, DataStoreConfig};
|
||||
use proxmox_backup::config::{
|
||||
jobstate::{self, Job},
|
||||
datastore::{self, DataStoreConfig}
|
||||
};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
@ -289,11 +266,10 @@ async fn schedule_datastore_garbage_collection() {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match lookup_last_worker(worker_type, &store) {
|
||||
Ok(Some(upid)) => upid.starttime,
|
||||
Ok(None) => 0,
|
||||
match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("lookup_last_job_start failed: {}", err);
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -308,15 +284,15 @@ async fn schedule_datastore_garbage_collection() {
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let store2 = store.clone();
|
||||
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
@ -325,9 +301,20 @@ async fn schedule_datastore_garbage_collection() {
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
job.start(&worker.upid().to_string())?;
|
||||
|
||||
worker.log(format!("starting garbage collection on store {}", store));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
datastore.garbage_collection(&worker)
|
||||
|
||||
let result = datastore.garbage_collection(&*worker, worker.upid());
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
) {
|
||||
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
|
||||
@ -338,9 +325,12 @@ async fn schedule_datastore_garbage_collection() {
|
||||
async fn schedule_datastore_prune() {
|
||||
|
||||
use proxmox_backup::backup::{
|
||||
PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
|
||||
PruneOptions, DataStore, BackupGroup, compute_prune_info};
|
||||
use proxmox_backup::server::{WorkerTask};
|
||||
use proxmox_backup::config::datastore::{self, DataStoreConfig};
|
||||
use proxmox_backup::config::{
|
||||
jobstate::{self, Job},
|
||||
datastore::{self, DataStoreConfig}
|
||||
};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
@ -397,16 +387,10 @@ async fn schedule_datastore_prune() {
|
||||
|
||||
let worker_type = "prune";
|
||||
|
||||
let last = match lookup_last_worker(worker_type, &store) {
|
||||
Ok(Some(upid)) => {
|
||||
if proxmox_backup::server::worker_is_active_local(&upid) {
|
||||
continue;
|
||||
}
|
||||
upid.starttime
|
||||
}
|
||||
Ok(None) => 0,
|
||||
let last = match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("lookup_last_job_start failed: {}", err);
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -420,15 +404,15 @@ async fn schedule_datastore_prune() {
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let store2 = store.clone();
|
||||
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
@ -437,6 +421,11 @@ async fn schedule_datastore_prune() {
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
|
||||
job.start(&worker.upid().to_string())?;
|
||||
|
||||
let result = try_block!({
|
||||
|
||||
worker.log(format!("Starting datastore prune on store \"{}\"", store));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
|
||||
@ -457,15 +446,22 @@ async fn schedule_datastore_prune() {
|
||||
"{} {}/{}/{}",
|
||||
if keep { "keep" } else { "remove" },
|
||||
group.backup_type(), group.backup_id(),
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
|
||||
|
||||
info.backup_dir.backup_time_string()));
|
||||
if !keep {
|
||||
datastore.remove_backup_dir(&info.backup_dir, true)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
) {
|
||||
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
|
||||
@ -473,6 +469,121 @@ async fn schedule_datastore_prune() {
|
||||
}
|
||||
}
|
||||
|
||||
async fn schedule_datastore_verification() {
|
||||
use proxmox_backup::backup::{DataStore, verify_all_backups};
|
||||
use proxmox_backup::server::{WorkerTask};
|
||||
use proxmox_backup::config::{
|
||||
jobstate::{self, Job},
|
||||
datastore::{self, DataStoreConfig}
|
||||
};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
let config = match datastore::config() {
|
||||
Err(err) => {
|
||||
eprintln!("unable to read datastore config - {}", err);
|
||||
return;
|
||||
}
|
||||
Ok((config, _digest)) => config,
|
||||
};
|
||||
|
||||
for (store, (_, store_config)) in config.sections {
|
||||
let datastore = match DataStore::lookup_datastore(&store) {
|
||||
Ok(datastore) => datastore,
|
||||
Err(err) => {
|
||||
eprintln!("lookup_datastore failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
|
||||
Ok(c) => c,
|
||||
Err(err) => {
|
||||
eprintln!("datastore config from_value failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let event_str = match store_config.verify_schedule {
|
||||
Some(event_str) => event_str,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let event = match parse_calendar_event(&event_str) {
|
||||
Ok(event) => event,
|
||||
Err(err) => {
|
||||
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let worker_type = "verify";
|
||||
|
||||
let last = match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => continue,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let worker_id = store.clone();
|
||||
let store2 = store.clone();
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
worker_type,
|
||||
Some(worker_id),
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
job.start(&worker.upid().to_string())?;
|
||||
worker.log(format!("starting verification on store {}", store2));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
let result = try_block!({
|
||||
let failed_dirs =
|
||||
verify_all_backups(datastore, worker.clone(), worker.upid())?;
|
||||
if failed_dirs.len() > 0 {
|
||||
worker.log("Failed to verify following snapshots:");
|
||||
for dir in failed_dirs {
|
||||
worker.log(format!("\t{}", dir));
|
||||
}
|
||||
Err(format_err!("verification failed - please check the log for details"))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
});
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
},
|
||||
) {
|
||||
eprintln!("unable to start verification on store {} - {}", store, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn schedule_datastore_sync_jobs() {
|
||||
|
||||
use proxmox_backup::{
|
||||
@ -529,13 +640,8 @@ async fn schedule_datastore_sync_jobs() {
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let job = match Job::new(worker_type, &job_id) {
|
||||
@ -551,6 +657,101 @@ async fn schedule_datastore_sync_jobs() {
|
||||
}
|
||||
}
|
||||
|
||||
async fn schedule_task_log_rotate() {
|
||||
use proxmox_backup::{
|
||||
config::jobstate::{self, Job},
|
||||
server::rotate_task_log_archive,
|
||||
};
|
||||
use proxmox_backup::server::WorkerTask;
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
let worker_type = "logrotate";
|
||||
let job_id = "task-archive";
|
||||
|
||||
let last = match jobstate::last_run_time(worker_type, job_id) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("could not get last run time of task log archive rotation: {}", err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// schedule daily at 00:00 like normal logrotate
|
||||
let schedule = "00:00";
|
||||
|
||||
let event = match parse_calendar_event(schedule) {
|
||||
Ok(event) => event,
|
||||
Err(err) => {
|
||||
// should not happen?
|
||||
eprintln!("unable to parse schedule '{}' - {}", schedule, err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => return,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now {
|
||||
// if we never ran the rotation, schedule instantly
|
||||
match jobstate::JobState::load(worker_type, job_id) {
|
||||
Ok(state) => match state {
|
||||
jobstate::JobState::Created { .. } => {},
|
||||
_ => return,
|
||||
},
|
||||
_ => return,
|
||||
}
|
||||
}
|
||||
|
||||
let mut job = match Job::new(worker_type, job_id) {
|
||||
Ok(job) => job,
|
||||
Err(_) => return, // could not get lock
|
||||
};
|
||||
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
worker_type,
|
||||
Some(job_id.to_string()),
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
job.start(&worker.upid().to_string())?;
|
||||
worker.log(format!("starting task log rotation"));
|
||||
// one entry has normally about ~100-150 bytes
|
||||
let max_size = 500000; // at least 5000 entries
|
||||
let max_files = 20; // at least 100000 entries
|
||||
let result = try_block!({
|
||||
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
|
||||
if has_rotated {
|
||||
worker.log(format!("task log archive was rotated"));
|
||||
} else {
|
||||
worker.log(format!("task log archive was not rotated"));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
},
|
||||
) {
|
||||
eprintln!("unable to start task log rotation: {}", err);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
async fn run_stat_generator() {
|
||||
|
||||
let mut count = 0;
|
||||
|
@ -3,7 +3,6 @@ use std::sync::Arc;
|
||||
|
||||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use serde::Serialize;
|
||||
|
||||
use proxmox::api::{ApiMethod, RpcEnvironment};
|
||||
@ -22,6 +21,7 @@ use proxmox_backup::backup::{
|
||||
load_and_decrypt_key,
|
||||
CryptConfig,
|
||||
KeyDerivationConfig,
|
||||
DataChunkBuilder,
|
||||
};
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
@ -61,6 +61,9 @@ struct Speed {
|
||||
"aes256_gcm": {
|
||||
type: Speed,
|
||||
},
|
||||
"verify": {
|
||||
type: Speed,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Copy, Clone, Serialize)]
|
||||
@ -76,29 +79,34 @@ struct BenchmarkResult {
|
||||
decompress: Speed,
|
||||
/// AES256 GCM encryption speed
|
||||
aes256_gcm: Speed,
|
||||
/// Verify speed
|
||||
verify: Speed,
|
||||
}
|
||||
|
||||
|
||||
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
|
||||
tls: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 1235.0, // TLS to localhost, AMD Ryzen 7 2700X
|
||||
},
|
||||
sha256: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 2022.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
compress: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 752.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
decompress: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 1198.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
aes256_gcm: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 3645.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
verify: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 758.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
};
|
||||
|
||||
@ -195,6 +203,9 @@ fn render_result(
|
||||
.column(ColumnConfig::new("decompress")
|
||||
.header("ZStd level 1 decompression speed")
|
||||
.right_align(false).renderer(render_speed))
|
||||
.column(ColumnConfig::new("verify")
|
||||
.header("Chunk verification speed")
|
||||
.right_align(false).renderer(render_speed))
|
||||
.column(ColumnConfig::new("aes256_gcm")
|
||||
.header("AES256 GCM encryption speed")
|
||||
.right_align(false).renderer(render_speed));
|
||||
@ -212,9 +223,9 @@ async fn test_upload_speed(
|
||||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
|
||||
let backup_time = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
|
||||
if verbose { eprintln!("Connecting to backup server"); }
|
||||
@ -258,7 +269,17 @@ fn test_crypt_speed(
|
||||
|
||||
let crypt_config = CryptConfig::new(testkey)?;
|
||||
|
||||
let random_data = proxmox::sys::linux::random_data(1024*1024)?;
|
||||
//let random_data = proxmox::sys::linux::random_data(1024*1024)?;
|
||||
let mut random_data = vec![];
|
||||
// generate pseudo random byte sequence
|
||||
for i in 0..256*1024 {
|
||||
for j in 0..4 {
|
||||
let byte = ((i >> (j<<3))&0xff) as u8;
|
||||
random_data.push(byte);
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(random_data.len(), 1024*1024);
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
@ -323,5 +344,23 @@ fn test_crypt_speed(
|
||||
|
||||
eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
let (chunk, digest) = DataChunkBuilder::new(&random_data)
|
||||
.compress(true)
|
||||
.build()?;
|
||||
|
||||
let mut bytes = 0;
|
||||
loop {
|
||||
chunk.verify_unencrypted(random_data.len(), &digest)?;
|
||||
bytes += random_data.len();
|
||||
if start_time.elapsed().as_micros() > 1_000_000 { break; }
|
||||
}
|
||||
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
|
||||
benchmark_result.verify.speed = Some(speed);
|
||||
|
||||
eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000_.0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||
}
|
||||
};
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let client = BackupReader::start(
|
||||
client,
|
||||
@ -153,7 +153,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||
/// Shell to interactively inspect and restore snapshots.
|
||||
async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?;
|
||||
|
||||
|
@ -1,7 +1,8 @@
|
||||
use std::path::PathBuf;
|
||||
use std::io::Write;
|
||||
use std::process::{Stdio, Command};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, TimeZone};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
@ -14,6 +15,17 @@ use proxmox_backup::backup::{
|
||||
};
|
||||
use proxmox_backup::tools;
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// Paperkey output format
|
||||
pub enum PaperkeyFormat {
|
||||
/// Format as Utf8 text. Includes QR codes as ascii-art.
|
||||
Text,
|
||||
/// Format as Html. Includes QR codes as png images.
|
||||
Html,
|
||||
}
|
||||
|
||||
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
|
||||
pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
|
||||
|
||||
@ -112,7 +124,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let created = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
@ -180,7 +192,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
@ -262,6 +274,55 @@ fn create_master_key() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
path: {
|
||||
description: "Key file. Without this the default key's will be used.",
|
||||
optional: true,
|
||||
},
|
||||
subject: {
|
||||
description: "Include the specified subject as titel text.",
|
||||
optional: true,
|
||||
},
|
||||
"output-format": {
|
||||
type: PaperkeyFormat,
|
||||
description: "Output format. Text or Html.",
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Generate a printable, human readable text file containing the encryption key.
|
||||
///
|
||||
/// This also includes a scanable QR code for fast key restore.
|
||||
fn paper_key(
|
||||
path: Option<String>,
|
||||
subject: Option<String>,
|
||||
output_format: Option<PaperkeyFormat>,
|
||||
) -> Result<(), Error> {
|
||||
let path = match path {
|
||||
Some(path) => PathBuf::from(path),
|
||||
None => {
|
||||
let path = find_default_encryption_key()?
|
||||
.ok_or_else(|| {
|
||||
format_err!("no encryption file provided and no default file found")
|
||||
})?;
|
||||
path
|
||||
}
|
||||
};
|
||||
|
||||
let data = file_get_contents(&path)?;
|
||||
let data = std::str::from_utf8(&data)?;
|
||||
|
||||
let format = output_format.unwrap_or(PaperkeyFormat::Html);
|
||||
|
||||
match format {
|
||||
PaperkeyFormat::Html => paperkey_html(data, subject),
|
||||
PaperkeyFormat::Text => paperkey_text(data, subject),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cli() -> CliCommandMap {
|
||||
let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
|
||||
.arg_param(&["path"])
|
||||
@ -276,9 +337,214 @@ pub fn cli() -> CliCommandMap {
|
||||
.arg_param(&["path"])
|
||||
.completion_cb("path", tools::complete_file_name);
|
||||
|
||||
let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
|
||||
.arg_param(&["path"])
|
||||
.completion_cb("path", tools::complete_file_name);
|
||||
|
||||
CliCommandMap::new()
|
||||
.insert("create", key_create_cmd_def)
|
||||
.insert("create-master-key", key_create_master_key_cmd_def)
|
||||
.insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
|
||||
.insert("change-passphrase", key_change_passphrase_cmd_def)
|
||||
.insert("paperkey", paper_key_cmd_def)
|
||||
}
|
||||
|
||||
fn paperkey_html(data: &str, subject: Option<String>) -> Result<(), Error> {
|
||||
|
||||
let img_size_pt = 500;
|
||||
|
||||
println!("<!DOCTYPE html>");
|
||||
println!("<html lang=\"en\">");
|
||||
println!("<head>");
|
||||
println!("<meta charset=\"utf-8\">");
|
||||
println!("<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">");
|
||||
println!("<title>Proxmox Backup Paperkey</title>");
|
||||
println!("<style type=\"text/css\">");
|
||||
|
||||
println!(" p {{");
|
||||
println!(" font-size: 12pt;");
|
||||
println!(" font-family: monospace;");
|
||||
println!(" white-space: pre-wrap;");
|
||||
println!(" line-break: anywhere;");
|
||||
println!(" }}");
|
||||
|
||||
println!("</style>");
|
||||
|
||||
println!("</head>");
|
||||
|
||||
println!("<body>");
|
||||
|
||||
if let Some(subject) = subject {
|
||||
println!("<p>Subject: {}</p>", subject);
|
||||
}
|
||||
|
||||
if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
|
||||
let lines: Vec<String> = data.lines()
|
||||
.map(|s| s.trim_end())
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(String::from)
|
||||
.collect();
|
||||
|
||||
if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
|
||||
bail!("unexpected key format");
|
||||
}
|
||||
|
||||
if lines.len() < 20 {
|
||||
bail!("unexpected key format");
|
||||
}
|
||||
|
||||
const BLOCK_SIZE: usize = 20;
|
||||
let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
|
||||
|
||||
for i in 0..blocks {
|
||||
let start = i*BLOCK_SIZE;
|
||||
let mut end = start + BLOCK_SIZE;
|
||||
if end > lines.len() {
|
||||
end = lines.len();
|
||||
}
|
||||
let data = &lines[start..end];
|
||||
|
||||
println!("<div style=\"page-break-inside: avoid;page-break-after: always\">");
|
||||
println!("<p>");
|
||||
|
||||
for l in start..end {
|
||||
println!("{:02}: {}", l, lines[l]);
|
||||
}
|
||||
|
||||
println!("</p>");
|
||||
|
||||
let data = data.join("\n");
|
||||
let qr_code = generate_qr_code("png", data.as_bytes())?;
|
||||
let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
|
||||
|
||||
println!("<center>");
|
||||
println!("<img");
|
||||
println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
|
||||
println!("src=\"data:image/png;base64,{}\"/>", qr_code);
|
||||
println!("</center>");
|
||||
println!("</div>");
|
||||
}
|
||||
|
||||
println!("</body>");
|
||||
println!("</html>");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let key_config: KeyConfig = serde_json::from_str(&data)?;
|
||||
let key_text = serde_json::to_string_pretty(&key_config)?;
|
||||
|
||||
println!("<div style=\"page-break-inside: avoid\">");
|
||||
|
||||
println!("<p>");
|
||||
|
||||
println!("-----BEGIN PROXMOX BACKUP KEY-----");
|
||||
|
||||
for line in key_text.lines() {
|
||||
println!("{}", line);
|
||||
}
|
||||
|
||||
println!("-----END PROXMOX BACKUP KEY-----");
|
||||
|
||||
println!("</p>");
|
||||
|
||||
let qr_code = generate_qr_code("png", key_text.as_bytes())?;
|
||||
let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
|
||||
|
||||
println!("<center>");
|
||||
println!("<img");
|
||||
println!("width=\"{}pt\" height=\"{}pt\"", img_size_pt, img_size_pt);
|
||||
println!("src=\"data:image/png;base64,{}\"/>", qr_code);
|
||||
println!("</center>");
|
||||
|
||||
println!("</div>");
|
||||
|
||||
println!("</body>");
|
||||
println!("</html>");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn paperkey_text(data: &str, subject: Option<String>) -> Result<(), Error> {
|
||||
|
||||
if let Some(subject) = subject {
|
||||
println!("Subject: {}\n", subject);
|
||||
}
|
||||
|
||||
if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n") {
|
||||
let lines: Vec<String> = data.lines()
|
||||
.map(|s| s.trim_end())
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(String::from)
|
||||
.collect();
|
||||
|
||||
if !lines[lines.len()-1].starts_with("-----END ENCRYPTED PRIVATE KEY-----") {
|
||||
bail!("unexpected key format");
|
||||
}
|
||||
|
||||
if lines.len() < 20 {
|
||||
bail!("unexpected key format");
|
||||
}
|
||||
|
||||
const BLOCK_SIZE: usize = 5;
|
||||
let blocks = (lines.len() + BLOCK_SIZE -1)/BLOCK_SIZE;
|
||||
|
||||
for i in 0..blocks {
|
||||
let start = i*BLOCK_SIZE;
|
||||
let mut end = start + BLOCK_SIZE;
|
||||
if end > lines.len() {
|
||||
end = lines.len();
|
||||
}
|
||||
let data = &lines[start..end];
|
||||
|
||||
for l in start..end {
|
||||
println!("{:-2}: {}", l, lines[l]);
|
||||
}
|
||||
let data = data.join("\n");
|
||||
let qr_code = generate_qr_code("utf8i", data.as_bytes())?;
|
||||
let qr_code = String::from_utf8(qr_code)
|
||||
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
|
||||
println!("{}", qr_code);
|
||||
println!("{}", char::from(12u8)); // page break
|
||||
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let key_config: KeyConfig = serde_json::from_str(&data)?;
|
||||
let key_text = serde_json::to_string_pretty(&key_config)?;
|
||||
|
||||
println!("-----BEGIN PROXMOX BACKUP KEY-----");
|
||||
println!("{}", key_text);
|
||||
println!("-----END PROXMOX BACKUP KEY-----");
|
||||
|
||||
let qr_code = generate_qr_code("utf8i", key_text.as_bytes())?;
|
||||
let qr_code = String::from_utf8(qr_code)
|
||||
.map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
|
||||
|
||||
println!("{}", qr_code);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn generate_qr_code(output_type: &str, data: &[u8]) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let mut child = Command::new("qrencode")
|
||||
.args(&["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
||||
{
|
||||
let stdin = child.stdin.as_mut()
|
||||
.ok_or_else(|| format_err!("Failed to open stdin"))?;
|
||||
stdin.write_all(data)
|
||||
.map_err(|_| format_err!("Failed to write to stdin"))?;
|
||||
}
|
||||
|
||||
let output = child.wait_with_output()
|
||||
.map_err(|_| format_err!("Failed to read stdout"))?;
|
||||
|
||||
let output = crate::tools::command_output(output, None)?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
@ -3,6 +3,8 @@ use std::sync::Arc;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::Path;
|
||||
use std::ffi::OsStr;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::BuildHasher;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
@ -10,6 +12,7 @@ use tokio::signal::unix::{signal, SignalKind};
|
||||
use nix::unistd::{fork, ForkResult, pipe};
|
||||
use futures::select;
|
||||
use futures::future::FutureExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
|
||||
@ -23,6 +26,7 @@ use proxmox_backup::backup::{
|
||||
BackupDir,
|
||||
BackupGroup,
|
||||
BufferedDynamicReader,
|
||||
AsyncIndexReader,
|
||||
};
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
@ -31,6 +35,7 @@ use crate::{
|
||||
REPO_URL_SCHEMA,
|
||||
extract_repository_from_value,
|
||||
complete_pxar_archive_name,
|
||||
complete_img_archive_name,
|
||||
complete_group_or_snapshot,
|
||||
complete_repository,
|
||||
record_repository,
|
||||
@ -50,7 +55,37 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
|
||||
("target", false, &StringSchema::new("Target directory path.").schema()),
|
||||
("repository", true, &REPO_URL_SCHEMA),
|
||||
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
|
||||
("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
|
||||
("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_MAP: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&mount),
|
||||
&ObjectSchema::new(
|
||||
"Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
|
||||
WARNING: Only do this with *trusted* backups!",
|
||||
&sorted!([
|
||||
("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
|
||||
("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
|
||||
("repository", true, &REPO_URL_SCHEMA),
|
||||
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
|
||||
("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_UNMAP: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&unmap),
|
||||
&ObjectSchema::new(
|
||||
"Unmap a loop device mapped with 'map' and release all resources.",
|
||||
&sorted!([
|
||||
("name", true, &StringSchema::new(
|
||||
concat!("Archive name, path to loopdev (/dev/loopX) or loop device number. ",
|
||||
"Omit to list all current mappings and force cleaning up leftover instances.")
|
||||
).schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
@ -65,6 +100,34 @@ pub fn mount_cmd_def() -> CliCommand {
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
}
|
||||
|
||||
pub fn map_cmd_def() -> CliCommand {
|
||||
|
||||
CliCommand::new(&API_METHOD_MAP)
|
||||
.arg_param(&["snapshot", "archive-name"])
|
||||
.completion_cb("repository", complete_repository)
|
||||
.completion_cb("snapshot", complete_group_or_snapshot)
|
||||
.completion_cb("archive-name", complete_img_archive_name)
|
||||
}
|
||||
|
||||
pub fn unmap_cmd_def() -> CliCommand {
|
||||
|
||||
CliCommand::new(&API_METHOD_UNMAP)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("name", complete_mapping_names)
|
||||
}
|
||||
|
||||
fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
|
||||
-> Vec<String>
|
||||
{
|
||||
match tools::fuse_loop::find_all_mappings() {
|
||||
Ok(mappings) => mappings
|
||||
.filter_map(|(name, _)| {
|
||||
tools::systemd::unescape_unit(&name).ok()
|
||||
}).collect(),
|
||||
Err(_) => Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
fn mount(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
@ -100,8 +163,9 @@ fn mount(
|
||||
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?;
|
||||
let target = tools::required_string_param(¶m, "target")?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let target = param["target"].as_str();
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
@ -124,9 +188,17 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
};
|
||||
|
||||
let server_archive_name = if archive_name.ends_with(".pxar") {
|
||||
if let None = target {
|
||||
bail!("use the 'mount' command to mount pxar archives");
|
||||
}
|
||||
format!("{}.didx", archive_name)
|
||||
} else if archive_name.ends_with(".img") {
|
||||
if let Some(_) = target {
|
||||
bail!("use the 'map' command to map drive images");
|
||||
}
|
||||
format!("{}.fidx", archive_name)
|
||||
} else {
|
||||
bail!("Can only mount pxar archives.");
|
||||
bail!("Can only mount/map pxar archives and drive images.");
|
||||
};
|
||||
|
||||
let client = BackupReader::start(
|
||||
@ -143,25 +215,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
|
||||
let file_info = manifest.lookup_file_info(&server_archive_name)?;
|
||||
|
||||
if server_archive_name.ends_with(".didx") {
|
||||
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
let options = OsStr::new("ro,default_permissions");
|
||||
|
||||
let session = proxmox_backup::pxar::fuse::Session::mount(
|
||||
decoder,
|
||||
&options,
|
||||
false,
|
||||
Path::new(target),
|
||||
)
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
let daemonize = || -> Result<(), Error> {
|
||||
if let Some(pipe) = pipe {
|
||||
nix::unistd::chdir(Path::new("/")).unwrap();
|
||||
// Finish creation of daemon by redirecting filedescriptors.
|
||||
@ -182,15 +236,132 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
nix::unistd::close(pipe).unwrap();
|
||||
}
|
||||
|
||||
let mut interrupt = signal(SignalKind::interrupt())?;
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let options = OsStr::new("ro,default_permissions");
|
||||
|
||||
// handle SIGINT and SIGTERM
|
||||
let mut interrupt_int = signal(SignalKind::interrupt())?;
|
||||
let mut interrupt_term = signal(SignalKind::terminate())?;
|
||||
let mut interrupt = futures::future::select(interrupt_int.next(), interrupt_term.next());
|
||||
|
||||
if server_archive_name.ends_with(".didx") {
|
||||
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
|
||||
let session = proxmox_backup::pxar::fuse::Session::mount(
|
||||
decoder,
|
||||
&options,
|
||||
false,
|
||||
Path::new(target.unwrap()),
|
||||
)
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
daemonize()?;
|
||||
|
||||
select! {
|
||||
res = session.fuse() => res?,
|
||||
_ = interrupt.recv().fuse() => {
|
||||
_ = interrupt => {
|
||||
// exit on interrupted
|
||||
}
|
||||
}
|
||||
} else if server_archive_name.ends_with(".fidx") {
|
||||
let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
|
||||
let size = index.index_bytes();
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
|
||||
let reader = AsyncIndexReader::new(index, chunk_reader);
|
||||
|
||||
let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
|
||||
let name_escaped = tools::systemd::escape_unit(name, false);
|
||||
|
||||
let mut session = tools::fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
|
||||
let loopdev = session.loopdev_path.clone();
|
||||
|
||||
let (st_send, st_recv) = futures::channel::mpsc::channel(1);
|
||||
let (mut abort_send, abort_recv) = futures::channel::mpsc::channel(1);
|
||||
let mut st_recv = st_recv.fuse();
|
||||
let mut session_fut = session.main(st_send, abort_recv).boxed().fuse();
|
||||
|
||||
// poll until loop file is mapped (or errors)
|
||||
select! {
|
||||
res = session_fut => {
|
||||
bail!("FUSE session unexpectedly ended before loop file mapping");
|
||||
},
|
||||
res = st_recv.try_next() => {
|
||||
if let Err(err) = res {
|
||||
// init went wrong, abort now
|
||||
abort_send.try_send(()).map_err(|err|
|
||||
format_err!("error while sending abort signal - {}", err))?;
|
||||
// ignore and keep original error cause
|
||||
let _ = session_fut.await;
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// daemonize only now to be able to print mapped loopdev or startup errors
|
||||
println!("Image '{}' mapped on {}", name, loopdev);
|
||||
daemonize()?;
|
||||
|
||||
// continue polling until complete or interrupted (which also happens on unmap)
|
||||
select! {
|
||||
res = session_fut => res?,
|
||||
_ = interrupt => {
|
||||
// exit on interrupted
|
||||
abort_send.try_send(()).map_err(|err|
|
||||
format_err!("error while sending abort signal - {}", err))?;
|
||||
session_fut.await?;
|
||||
}
|
||||
}
|
||||
|
||||
println!("Image unmapped");
|
||||
} else {
|
||||
bail!("unknown archive file extension (expected .pxar)");
|
||||
bail!("unknown archive file extension (expected .pxar or .img)");
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
}
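
A standalone sketch of the combined SIGINT/SIGTERM handling set up in mount_do above; it assumes the tokio 0.2-era API used here, where unix signals implement Stream, and is not part of the diff.

use anyhow::Error;
use futures::{future, StreamExt};
use tokio::signal::unix::{signal, SignalKind};

async fn wait_for_shutdown() -> Result<(), Error> {
    let mut interrupt_int = signal(SignalKind::interrupt())?;
    let mut interrupt_term = signal(SignalKind::terminate())?;
    // whichever signal arrives first resolves the select-ed future once
    future::select(interrupt_int.next(), interrupt_term.next()).await;
    Ok(())
}
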
|
||||
|
||||
fn unmap(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let mut name = match param["name"].as_str() {
|
||||
Some(name) => name.to_owned(),
|
||||
None => {
|
||||
tools::fuse_loop::cleanup_unused_run_files(None);
|
||||
let mut any = false;
|
||||
for (backing, loopdev) in tools::fuse_loop::find_all_mappings()? {
|
||||
let name = tools::systemd::unescape_unit(&backing)?;
|
||||
println!("{}:\t{}", loopdev.unwrap_or("(unmapped)".to_owned()), name);
|
||||
any = true;
|
||||
}
|
||||
if !any {
|
||||
println!("Nothing mapped.");
|
||||
}
|
||||
return Ok(Value::Null);
|
||||
},
|
||||
};
|
||||
|
||||
// allow loop device number alone
|
||||
if let Ok(num) = name.parse::<u8>() {
|
||||
name = format!("/dev/loop{}", num);
|
||||
}
|
||||
|
||||
if name.starts_with("/dev/loop") {
|
||||
tools::fuse_loop::unmap_loopdev(name)?;
|
||||
} else {
|
||||
let name = tools::systemd::escape_unit(&name, false);
|
||||
tools::fuse_loop::unmap_name(name)?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
|
@ -48,7 +48,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
|
||||
let output_format = get_output_format(&param);
|
||||
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let limit = param["limit"].as_u64().unwrap_or(50) as usize;
|
||||
let running = !param["all"].as_bool().unwrap_or(false);
|
||||
@ -96,7 +96,7 @@ async fn task_log(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
let upid = tools::required_string_param(&param, "upid")?;
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
display_task_log(client, upid, true).await?;
|
||||
|
||||
@ -122,7 +122,7 @@ async fn task_stop(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
let upid_str = tools::required_string_param(&param, "upid")?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
let mut client = connect(repo.host(), repo.port(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
|
||||
let _ = client.delete(&path, None).await?;
|
||||
|
@ -1,16 +1,18 @@
|
||||
use anyhow::{format_err, Error};
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::io::{Write, Seek, SeekFrom};
|
||||
use std::fs::File;
|
||||
use std::sync::Arc;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::future::AbortHandle;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::tools::digest_to_hex;
|
||||
|
||||
use crate::backup::*;
|
||||
use crate::{
|
||||
tools::compute_file_csum,
|
||||
backup::*,
|
||||
};
|
||||
|
||||
use super::{HttpClient, H2Client};
|
||||
|
||||
@ -41,18 +43,18 @@ impl BackupReader {
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
) -> Result<Arc<BackupReader>, Error> {
|
||||
|
||||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
});
|
||||
let req = HttpClient::request_builder(client.server(), "GET", "/api2/json/reader", Some(param)).unwrap();
|
||||
let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
|
||||
|
||||
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
|
||||
|
||||
@ -220,29 +222,3 @@ impl BackupReader {
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compute_file_csum(file: &mut File) -> Result<([u8; 32], u64), Error> {
|
||||
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let mut hasher = openssl::sha::Sha256::new();
|
||||
let mut buffer = proxmox::tools::vec::undefined(256*1024);
|
||||
let mut size: u64 = 0;
|
||||
|
||||
loop {
|
||||
let count = match file.read(&mut buffer) {
|
||||
Ok(count) => count,
|
||||
Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => { continue; }
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
if count == 0 {
|
||||
break;
|
||||
}
|
||||
size += count as u64;
|
||||
hasher.update(&buffer[..count]);
|
||||
}
|
||||
|
||||
let csum = hasher.finish();
|
||||
|
||||
Ok((csum, size))
|
||||
}
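
A hedged usage sketch for the checksum helper above, which this change relocates into the crate's tools module; the file path and the proxmox_backup::tools import path are assumptions for illustration.

use std::fs::File;
use proxmox_backup::tools::compute_file_csum;

fn main() -> Result<(), anyhow::Error> {
    let mut file = File::open("/tmp/example.blob")?;
    let (csum, size) = compute_file_csum(&mut file)?;
    // digest_to_hex renders the 32 byte SHA-256 digest as a hex string
    println!("sha256 {} over {} bytes", proxmox::tools::digest_to_hex(&csum), size);
    Ok(())
}
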
|
||||
|
@ -19,14 +19,22 @@ pub struct BackupRepository {
|
||||
user: Option<Userid>,
|
||||
/// The host name or IP address
|
||||
host: Option<String>,
|
||||
/// The port
|
||||
port: Option<u16>,
|
||||
/// The name of the datastore
|
||||
store: String,
|
||||
}
|
||||
|
||||
impl BackupRepository {
|
||||
|
||||
pub fn new(user: Option<Userid>, host: Option<String>, store: String) -> Self {
|
||||
Self { user, host, store }
|
||||
pub fn new(user: Option<Userid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
|
||||
let host = match host {
|
||||
Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
|
||||
Some(format!("[{}]", host))
|
||||
},
|
||||
other => other,
|
||||
};
|
||||
Self { user, host, port, store }
|
||||
}
|
||||
|
||||
pub fn user(&self) -> &Userid {
|
||||
@ -43,6 +51,13 @@ impl BackupRepository {
|
||||
"localhost"
|
||||
}
|
||||
|
||||
pub fn port(&self) -> u16 {
|
||||
if let Some(port) = self.port {
|
||||
return port;
|
||||
}
|
||||
8007
|
||||
}
|
||||
|
||||
pub fn store(&self) -> &str {
|
||||
&self.store
|
||||
}
|
||||
@ -50,12 +65,11 @@ impl BackupRepository {
|
||||
|
||||
impl fmt::Display for BackupRepository {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if let Some(ref user) = self.user {
|
||||
write!(f, "{}@{}:{}", user, self.host(), self.store)
|
||||
} else if let Some(ref host) = self.host {
|
||||
write!(f, "{}:{}", host, self.store)
|
||||
} else {
|
||||
write!(f, "{}", self.store)
|
||||
match (&self.user, &self.host, self.port) {
|
||||
(Some(user), _, _) => write!(f, "{}@{}:{}:{}", user, self.host(), self.port(), self.store),
|
||||
(None, Some(host), None) => write!(f, "{}:{}", host, self.store),
|
||||
(None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
|
||||
(None, None, None) => write!(f, "{}", self.store),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -76,7 +90,8 @@ impl std::str::FromStr for BackupRepository {
|
||||
Ok(Self {
|
||||
user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
|
||||
host: cap.get(2).map(|m| m.as_str().to_owned()),
|
||||
store: cap[3].to_owned(),
|
||||
port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
|
||||
store: cap[4].to_owned(),
|
||||
})
|
||||
}
|
||||
}
|
||||
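
A hedged example of the repository specs the updated parser accepts (host, port and store are made up; the proxmox_backup::client import path is an assumption): the optional port becomes a third component between host and datastore.

use std::str::FromStr;
use proxmox_backup::client::BackupRepository;

fn main() -> Result<(), anyhow::Error> {
    let repo = BackupRepository::from_str("192.168.1.2:8043:mystore")?;
    assert_eq!(repo.port(), 8043);
    assert_eq!(repo.store(), "mystore");

    // without an explicit port, port() falls back to the default 8007
    let repo = BackupRepository::from_str("192.168.1.2:mystore")?;
    assert_eq!(repo.port(), 8007);
    Ok(())
}
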
|
@ -4,7 +4,6 @@ use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::*;
|
||||
use futures::stream::Stream;
|
||||
use futures::future::AbortHandle;
|
||||
@ -51,7 +50,7 @@ impl BackupWriter {
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
benchmark: bool
|
||||
) -> Result<Arc<BackupWriter>, Error> {
|
||||
@ -59,14 +58,14 @@ impl BackupWriter {
|
||||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
"benchmark": benchmark
|
||||
});
|
||||
|
||||
let req = HttpClient::request_builder(
|
||||
client.server(), "GET", "/api2/json/backup", Some(param)).unwrap();
|
||||
client.server(), client.port(), "GET", "/api2/json/backup", Some(param)).unwrap();
|
||||
|
||||
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
|
||||
|
||||
@ -263,7 +262,7 @@ impl BackupWriter {
|
||||
let archive = if self.verbose {
|
||||
archive_name.to_string()
|
||||
} else {
|
||||
crate::tools::format::strip_server_file_expenstion(archive_name.clone())
|
||||
crate::tools::format::strip_server_file_extension(archive_name.clone())
|
||||
};
|
||||
if archive_name != CATALOG_NAME {
|
||||
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
|
||||
|
@ -1,8 +1,8 @@
|
||||
use std::io::Write;
|
||||
use std::task::{Context, Poll};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use chrono::Utc;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use http::Uri;
|
||||
@ -30,7 +30,7 @@ use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AuthInfo {
|
||||
pub username: String,
|
||||
pub userid: Userid,
|
||||
pub ticket: String,
|
||||
pub token: String,
|
||||
}
|
||||
@ -99,8 +99,11 @@ impl HttpClientOptions {
|
||||
pub struct HttpClient {
|
||||
client: Client<HttpsConnector>,
|
||||
server: String,
|
||||
port: u16,
|
||||
fingerprint: Arc<Mutex<Option<String>>>,
|
||||
auth: BroadcastFuture<AuthInfo>,
|
||||
first_auth: BroadcastFuture<()>,
|
||||
auth: Arc<RwLock<AuthInfo>>,
|
||||
ticket_abort: futures::future::AbortHandle,
|
||||
_options: HttpClientOptions,
|
||||
}
|
||||
|
||||
@ -199,7 +202,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
||||
|
||||
let mut data = file_get_json(&path, Some(json!({})))?;
|
||||
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
||||
|
||||
@ -230,7 +233,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
||||
// usually /run/user/<uid>/...
|
||||
let path = base.place_runtime_file("tickets").ok()?;
|
||||
let data = file_get_json(&path, None).ok()?;
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
|
||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||
@ -248,6 +251,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
||||
impl HttpClient {
|
||||
pub fn new(
|
||||
server: &str,
|
||||
port: u16,
|
||||
userid: &Userid,
|
||||
mut options: HttpClientOptions,
|
||||
) -> Result<Self, Error> {
|
||||
@ -318,29 +322,69 @@ impl HttpClient {
|
||||
}
|
||||
};
|
||||
|
||||
let auth = Arc::new(RwLock::new(AuthInfo {
|
||||
userid: userid.clone(),
|
||||
ticket: password.clone(),
|
||||
token: "".to_string(),
|
||||
}));
|
||||
|
||||
let server2 = server.to_string();
|
||||
let client2 = client.clone();
|
||||
let auth2 = auth.clone();
|
||||
let prefix2 = options.prefix.clone();
|
||||
|
||||
let renewal_future = async move {
|
||||
loop {
|
||||
tokio::time::delay_for(Duration::new(60*15, 0)).await; // 15 minutes
|
||||
let (userid, ticket) = {
|
||||
let authinfo = auth2.read().unwrap().clone();
|
||||
(authinfo.userid, authinfo.ticket)
|
||||
};
|
||||
match Self::credentials(client2.clone(), server2.clone(), port, userid, ticket).await {
|
||||
Ok(auth) => {
|
||||
if use_ticket_cache && prefix2.is_some() {
|
||||
let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.userid.to_string(), &auth.ticket, &auth.token);
|
||||
}
|
||||
*auth2.write().unwrap() = auth;
|
||||
},
|
||||
Err(err) => {
|
||||
eprintln!("re-authentication failed: {}", err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
|
||||
|
||||
let login_future = Self::credentials(
|
||||
client.clone(),
|
||||
server.to_owned(),
|
||||
port,
|
||||
userid.to_owned(),
|
||||
password.to_owned(),
|
||||
).map_ok({
|
||||
let server = server.to_string();
|
||||
let prefix = options.prefix.clone();
|
||||
let authinfo = auth.clone();
|
||||
|
||||
move |auth| {
|
||||
if use_ticket_cache && prefix.is_some() {
|
||||
let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.username, &auth.ticket, &auth.token);
|
||||
let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.userid.to_string(), &auth.ticket, &auth.token);
|
||||
}
|
||||
|
||||
auth
|
||||
*authinfo.write().unwrap() = auth;
|
||||
tokio::spawn(renewal_future);
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
server: String::from(server),
|
||||
port,
|
||||
fingerprint: verified_fingerprint,
|
||||
auth: BroadcastFuture::new(Box::new(login_future)),
|
||||
auth,
|
||||
ticket_abort,
|
||||
first_auth: BroadcastFuture::new(Box::new(login_future)),
|
||||
_options: options,
|
||||
})
|
||||
}
|
||||
@ -350,7 +394,9 @@ impl HttpClient {
|
||||
/// Login is done on demand, so this is only required if you need
|
||||
/// access to authentication data in 'AuthInfo'.
|
||||
pub async fn login(&self) -> Result<AuthInfo, Error> {
|
||||
self.auth.listen().await
|
||||
self.first_auth.listen().await?;
|
||||
let authinfo = self.auth.read().unwrap();
|
||||
Ok(authinfo.clone())
|
||||
}
|
||||
|
||||
/// Returns the optional fingerprint passed to the new() constructor.
|
||||
@ -444,7 +490,7 @@ impl HttpClient {
|
||||
path: &str,
|
||||
data: Option<Value>,
|
||||
) -> Result<Value, Error> {
|
||||
let req = Self::request_builder(&self.server, "GET", path, data).unwrap();
|
||||
let req = Self::request_builder(&self.server, self.port, "GET", path, data)?;
|
||||
self.request(req).await
|
||||
}
|
||||
|
||||
@ -453,7 +499,7 @@ impl HttpClient {
|
||||
path: &str,
|
||||
data: Option<Value>,
|
||||
) -> Result<Value, Error> {
|
||||
let req = Self::request_builder(&self.server, "DELETE", path, data).unwrap();
|
||||
let req = Self::request_builder(&self.server, self.port, "DELETE", path, data)?;
|
||||
self.request(req).await
|
||||
}
|
||||
|
||||
@ -462,7 +508,7 @@ impl HttpClient {
|
||||
path: &str,
|
||||
data: Option<Value>,
|
||||
) -> Result<Value, Error> {
|
||||
let req = Self::request_builder(&self.server, "POST", path, data).unwrap();
|
||||
let req = Self::request_builder(&self.server, self.port, "POST", path, data)?;
|
||||
self.request(req).await
|
||||
}
|
||||
|
||||
@ -471,7 +517,7 @@ impl HttpClient {
|
||||
path: &str,
|
||||
output: &mut (dyn Write + Send),
|
||||
) -> Result<(), Error> {
|
||||
let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();
|
||||
let mut req = Self::request_builder(&self.server, self.port, "GET", path, None)?;
|
||||
|
||||
let client = self.client.clone();
|
||||
|
||||
@ -507,7 +553,7 @@ impl HttpClient {
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let path = path.trim_matches('/');
|
||||
let mut url = format!("https://{}:8007/{}", &self.server, path);
|
||||
let mut url = format!("https://{}:{}/{}", &self.server, self.port, path);
|
||||
|
||||
if let Some(data) = data {
|
||||
let query = tools::json_object_to_query(data).unwrap();
|
||||
@ -582,14 +628,15 @@ impl HttpClient {
|
||||
async fn credentials(
|
||||
client: Client<HttpsConnector>,
|
||||
server: String,
|
||||
port: u16,
|
||||
username: Userid,
|
||||
password: String,
|
||||
) -> Result<AuthInfo, Error> {
|
||||
let data = json!({ "username": username, "password": password });
|
||||
let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
|
||||
let req = Self::request_builder(&server, port, "POST", "/api2/json/access/ticket", Some(data))?;
|
||||
let cred = Self::api_request(client, req).await?;
|
||||
let auth = AuthInfo {
|
||||
username: cred["data"]["username"].as_str().unwrap().to_owned(),
|
||||
userid: cred["data"]["username"].as_str().unwrap().parse()?,
|
||||
ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
|
||||
token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
|
||||
};
|
||||
@ -630,9 +677,13 @@ impl HttpClient {
|
||||
&self.server
|
||||
}
|
||||
|
||||
pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
|
||||
pub fn request_builder(server: &str, port: u16, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
|
||||
let path = path.trim_matches('/');
|
||||
let url: Uri = format!("https://{}:8007/{}", server, path).parse()?;
|
||||
let url: Uri = format!("https://{}:{}/{}", server, port, path).parse()?;
|
||||
|
||||
if let Some(data) = data {
|
||||
if method == "POST" {
|
||||
@ -645,7 +696,7 @@ impl HttpClient {
|
||||
return Ok(request);
|
||||
} else {
|
||||
let query = tools::json_object_to_query(data)?;
|
||||
let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?;
|
||||
let url: Uri = format!("https://{}:{}/{}?{}", server, port, path, query).parse()?;
|
||||
let request = Request::builder()
|
||||
.method(method)
|
||||
.uri(url)
|
||||
@ -667,6 +718,12 @@ impl HttpClient {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for HttpClient {
|
||||
fn drop(&mut self) {
|
||||
self.ticket_abort.abort();
|
||||
}
|
||||
}
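
A minimal sketch of the ticket-renewal pattern introduced above; the TicketClient name, the dummy ticket strings and the direct use of tokio 0.2's delay_for are placeholders, not the proxmox-backup API. Auth state lives behind an Arc<RwLock<..>>, a background task refreshes it periodically, and dropping the client aborts that task through the stored AbortHandle.

use std::sync::{Arc, RwLock};
use std::time::Duration;
use futures::future::AbortHandle;

struct TicketClient {
    auth: Arc<RwLock<String>>,
    ticket_abort: AbortHandle,
}

impl TicketClient {
    fn new(initial_ticket: String) -> Self {
        let auth = Arc::new(RwLock::new(initial_ticket));
        let auth2 = auth.clone();
        let renewal = async move {
            loop {
                tokio::time::delay_for(Duration::from_secs(15 * 60)).await;
                // a real client would re-authenticate here; the sketch only swaps the value
                *auth2.write().unwrap() = "renewed-ticket".to_string();
            }
        };
        let (renewal, ticket_abort) = futures::future::abortable(renewal);
        tokio::spawn(renewal);
        Self { auth, ticket_abort }
    }

    fn ticket(&self) -> String {
        self.auth.read().unwrap().clone()
    }
}

impl Drop for TicketClient {
    fn drop(&mut self) {
        // stop the renewal loop together with the client
        self.ticket_abort.abort();
    }
}
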
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct H2Client {
|
||||
|
@ -3,15 +3,20 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::json;
|
||||
use std::convert::TryFrom;
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::time::SystemTime;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use proxmox::api::error::{StatusCode, HttpError};
|
||||
use crate::server::{WorkerTask};
|
||||
use crate::backup::*;
|
||||
use crate::api2::types::*;
|
||||
use super::*;
|
||||
use crate::{
|
||||
tools::{ParallelHandler, compute_file_csum},
|
||||
server::WorkerTask,
|
||||
backup::*,
|
||||
api2::types::*,
|
||||
client::*,
|
||||
};
|
||||
|
||||
|
||||
// fixme: implement filters
|
||||
@ -19,27 +24,86 @@ use super::*;
|
||||
// Todo: correctly lock backup groups
|
||||
|
||||
async fn pull_index_chunks<I: IndexFile>(
|
||||
_worker: &WorkerTask,
|
||||
chunk_reader: &mut RemoteChunkReader,
|
||||
worker: &WorkerTask,
|
||||
chunk_reader: RemoteChunkReader,
|
||||
target: Arc<DataStore>,
|
||||
index: I,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
use futures::stream::{self, StreamExt, TryStreamExt};
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let chunk_exists = target.cond_touch_chunk(&info.digest, false)?;
|
||||
let start_time = SystemTime::now();
|
||||
|
||||
let stream = stream::iter(
|
||||
(0..index.index_count())
|
||||
.map(|pos| index.chunk_info(pos).unwrap())
|
||||
.filter(|info| {
|
||||
let mut guard = downloaded_chunks.lock().unwrap();
|
||||
let done = guard.contains(&info.digest);
|
||||
if !done {
|
||||
// Note: We mark a chunk as downloaded before its actually downloaded
|
||||
// to avoid duplicate downloads.
|
||||
guard.insert(info.digest);
|
||||
}
|
||||
!done
|
||||
})
|
||||
);
|
||||
|
||||
let target2 = target.clone();
|
||||
let verify_pool = ParallelHandler::new(
|
||||
"sync chunk writer", 4,
|
||||
move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
|
||||
// println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
|
||||
chunk.verify_unencrypted(size as usize, &digest)?;
|
||||
target2.insert_chunk(&chunk, &digest)?;
|
||||
Ok(())
|
||||
}
|
||||
);
|
||||
|
||||
let verify_and_write_channel = verify_pool.channel();
|
||||
|
||||
let bytes = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
stream
|
||||
.map(|info| {
|
||||
|
||||
let target = Arc::clone(&target);
|
||||
let chunk_reader = chunk_reader.clone();
|
||||
let bytes = Arc::clone(&bytes);
|
||||
let verify_and_write_channel = verify_and_write_channel.clone();
|
||||
|
||||
Ok::<_, Error>(async move {
|
||||
let chunk_exists = crate::tools::runtime::block_in_place(|| target.cond_touch_chunk(&info.digest, false))?;
|
||||
if chunk_exists {
|
||||
//worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
continue;
|
||||
return Ok::<_, Error>(());
|
||||
}
|
||||
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
|
||||
let raw_size = chunk.raw_size() as usize;
|
||||
|
||||
chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
|
||||
// decode, verify and write in a separate threads to maximize throughput
|
||||
crate::tools::runtime::block_in_place(|| verify_and_write_channel.send((chunk, info.digest, info.size())))?;
|
||||
|
||||
target.insert_chunk(&chunk, &info.digest)?;
|
||||
}
|
||||
bytes.fetch_add(raw_size, Ordering::SeqCst);
|
||||
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.try_buffer_unordered(20)
|
||||
.try_for_each(|_res| futures::future::ok(()))
|
||||
.await?;
|
||||
|
||||
drop(verify_and_write_channel);
|
||||
|
||||
verify_pool.complete()?;
|
||||
|
||||
let elapsed = start_time.elapsed()?.as_secs_f64();
|
||||
|
||||
let bytes = bytes.load(Ordering::SeqCst);
|
||||
|
||||
worker.log(format!("downloaded {} bytes ({} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
|
||||
|
||||
Ok(())
|
||||
}
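
A self-contained sketch of the download pattern used by pull_index_chunks above (the fetch function and chunk ids are generic placeholders, not proxmox-backup code): turn the chunk list into a stream, keep up to 20 downloads in flight, and stop on the first error.

use anyhow::Error;
use futures::stream::{self, StreamExt, TryStreamExt};

async fn fetch(id: u32) -> Result<(), Error> {
    // placeholder for the real chunk download, verification and insert
    println!("fetched chunk {}", id);
    Ok(())
}

async fn fetch_all(ids: Vec<u32>) -> Result<(), Error> {
    stream::iter(ids)
        .map(|id| Ok::<_, Error>(fetch(id)))
        .try_buffer_unordered(20)
        .try_for_each(|_| futures::future::ok(()))
        .await
}
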
|
||||
@ -52,6 +116,7 @@ async fn download_manifest(
|
||||
let mut tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.read(true)
|
||||
.open(&filename)?;
|
||||
|
||||
@ -85,6 +150,7 @@ async fn pull_single_archive(
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
archive_info: &FileInfo,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let archive_name = &archive_info.filename;
|
||||
@ -111,7 +177,7 @@ async fn pull_single_archive(
|
||||
let (csum, size) = index.compute_csum();
|
||||
verify_archive(archive_info, &csum, size)?;
|
||||
|
||||
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
|
||||
pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
|
||||
}
|
||||
ArchiveType::FixedIndex => {
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
@ -119,7 +185,7 @@ async fn pull_single_archive(
|
||||
let (csum, size) = index.compute_csum();
|
||||
verify_archive(archive_info, &csum, size)?;
|
||||
|
||||
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
|
||||
pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
|
||||
}
|
||||
ArchiveType::Blob => {
|
||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
||||
@ -165,6 +231,7 @@ async fn pull_snapshot(
|
||||
reader: Arc<BackupReader>,
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut manifest_name = tgt_store.base_path();
|
||||
@ -184,8 +251,8 @@ async fn pull_snapshot(
|
||||
Err(err) => {
|
||||
match err.downcast_ref::<HttpError>() {
|
||||
Some(HttpError { code, message }) => {
|
||||
match code {
|
||||
&StatusCode::NOT_FOUND => {
|
||||
match *code {
|
||||
StatusCode::NOT_FOUND => {
|
||||
worker.log(format!("skipping snapshot {} - vanished since start of sync", snapshot));
|
||||
return Ok(());
|
||||
},
|
||||
@ -218,6 +285,7 @@ async fn pull_snapshot(
|
||||
try_client_log_download(worker, reader, &client_log_name).await?;
|
||||
}
|
||||
worker.log("no data changes");
|
||||
let _ = std::fs::remove_file(&tmp_manifest_name);
|
||||
return Ok(()); // nothing changed
|
||||
}
|
||||
}
|
||||
@ -273,6 +341,7 @@ async fn pull_snapshot(
|
||||
tgt_store.clone(),
|
||||
snapshot,
|
||||
&item,
|
||||
downloaded_chunks.clone(),
|
||||
).await?;
|
||||
}
|
||||
|
||||
@ -295,6 +364,7 @@ pub async fn pull_snapshot_from(
|
||||
reader: Arc<BackupReader>,
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
|
||||
@ -302,7 +372,7 @@ pub async fn pull_snapshot_from(
|
||||
if is_new {
|
||||
worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
|
||||
|
||||
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
|
||||
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await {
|
||||
if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
|
||||
worker.log(format!("cleanup error - {}", cleanup_err));
|
||||
}
|
||||
@ -311,7 +381,7 @@ pub async fn pull_snapshot_from(
|
||||
worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
|
||||
} else {
|
||||
worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
|
||||
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
|
||||
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await?;
|
||||
worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
|
||||
}
|
||||
|
||||
@ -325,6 +395,7 @@ pub async fn pull_group(
|
||||
tgt_store: Arc<DataStore>,
|
||||
group: &BackupGroup,
|
||||
delete: bool,
|
||||
progress: Option<(usize, usize)>, // (groups_done, group_count)
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());
|
||||
@ -346,11 +417,24 @@ pub async fn pull_group(
|
||||
|
||||
let mut remote_snapshots = std::collections::HashSet::new();
|
||||
|
||||
for item in list {
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let (per_start, per_group) = if let Some((groups_done, group_count)) = progress {
|
||||
let per_start = (groups_done as f64)/(group_count as f64);
|
||||
let per_group = 1.0/(group_count as f64);
|
||||
(per_start, per_group)
|
||||
} else {
|
||||
(0.0, 1.0)
|
||||
};
|
||||
|
||||
// start with 16384 chunks (up to 65GB)
|
||||
let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));
|
||||
|
||||
let snapshot_count = list.len();
|
||||
|
||||
for (pos, item) in list.into_iter().enumerate() {
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
|
||||
// in-progress backups can't be synced
|
||||
if let None = item.size {
|
||||
if item.size.is_none() {
|
||||
worker.log(format!("skipping snapshot {} - in-progress backup", snapshot));
|
||||
continue;
|
||||
}
|
||||
@ -367,7 +451,7 @@ pub async fn pull_group(
|
||||
.password(Some(auth_info.ticket.clone()))
|
||||
.fingerprint(fingerprint.clone());
|
||||
|
||||
let new_client = HttpClient::new(src_repo.host(), src_repo.user(), options)?;
|
||||
let new_client = HttpClient::new(src_repo.host(), src_repo.port(), src_repo.user(), options)?;
|
||||
|
||||
let reader = BackupReader::start(
|
||||
new_client,
|
||||
@ -379,7 +463,13 @@ pub async fn pull_group(
|
||||
true,
|
||||
).await?;
|
||||
|
||||
pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
|
||||
let result = pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await;
|
||||
|
||||
let percentage = (pos as f64)/(snapshot_count as f64);
|
||||
let percentage = per_start + percentage*per_group;
|
||||
worker.log(format!("percentage done: {:.2}%", percentage*100.0));
|
||||
|
||||
result?; // stop on error
|
||||
}
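
A worked example of the progress formula above, with made-up counts: 4 groups in total, 2 already finished, and the current group at snapshot index 3 of 10.

fn main() {
    let (per_start, per_group) = (2.0_f64 / 4.0, 1.0_f64 / 4.0); // 0.5 and 0.25
    let percentage = per_start + (3.0 / 10.0) * per_group;       // 0.575
    println!("percentage done: {:.2}%", percentage * 100.0);     // prints 57.50%
}
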
|
||||
|
||||
if delete {
|
||||
@ -429,7 +519,9 @@ pub async fn pull_store(
|
||||
new_groups.insert(BackupGroup::new(&item.backup_type, &item.backup_id));
|
||||
}
|
||||
|
||||
for item in list {
|
||||
let group_count = list.len();
|
||||
|
||||
for (groups_done, item) in list.into_iter().enumerate() {
|
||||
let group = BackupGroup::new(&item.backup_type, &item.backup_id);
|
||||
|
||||
let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
|
||||
@ -437,14 +529,22 @@ pub async fn pull_store(
|
||||
if userid != owner { // only the owner is allowed to create additional snapshots
|
||||
worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
|
||||
item.backup_type, item.backup_id, userid, owner));
|
||||
errors = true;
|
||||
continue; // do not stop here, instead continue
|
||||
}
|
||||
errors = true; // do not stop here, instead continue
|
||||
|
||||
if let Err(err) = pull_group(worker, client, src_repo, tgt_store.clone(), &group, delete).await {
|
||||
} else {
|
||||
|
||||
if let Err(err) = pull_group(
|
||||
worker,
|
||||
client,
|
||||
src_repo,
|
||||
tgt_store.clone(),
|
||||
&group,
|
||||
delete,
|
||||
Some((groups_done, group_count)),
|
||||
).await {
|
||||
worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
|
||||
errors = true;
|
||||
continue; // do not stop here, instead continue
|
||||
errors = true; // do not stop here, instead continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
use std::collections::HashSet;
|
||||
use std::io::Write;
|
||||
//use std::os::unix::io::FromRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::{Context, Poll};
|
||||
@ -38,9 +38,8 @@ impl Drop for PxarBackupStream {
|
||||
impl PxarBackupStream {
|
||||
pub fn new<W: Write + Send + 'static>(
|
||||
dir: Dir,
|
||||
_path: PathBuf,
|
||||
device_set: Option<HashSet<u64>>,
|
||||
_verbose: bool,
|
||||
verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
catalog: Arc<Mutex<CatalogWriter<W>>>,
|
||||
patterns: Vec<MatchEntry>,
|
||||
@ -70,7 +69,12 @@ impl PxarBackupStream {
|
||||
crate::pxar::Flags::DEFAULT,
|
||||
device_set,
|
||||
skip_lost_and_found,
|
||||
|_| Ok(()),
|
||||
|path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
entries_max,
|
||||
Some(&mut *catalog_guard),
|
||||
) {
|
||||
@ -97,11 +101,9 @@ impl PxarBackupStream {
|
||||
entries_max: usize,
|
||||
) -> Result<Self, Error> {
|
||||
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
|
||||
let path = std::path::PathBuf::from(dirname);
|
||||
|
||||
Self::new(
|
||||
dir,
|
||||
path,
|
||||
device_set,
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
|
@ -15,7 +15,7 @@ pub struct RemoteChunkReader {
|
||||
client: Arc<BackupReader>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
crypt_mode: CryptMode,
|
||||
cache_hint: HashMap<[u8; 32], usize>,
|
||||
cache_hint: Arc<HashMap<[u8; 32], usize>>,
|
||||
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
|
||||
}
|
||||
|
||||
@ -33,7 +33,7 @@ impl RemoteChunkReader {
|
||||
client,
|
||||
crypt_config,
|
||||
crypt_mode,
|
||||
cache_hint,
|
||||
cache_hint: Arc::new(cache_hint),
|
||||
cache: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
@ -12,42 +12,47 @@ use ::serde::{Deserialize, Serialize};
|
||||
use serde::de::{value, IntoDeserializer};
|
||||
|
||||
use proxmox::tools::{fs::replace_file, fs::CreateOptions};
|
||||
use proxmox::constnamemap;
|
||||
use proxmox::constnamedbitmap;
|
||||
use proxmox::api::{api, schema::*};
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
|
||||
// define Privilege bitfield
|
||||
|
||||
constnamemap! {
|
||||
constnamedbitmap! {
|
||||
/// Contains a list of Privileges
|
||||
PRIVILEGES: u64 => {
|
||||
PRIV_SYS_AUDIT("Sys.Audit") = 1 << 0;
|
||||
PRIV_SYS_MODIFY("Sys.Modify") = 1 << 1;
|
||||
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement") = 1 << 2;
|
||||
PRIV_SYS_AUDIT("Sys.Audit");
|
||||
PRIV_SYS_MODIFY("Sys.Modify");
|
||||
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
||||
|
||||
PRIV_DATASTORE_AUDIT("Datastore.Audit") = 1 << 3;
|
||||
PRIV_DATASTORE_MODIFY("Datastore.Modify") = 1 << 4;
|
||||
PRIV_DATASTORE_READ("Datastore.Read") = 1 << 5;
|
||||
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
||||
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
||||
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
||||
PRIV_DATASTORE_READ("Datastore.Read");
|
||||
|
||||
/// Datastore.Backup also requires backup ownership
|
||||
PRIV_DATASTORE_BACKUP("Datastore.Backup") = 1 << 6;
|
||||
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
||||
/// Datastore.Prune also requires backup ownership
|
||||
PRIV_DATASTORE_PRUNE("Datastore.Prune") = 1 << 7;
|
||||
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
||||
|
||||
PRIV_PERMISSIONS_MODIFY("Permissions.Modify") = 1 << 8;
|
||||
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
||||
|
||||
PRIV_REMOTE_AUDIT("Remote.Audit") = 1 << 9;
|
||||
PRIV_REMOTE_MODIFY("Remote.Modify") = 1 << 10;
|
||||
PRIV_REMOTE_READ("Remote.Read") = 1 << 11;
|
||||
PRIV_REMOTE_PRUNE("Remote.Prune") = 1 << 12;
|
||||
PRIV_REMOTE_AUDIT("Remote.Audit");
|
||||
PRIV_REMOTE_MODIFY("Remote.Modify");
|
||||
PRIV_REMOTE_READ("Remote.Read");
|
||||
PRIV_REMOTE_PRUNE("Remote.Prune");
|
||||
|
||||
PRIV_SYS_CONSOLE("Sys.Console") = 1 << 13;
|
||||
PRIV_SYS_CONSOLE("Sys.Console");
|
||||
}
|
||||
}
|
||||
|
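
A rough illustration of what the switch to constnamedbitmap! implies, assuming the macro assigns one bit per entry in declaration order (as the removed explicit values suggest): inserting Datastore.Allocate after Datastore.Audit shifts the bits of the privileges that follow it.

pub const PRIV_DATASTORE_AUDIT: u64    = 1 << 3;
pub const PRIV_DATASTORE_ALLOCATE: u64 = 1 << 4; // new
pub const PRIV_DATASTORE_MODIFY: u64   = 1 << 5; // was 1 << 4
pub const PRIV_DATASTORE_READ: u64     = 1 << 6; // was 1 << 5
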
||||
|
||||
/// Admin always has all privileges. It can do everything except a few actions
|
||||
/// which are limited to the 'root@pam` superuser
|
||||
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
||||
|
||||
/// NoAccess can be used to remove privileges from specific paths
|
||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||
|
||||
pub const ROLE_AUDIT: u64 =
|
||||
|
@ -96,9 +96,7 @@ impl CachedUserInfo {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl CachedUserInfo {
|
||||
pub fn is_superuser(&self, userid: &Userid) -> bool {
|
||||
userid == "root@pam"
|
||||
}
|
||||
|
@ -44,6 +44,10 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -83,6 +87,8 @@ pub struct DataStoreConfig {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub prune_schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub verify_schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub keep_last: Option<u64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub keep_hourly: Option<u64>,
|
||||
|
@ -48,7 +48,6 @@ use proxmox::tools::fs::{
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@ -98,7 +97,7 @@ where
|
||||
{
|
||||
let mut path = path.as_ref().to_path_buf();
|
||||
path.set_extension("lck");
|
||||
let lock = open_file_locked(&path, Duration::new(10, 0))?;
|
||||
let lock = open_file_locked(&path, Duration::new(10, 0), true)?;
|
||||
let backup_user = crate::backup::backup_user()?;
|
||||
nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
|
||||
Ok(lock)
|
||||
@ -178,7 +177,7 @@ impl JobState {
|
||||
}
|
||||
} else {
|
||||
Ok(JobState::Created {
|
||||
time: epoch_now_u64()? as i64 - 30,
|
||||
time: proxmox::tools::time::epoch_i64() - 30,
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -199,7 +198,7 @@ impl Job {
|
||||
jobtype: jobtype.to_string(),
|
||||
jobname: jobname.to_string(),
|
||||
state: JobState::Created {
|
||||
time: epoch_now_u64()? as i64,
|
||||
time: proxmox::tools::time::epoch_i64(),
|
||||
},
|
||||
_lock,
|
||||
})
|
||||
|
@ -17,7 +17,7 @@ pub use lexer::*;
|
||||
mod parser;
|
||||
pub use parser::*;
|
||||
|
||||
use crate::api2::types::{Interface, NetworkConfigMethod, NetworkInterfaceType, LinuxBondMode};
|
||||
use crate::api2::types::{Interface, NetworkConfigMethod, NetworkInterfaceType, LinuxBondMode, BondXmitHashPolicy};
|
||||
|
||||
lazy_static!{
|
||||
static ref PHYSICAL_NIC_REGEX: Regex = Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap();
|
||||
@ -44,6 +44,19 @@ pub fn bond_mode_to_str(mode: LinuxBondMode) -> &'static str {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bond_xmit_hash_policy_from_str(s: &str) -> Result<BondXmitHashPolicy, Error> {
|
||||
BondXmitHashPolicy::deserialize(s.into_deserializer())
|
||||
.map_err(|_: value::Error| format_err!("invalid bond_xmit_hash_policy '{}'", s))
|
||||
}
|
||||
|
||||
pub fn bond_xmit_hash_policy_to_str(policy: &BondXmitHashPolicy) -> &'static str {
|
||||
match policy {
|
||||
BondXmitHashPolicy::layer2 => "layer2",
|
||||
BondXmitHashPolicy::layer2_3 => "layer2+3",
|
||||
BondXmitHashPolicy::layer3_4 => "layer3+4",
|
||||
}
|
||||
}
|
||||
|
||||
impl Interface {
|
||||
|
||||
pub fn new(name: String) -> Self {
|
||||
@ -67,6 +80,8 @@ impl Interface {
|
||||
bridge_vlan_aware: None,
|
||||
slaves: None,
|
||||
bond_mode: None,
|
||||
bond_primary: None,
|
||||
bond_xmit_hash_policy: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,6 +184,19 @@ impl Interface {
|
||||
NetworkInterfaceType::Bond => {
|
||||
let mode = self.bond_mode.unwrap_or(LinuxBondMode::balance_rr);
|
||||
writeln!(w, "\tbond-mode {}", bond_mode_to_str(mode))?;
|
||||
if let Some(primary) = &self.bond_primary {
|
||||
if mode == LinuxBondMode::active_backup {
|
||||
writeln!(w, "\tbond-primary {}", primary)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(xmit_policy) = &self.bond_xmit_hash_policy {
|
||||
if mode == LinuxBondMode::ieee802_3ad ||
|
||||
mode == LinuxBondMode::balance_xor
|
||||
{
|
||||
writeln!(w, "\tbond_xmit_hash_policy {}", bond_xmit_hash_policy_to_str(xmit_policy))?;
|
||||
}
|
||||
}
|
||||
|
||||
let slaves = self.slaves.as_ref().unwrap_or(&EMPTY_LIST);
|
||||
if slaves.is_empty() {
|
||||
|
@ -149,7 +149,7 @@ pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error>
|
||||
.output()
|
||||
.map_err(|err| format_err!("failed to execute diff - {}", err))?;
|
||||
|
||||
let diff = crate::tools::command_output(output, Some(|c| c == 0 || c == 1))
|
||||
let diff = crate::tools::command_output_as_string(output, Some(|c| c == 0 || c == 1))
|
||||
.map_err(|err| format_err!("diff failed: {}", err))?;
|
||||
|
||||
Ok(diff)
|
||||
|
@ -26,6 +26,8 @@ pub enum Token {
|
||||
BridgeVlanAware,
|
||||
BondSlaves,
|
||||
BondMode,
|
||||
BondPrimary,
|
||||
BondXmitHashPolicy,
|
||||
EOF,
|
||||
}
|
||||
|
||||
@ -51,7 +53,10 @@ lazy_static! {
|
||||
map.insert("bond-slaves", Token::BondSlaves);
|
||||
map.insert("bond_slaves", Token::BondSlaves);
|
||||
map.insert("bond-mode", Token::BondMode);
|
||||
map.insert("bond_mode", Token::BondMode);
|
||||
map.insert("bond-primary", Token::BondPrimary);
|
||||
map.insert("bond_primary", Token::BondPrimary);
|
||||
map.insert("bond_xmit_hash_policy", Token::BondXmitHashPolicy);
|
||||
map.insert("bond-xmit-hash-policy", Token::BondXmitHashPolicy);
|
||||
map
|
||||
};
|
||||
}
|
||||
|
@ -9,7 +9,7 @@ use regex::Regex;
|
||||
use super::helper::*;
|
||||
use super::lexer::*;
|
||||
|
||||
use super::{NetworkConfig, NetworkOrderEntry, Interface, NetworkConfigMethod, NetworkInterfaceType, bond_mode_from_str};
|
||||
use super::{NetworkConfig, NetworkOrderEntry, Interface, NetworkConfigMethod, NetworkInterfaceType, bond_mode_from_str, bond_xmit_hash_policy_from_str};
|
||||
|
||||
pub struct NetworkParser<R: BufRead> {
|
||||
input: Peekable<Lexer<R>>,
|
||||
@ -243,6 +243,18 @@ impl <R: BufRead> NetworkParser<R> {
|
||||
interface.bond_mode = Some(bond_mode_from_str(&mode)?);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::BondPrimary => {
|
||||
self.eat(Token::BondPrimary)?;
|
||||
let primary = self.next_text()?;
|
||||
interface.bond_primary = Some(primary);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::BondXmitHashPolicy => {
|
||||
self.eat(Token::BondXmitHashPolicy)?;
|
||||
let policy = bond_xmit_hash_policy_from_str(&self.next_text()?)?;
|
||||
interface.bond_xmit_hash_policy = Some(policy);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::Netmask => bail!("netmask is deprecated and no longer supported"),
|
||||
|
||||
_ => { // parse addon attributes
|
||||
|
@ -39,6 +39,11 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
|
||||
host: {
|
||||
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||
},
|
||||
port: {
|
||||
optional: true,
|
||||
description: "The (optional) port",
|
||||
type: u16,
|
||||
},
|
||||
userid: {
|
||||
type: Userid,
|
||||
},
|
||||
@ -58,6 +63,8 @@ pub struct Remote {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
pub host: String,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub port: Option<u16>,
|
||||
pub userid: Userid,
|
||||
#[serde(skip_serializing_if="String::is_empty")]
|
||||
#[serde(with = "proxmox::tools::serde::string_as_base64")]
|
||||
|
@ -1,3 +1,5 @@
|
||||
pub mod task;
|
||||
|
||||
#[macro_use]
|
||||
pub mod buildcfg;
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.