Compare commits
297 Commits
(Commit list: 297 commits, newest 052aaeb5e9 through oldest 10426c1750; only the abbreviated SHA1s are preserved, the author, date, and message columns are not.)
Cargo.toml (20 changed lines)

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.1.3"
+version = "0.2.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,44 +14,48 @@ name = "proxmox_backup"
 path = "src/lib.rs"

 [dependencies]
-base64 = "0.10"
+base64 = "0.12"
+bitflags = "1.2.1"
 bytes = "0.5"
 chrono = "0.4" # Date and time library for Rust
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
-failure = "0.1"
+anyhow = "1.0"
 futures = "0.3"
 h2 = { version = "0.2", features = ["stream"] }
 handlebars = "3.0"
 http = "0.2"
 hyper = "0.13"
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
 native-tls = "0.2"
 nix = "0.16"
 once_cell = "1.3.1"
 openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
-pin-utils = "0.1.0-alpha"
-proxmox = { version = "0.1.18", features = [ "sortable-macro", "api-macro" ] }
+pin-utils = "0.1.0"
+proxmox = { version = "0.1.36", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 regex = "1.2"
-rustyline = "5.0.5"
+rustyline = "6"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
 tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
-tokio-util = { version = "0.2.0", features = [ "codec" ] }
+tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
+udev = "0.3"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.4", features = [ "bindgen" ] }
+nom = "5.1"

 [features]
 default = []
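Most of the Cargo.toml churn above is routine version bumps; the swap that actually ripples through the source tree is failure -> anyhow. As a rough sketch of what such a migration looks like at a call site (illustrative only, not code from this repository; the function and error text are made up):

    use anyhow::{bail, Context, Error};

    // With the failure crate this was `use failure::{bail, format_err, Error};`
    // plus manual `map_err(|e| format_err!(...))` wrapping. anyhow keeps the
    // macro names, so most call sites only need the import changed and the
    // `Context` trait for error wrapping.
    fn load_chunk(path: &str) -> Result<Vec<u8>, Error> {
        if path.is_empty() {
            bail!("empty chunk path");
        }
        std::fs::read(path).with_context(|| format!("reading chunk {:?} failed", path))
    }

    fn main() {
        if let Err(err) = load_chunk("") {
            eprintln!("Error: {:#}", err); // `{:#}` prints the whole context chain
        }
    }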
Makefile (11 changed lines)

@@ -37,10 +37,12 @@ CARGO ?= cargo
 COMPILED_BINS := \
     $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))

-DEBS= ${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb ${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
+
+DEBS=${SERVER_DEB} ${CLIENT_DEB}

 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc

 DESTDIR=
@@ -135,7 +137,8 @@ install: $(COMPILED_BINS)
     $(MAKE) -C docs install

 .PHONY: upload
-upload: ${DEBS}
+upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
-    tar cf - ${DEBS} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
TODO.rst (23 changed lines)

@@ -1,22 +1,39 @@
 TODO list for Proxmox Backup
 ============================

-* user management api
-
 * disk management api

-* start writing server GUI

 * improve catalog shell commands

 * improve user documentation

+
+GUI
+===
+
+* fix network/dns GUI (network/dns api changed)
+
+* user/acl/permission management GUI
+
+* implement GUI to configure remotes
+
+* implement fancy DatastoreStatus.js dashboard
+
+* implement PVE GUI to add PBS storage (with convenient copy/paste
+  functionality, like we have for cluster join)
+
+
+Chores:
+=======
+
+* move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/
+
+* recompute PXAR_ header types from strings: avoid using numbers from casync
+
+* remove pbs-* systemd timers and services on package purge
+

 Suggestions
 ===========
debian/changelog (vendored, 6 changed lines)

@@ -1,3 +1,9 @@
+rust-proxmox-backup (0.2.0-1) unstable; urgency=medium
+
+  * see git changelog (too many changes)
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 25 May 2020 19:17:03 +0200
+
 rust-proxmox-backup (0.1.3-1) unstable; urgency=medium

   * use SectionConfig from proxmox 0.1.18-1
debian/control.in (vendored, 2 changed lines)

@@ -4,7 +4,7 @@ Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libzstd1 (>= 1.3.8),
          proxmox-mini-journalreader,
-         proxmox-widget-toolkit,
+         proxmox-widget-toolkit (>= 2.2-4),
          ${misc:Depends},
          ${shlibs:Depends},
 Description: Proxmox Backup Server daemon with tools and GUI
debian/debcargo.toml (vendored, 29 changed lines)

@@ -11,8 +11,31 @@ vcs_git = ""
 vcs_browser = ""
 maintainer = "Proxmox Support Team <support@proxmox.com>"
 section = "admin"
-build_depends = [ "debhelper (>= 12~)", "bash-completion" ]
-build_depends_excludes = [ "debhelper (>=11)" ]
+build_depends = [
+    "debhelper (>= 12~)",
+    "bash-completion",
+    "python3-docutils",
+    "python3-pygments",
+    "rsync",
+    "fonts-dejavu-core <!nodoc>",
+    "fonts-lato <!nodoc>",
+    "fonts-open-sans <!nodoc>",
+    "graphviz <!nodoc>",
+    "latexmk <!nodoc>",
+    "python3-sphinx <!nodoc>",
+    "texlive-fonts-extra <!nodoc>",
+    "texlive-fonts-recommended <!nodoc>",
+    "texlive-xetex <!nodoc>",
+    "xindy <!nodoc>",
+]
+build_depends_excludes = [
+    "debhelper (>=11)",
+]

 [packages.lib]
-depends = [ "libacl1-dev", "libsystemd-dev", "libfuse3-dev", "uuid-dev" ]
+depends = [
+    "libacl1-dev",
+    "libfuse3-dev",
+    "libsystemd-dev",
+    "uuid-dev",
+]
debian/proxmox-backup-server.install (vendored, 1 changed line)

@@ -5,6 +5,7 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
+usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
 usr/sbin/proxmox-backup-manager
 usr/share/javascript/proxmox-backup/index.hbs
 usr/share/javascript/proxmox-backup/css/ext6-pbs.css
 usr/share/javascript/proxmox-backup/images/logo-128.png
 usr/share/javascript/proxmox-backup/images/proxmox_logo.png
docs/Makefile

@@ -73,10 +73,11 @@ html: ${GENERATED_SYNOPSIS}

 .PHONY: latexpdf
 latexpdf: ${GENERATED_SYNOPSIS}
+    @echo "Requires python3-sphinx, texlive-xetex, xindy and texlive-fonts-extra"
     $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-    @echo "Running LaTeX files through pdflatex..."
+    @echo "Running LaTeX files through xelatex..."
     $(MAKE) -C $(BUILDDIR)/latex all-pdf
-    @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+    @echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex."

 .PHONY: epub3
 epub3: ${GENERATED_SYNOPSIS}
docs/administration-guide.rst

@@ -3,6 +3,7 @@ Administration Guide

 The administration guide.

+.. todo:: either add a bit more explanation or remove the previous sentence

 Terminology
 -----------
@@ -12,7 +13,7 @@ Backup Content

 When doing deduplication, there are different strategies to get
 optimal results in terms of performance and/or deduplication rates.
-Depending on the type of data, one can split data into fixed or variable
+Depending on the type of data, one can split data into *fixed* or *variable*
 sized chunks.

 Fixed sized chunking needs almost no CPU performance, and is used to
@@ -21,7 +22,7 @@ backup virtual machine images.
 Variable sized chunking needs more CPU power, but is essential to get
 good deduplication rates for file archives.

-Therefore, the backup server supports both strategies.
+The backup server supports both strategies.

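As an aside on this hunk: the point of *variable* sized chunking is that chunk boundaries are derived from the content itself, so an insertion near the start of a file only disturbs nearby chunks instead of shifting every later boundary. A toy Rust sketch of the idea (illustrative only; the real client uses a Buzhash-style rolling hash and enforces minimum/maximum chunk sizes, which this omits):

    // Emit a boundary whenever the low bits of a running checksum hit zero.
    // The checksum restarts after every cut, so identical data following a
    // common boundary always produces identical chunks, which is exactly
    // what deduplication needs.
    const MASK: u32 = (1 << 13) - 1; // one boundary every ~8 KiB on average

    fn chunk_boundaries(data: &[u8]) -> Vec<usize> {
        let mut boundaries = Vec::new();
        let mut hash: u32 = 0;
        for (i, &b) in data.iter().enumerate() {
            hash = hash.wrapping_mul(31).wrapping_add(u32::from(b));
            if hash & MASK == 0 {
                boundaries.push(i + 1); // cut after this byte
                hash = 0;               // restart for the next chunk
            }
        }
        boundaries.push(data.len()); // final partial chunk
        boundaries
    }

    fn main() {
        let a = b"some example payload ".repeat(4096);
        let mut b = a.clone();
        b.insert(10, b'!'); // a one-byte insertion near the front
        println!("{} vs {} chunks", chunk_boundaries(&a).len(), chunk_boundaries(&b).len());
    }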
@@ -29,9 +30,9 @@ File Archives: ``<name>.pxar``

 .. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/

-A file archive stores a whole directory tree. Content is stored using
+A file archive stores a full directory tree. Content is stored using
 the :ref:`pxar-format`, split into variable sized chunks. The format
-is specially optimized to achieve good deduplication rates.
+is optimized to achieve good deduplication rates.


 Image Archives: ``<name>.img``
@@ -44,8 +45,8 @@ data. Content is split into fixed sized chunks.
 Binary Data (BLOBs)
 ^^^^^^^^^^^^^^^^^^^

-This type is used to store smaller (< 16MB) binaries like
-configuration data. Larger files should be stored as image archive.
+This type is used to store smaller (< 16MB) binary data such as
+configuration files. Larger files should be stored as image archive.

 .. caution:: Please do not store all files as BLOBs. Instead, use the
    file archive to store whole directory trees.
@@ -54,15 +55,15 @@
 Catalog File: ``catalog.pcat1``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-The catalog file is basically an index for file archive. It contains
-the list of files, and is used to speedup search operations.
+The catalog file is an index for file archives. It contains
+the list of files and is used to speed-up search operations.


 The Manifest: ``index.json``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-The manifest contains the list of all backup files, including
-file sizes and checksums. It is used to verify the consistency of a
+The manifest contains the list of all backup files, their
+sizes and checksums. It is used to verify the consistency of a
 backup.

@@ -73,17 +74,17 @@ The backup server groups backups by *type*, where *type* is one of:

 ``vm``
    This type is used for :term:`virtual machine`\ s. Typically
-   contains the virtual machine configuration and an image archive
+   contains the virtual machine's configuration and an image archive
    for each disk.

 ``ct``
-   This type is used for :term:`container`\ s. Contains the container
+   This type is used for :term:`container`\ s. Contains the container's
    configuration and a single file archive for the container content.

 ``host``
-   This type is used for physical host, or if you want to run backups
-   manually from inside virtual machines or containers. Such backups
-   may contain file and image archives (no restrictions here).
+   This type is used for backups created from within the backed up machine.
+   Typically this would be a physical host but could also be a virtual machine
+   or container. Such backups may contain file and image archives; there are no restrictions in this regard.

@@ -102,14 +103,14 @@ The time when the backup was made.
 Backup Group
 ~~~~~~~~~~~~

-We call the tuple ``<type>/<ID>`` a backup group. Such group
-may contains one or more backup snapshots.
+The tuple ``<type>/<ID>`` is called a backup group. Such a group
+may contain one or more backup snapshots.


 Backup Snapshot
 ~~~~~~~~~~~~~~~

-We call the triplet ``<type>/<ID>/<time>`` a backup snapshot. It
+The triplet ``<type>/<ID>/<time>`` is called a backup snapshot. It
 uniquely identifies a specific backup within a datastore.

 .. code-block:: console
@@ -118,25 +119,25 @@ uniquely identifies a specific backup within a datastore.

    vm/104/2019-10-09T08:01:06Z
    host/elsa/2019-11-08T09:48:14Z

-As you can see, the time is formatted as RFC3339_ using Coordinated
+As you can see, the time format is RFC3339_ with Coordinated
 Universal Time (UTC_, identified by the trailing *Z*).


 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~

-A datastore is a place to store backups. The current implementation
+A datastore is a place where backups are stored. The current implementation
 uses a directory inside a standard unix file system (``ext4``, ``xfs``
-or ``zfs``) to store backup data.
+or ``zfs``) to store the backup data.

-Datastores are identified by a simple *ID*. You can configure that
+Datastores are identified by a simple *ID*. You can configure it
 when setting up the backup server.


 Backup Server Management
 ------------------------

-The command line tool to configure and manage the server is called
+The command line tool to configure and manage the backup server is called
 :command:`proxmox-backup-manager`.

@@ -144,7 +145,9 @@ Datastore Configuration
 ~~~~~~~~~~~~~~~~~~~~~~~

 A :term:`datastore` is a place to store backups. You can configure
-several datastores, but you need at least one of them. The datastore is identified by a simple `name` and point to a directory.
+multiple datastores. At least one datastore needs to be
+configured. The datastore is identified by a simple `name` and points
+to a directory.

 The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
@@ -152,20 +155,24 @@ The following command creates a new datastore called ``store1`` on :file:`/backu

 .. code-block:: console

    # proxmox-backup-manager datastore create store1 /backup/disk1/store1

-To list existing datastores use:
+To list existing datastores run:

 .. code-block:: console

    # proxmox-backup-manager datastore list
-   store1 /backup/disk1/store1
+   ┌────────┬──────────────────────┬─────────────────────────────┐
+   │ name   │ path                 │ comment                     │
+   ╞════════╪══════════════════════╪═════════════════════════════╡
+   │ store1 │ /backup/disk1/store1 │ This is my default storage. │
+   └────────┴──────────────────────┴─────────────────────────────┘

-Finally, it is also possible to remove the datastore configuration:
+Finally, it is possible to remove the datastore configuration:

 .. code-block:: console

    # proxmox-backup-manager datastore remove store1

-.. note:: Above command removes the datastore configuration. It does
+.. note:: The above command removes only the datastore configuration. It does
    not delete any data from the underlying directory.

@@ -175,6 +182,126 @@ File Layout
 ~~~~~~~~~~~

 .. todo:: Add datastore file layout example

+
+User Management
+~~~~~~~~~~~~~~~
+
+Proxmox Backup supports several authentication realms, and you need to
+choose the realm when you add a new user. Possible realms are:
+
+:pam: Linux PAM standard authentication. Use this if you want to
+   authenticate as a Linux system user (users need to exist on the
+   system).
+
+:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
+   ``/etc/proxmox-backup/shadow.json``.
+
+After installation, there is a single user ``root@pam``, which
+corresponds to the Unix superuser. You can use the
+``proxmox-backup-manager`` command line tool to list or manipulate
+users:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user list
+   ┌─────────────┬────────┬────────┬───────────┬──────────┬────────────────┬────────────────────┐
+   │ userid      │ enable │ expire │ firstname │ lastname │ email          │ comment            │
+   ╞═════════════╪════════╪════════╪═══════════╪══════════╪════════════════╪════════════════════╡
+   │ root@pam    │      1 │        │           │          │                │ Superuser          │
+   └─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
+
+The superuser has full administration rights on everything, so you
+normally want to add other users with less privileges:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user create john@pbs --email john@example.com
+
+The create command lets you specify many options like ``--email`` or
+``--password``, but you can update or change any of them using the
+update command later:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
+   # proxmox-backup-manager user update john@pbs --comment "An example user."
+
+
+.. todo:: Mention how to set password without passing plaintext password as cli argument.
+
+
+The resulting user list looks like this:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user list
+   ┌──────────┬────────┬────────┬───────────┬──────────┬──────────────────┬──────────────────┐
+   │ userid   │ enable │ expire │ firstname │ lastname │ email            │ comment          │
+   ╞══════════╪════════╪════════╪═══════════╪══════════╪══════════════════╪══════════════════╡
+   │ john@pbs │      1 │        │ John      │ Smith    │ john@example.com │ An example user. │
+   ├──────────┼────────┼────────┼───────────┼──────────┼──────────────────┼──────────────────┤
+   │ root@pam │      1 │        │           │          │                  │ Superuser        │
+   └──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
+
+Newly created users do not have any permissions. Please read the next
+section to learn how to set access permissions.
+
+If you want to disable a user account, you can do that by setting ``--enable`` to ``0``:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user update john@pbs --enable 0
+
+Or completely remove the user with:
+
+.. code-block:: console
+
+   # proxmox-backup-manager user remove john@pbs
+
+
+Access Control
+~~~~~~~~~~~~~~
+
+Users do not have any permission by default. Instead you need to
+specify what is allowed and what not. You can do this by assigning
+roles to users on specific objects like datastores or remotes. The
+following roles exist:
+
+**Admin**
+   The Administrator can do anything.
+
+**Audit**
+   An Auditor can view things, but is not allowed to change settings.
+
+**NoAccess**
+   Disable Access - nothing is allowed.
+
+**DatastoreAdmin**
+   Can do anything on datastores.
+
+**DatastoreAudit**
+   Can view datastore settings and list content. But
+   is not allowed to read the actual data.
+
+**DataStoreReader**
+   Can inspect datastore content and do restores.
+
+**DataStoreBackup**
+   Can backup and restore owned backups.
+
+**DatastorePowerUser**
+   Can backup, restore, and prune owned backups.
+
+**RemoteAdmin**
+   Can do anything on remotes.
+
+**RemoteAudit**
+   Can view remote settings.
+
+**RemoteSyncOperator**
+   Is allowed to read data from a remote.
+
+

 Backup Client usage
 -------------------

@@ -184,16 +311,16 @@ The command line client is called :command:`proxmox-backup-client`.

 Repository Locations
 ~~~~~~~~~~~~~~~~~~~~

-The client uses a special repository notation to specify a datastore
+The client uses the following notation to specify a datastore repository
 on the backup server.

    [[username@]server:]datastore

-If you do not specify a ``username`` the default is ``root@pam``. The
-default for server is to use the local host (``localhost``).
+The default value for ``username`` is ``root``. If no server is specified, the
+default is the local host (``localhost``).

-You can pass the repository by setting the ``--repository`` command
-line options, or by setting the ``PBS_REPOSITORY`` environment
+You can pass the repository with the ``--repository`` command
+line option, or by setting the ``PBS_REPOSITORY`` environment
 variable.

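The notation above is simple enough that the parsing logic fits in a few lines. A sketch using the defaults stated in the new text (``root`` and ``localhost``); this is not the client's actual parser and it skips validation of the individual parts:

    #[derive(Debug)]
    struct Repository {
        username: String,
        server: String,
        datastore: String,
    }

    /// Parse "[[username@]server:]datastore". Note that usernames such as
    /// "john@pbs" contain '@' themselves, hence the *right*-most split.
    fn parse_repository(spec: &str) -> Repository {
        match spec.rsplit_once(':') {
            Some((prefix, datastore)) => {
                let (username, server) = match prefix.rsplit_once('@') {
                    Some((user, host)) => (user.to_string(), host.to_string()),
                    None => ("root".to_string(), prefix.to_string()),
                };
                Repository { username, server, datastore: datastore.to_string() }
            }
            None => Repository {
                username: "root".to_string(),
                server: "localhost".to_string(),
                datastore: spec.to_string(),
            },
        }
    }

    fn main() {
        // "store1"                        -> root @ localhost : store1
        // "backup-server:store1"          -> root @ backup-server : store1
        // "john@pbs@backup-server:store1" -> john@pbs @ backup-server : store1
        println!("{:?}", parse_repository("john@pbs@backup-server:store1"));
    }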
@@ -219,8 +346,8 @@ Environment Variables

 Output Format
 ~~~~~~~~~~~~~

-Most commands support the ``--output-format`` parameter, which can be
-set to the following values:
+Most commands support the ``--output-format`` parameter. It accepts
+the following values:

 :``text``: Text format (default). Structured data is rendered as a table.

@@ -240,9 +367,9 @@ Please use the following environment variables to modify output behavior:

 ``PROXMOX_OUTPUT_NO_HEADER``
    If set (to any value), do not render table headers.

-.. note:: The ``text`` format is designed to be human readable, but
+.. note:: The ``text`` format is designed to be human readable, and
    not meant to be parsed by automation tools. Please use the ``json``
-   format for such purpose because it is machine readable.
+   format if you need to process the output.


 .. _creating-backups:

@@ -250,15 +377,15 @@

 Creating Backups
 ~~~~~~~~~~~~~~~~

-This section explains how to create backup on physical host, or from
-inside virtual machines or containers. Such backups may contain file
-and image archives (no restrictions here).
+This section explains how to create a backup from within the machine. This can
+be a physical host, a virtual machine, or a container. Such backups may contain file
+and image archives. There are no restrictions in this case.

-.. note:: If you want to backup virtual machines or containers see :ref:`pve-integration`.
+.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.

-The prerequisite is that you have already set up (or can access) a
-backup server. It is assumed that you know the repository name and
-credentials. In the following examples we simply use ``backup-server:store1``.
+For the following example you need to have a backup server set up, working
+credentials and need to know the repository name.
+In the following examples we use ``backup-server:store1``.

 .. code-block:: console

@@ -275,15 +402,15 @@ credentials. In the following examples we simply use ``backup-server:store1``.

 This will prompt you for a password and then uploads a file archive named
 ``root.pxar`` containing all the files in the ``/`` directory.

-.. Caution:: Please note that proxmox-backup-client does not
+.. Caution:: Please note that the proxmox-backup-client does not
    automatically include mount points. Instead, you will see a short
-   ``skip mount point`` notice for each of them. The idea is that you
-   create a separate file archive for each mounted disk. You can also
+   ``skip mount point`` notice for each of them. The idea is to
+   create a separate file archive for each mounted disk. You can
    explicitly include them using the ``--include-dev`` option
    (i.e. ``--include-dev /boot/efi``). You can use this option
-   multiple times, once for each mount point you want to include.
+   multiple times for each mount point that should be included.

-The ``--repository`` option is sometimes quite long and is used by all
+The ``--repository`` option can get quite long and is used by all
 commands. You can avoid having to enter this value by setting the
 environment variable ``PBS_REPOSITORY``.

@@ -291,26 +418,26 @@ environment variable ``PBS_REPOSITORY``.

    # export PBS_REPOSITORY=backup-server:store1

-You can then execute all commands without specifying the ``--repository``
+After this you can execute all commands without specifying the ``--repository``
 option.

-One single backup is allowed to contain more than one archive. For example, assume you want to backup two disks mounted at ``/mmt/disk1`` and ``/mnt/disk2``:
+One single backup is allowed to contain more than one archive. For example, if
+you want to backup two disks mounted at ``/mnt/disk1`` and ``/mnt/disk2``:

 .. code-block:: console

    # proxmox-backup-client backup disk1.pxar:/mnt/disk1 disk2.pxar:/mnt/disk2

-This create a backup of both disks.
+This creates a backup of both disks.

 The backup command takes a list of backup specifications, which
-include archive name on the server, the type of the archive, and the
-archive source at the client. The format is quite simple to understand:
+include the archive name on the server, the type of the archive, and the
+archive source at the client. The format is:

    <archive-name>.<type>:<source-path>

 Common types are ``.pxar`` for file archives, and ``.img`` for block
-device images. Thus it is quite easy to create a backup for a block
-device:
+device images. To create a backup of a block device run the following command:

 .. code-block:: console

@@ -320,42 +447,43 @@ Excluding files/folders from a backup
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Sometimes it is desired to exclude certain files or folders from a backup archive.
-Using the proxmox backup client this is possible via simple text based
-``.pxarexclude`` files placed in the filesystem hierarchy.
-
-Whenever such a file is encountered in a directory, the backup client reads its
-content and lines are interpreted as glob match patterns for files/directories
-to exclude from the archive.
-The file must contain a single glob pattern on each line. Empty lines are ignored.
-The same is true for lines starting with ``#``, indicating a line containing comments.
-Lines starting with ``!`` correspond to glob match patterns for explicit inclusion
-of files previously excluded by a match. This allows for example to exclude
-all entries in a directory except for a few single files.
-Lines ending in ``/`` match directory entries only.
-The folder containing the ``.pxarexclude`` file is considered to be the root of
-the given patterns. It is only possible to match files in this or below this folder.
+To tell the Proxmox backup client when and how to ignore files and directories,
+place a text file called ``.pxarexclude`` in the filesystem hierarchy.
+Whenever the backup client encounters such a file in a directory, it interprets
+each line as glob match patterns for files and directories that are to be excluded
+from the backup.
+
+The file must contain a single glob pattern per line. Empty lines are ignored.
+The same is true for lines starting with ``#``, which indicates a comment.
+A ``!`` at the beginning of a line reverses the glob match pattern from an exclusion
+to an explicit inclusion. This makes it possible to exclude all entries in a
+directory except for a few single files/subdirectories.
+Lines ending in ``/`` match only on directories.
+The directory containing the ``.pxarexclude`` file is considered to be the root of
+the given patterns. It is only possible to match files in this directory and its subdirectories.

-``\`` is used to escape glob characters. ``?`` matches any single character,
-``*`` matches any character including the empty string.
-``**`` is used to match also subdirectories and can be used to exclude for example
-all files ending in ``.tmp`` within the directory or a subdirectory by the
+``\`` is used to escape special glob characters.
+``?`` matches any single character.
+``*`` matches any character, including an empty string.
+``**`` is used to match subdirectories. It can be used to, for example, exclude
+all files ending in ``.tmp`` within the directory or subdirectories with the
 following pattern ``**/*.tmp``.
 ``[...]`` matches a single character from any of the provided characters within
 the brackets. ``[!...]`` does the complementary and matches any single character
-not contained within the brackets. It is also possible to specify ranges by two
-characters separated by ``-``. For example ``[a-z]`` matches any lowercase
-alphabetic character, ``[0-9]`` matches any one single digit.
+not contained within the brackets. It is also possible to specify ranges with two
+characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
+alphabetic character and ``[0-9]`` matches any one single digit.

-The order of the glob match patterns defines if the file is finally included or
+The order of the glob match patterns defines if a file is included or
 excluded, later entries win over previous ones.
 This is also true for match patterns encountered deeper down the directory tree,
-which may then override a previous exclusion.
-Note however that folders marked for exclusion are not read by the client,
-so ``.pxarexclude`` files contained within have no effect.
-``.pxarexclude`` files are treated as regular files and are also included in the
+which can override a previous exclusion.
+Be aware that excluded directories will **not** be read by the backup client.
+A ``.pxarexclude`` file in a subdirectory will have no effect.
+``.pxarexclude`` files are treated as regular files and will be included in the
 backup archive.

-For example, consider the following folder structure:
+For example, consider the following directory structure:

 .. code-block:: console

@@ -369,7 +497,7 @@ For example, consider the following folder structure:
    folder/subfolder1:
    . .. file0 file1 file2 file3

-The ``.pxarexclude`` files containing the following:
+The different ``.pxarexclude`` files contain the following:

 .. code-block:: console

@@ -386,7 +514,7 @@ The ``.pxarexclude`` files containing the following:
 This would exclude ``file1`` and ``file3`` in ``subfolder0`` and all of
 ``subfolder1`` except ``file2``.

-Restoring this archive form backup results in:
+Restoring this backup will result in:

 .. code-block:: console

@@ -403,8 +531,8 @@ Restoring this archive form backup results in:
 Encryption
 ^^^^^^^^^^

-Proxmox backup support client side encryption using AES-256 in GCM_
-mode. You first need to create an encryption key in order to use that:
+Proxmox backup supports client side encryption with AES-256 in GCM_
+mode. First you need to create an encryption key:

 .. code-block:: console

@@ -427,7 +555,7 @@ extra protection, you can also create it without a password:
    ...


-You can avoid having to enter the passwords by setting the environment
+You can avoid entering the passwords by setting the environment
 variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.

 .. todo:: Explain master-key
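For readers unfamiliar with GCM: it is an authenticated mode, so every chunk carries a tag, and decryption fails if either the ciphertext or the tag was tampered with. With the openssl crate the client already depends on (see the Cargo.toml hunk above), this is a single call. A minimal sketch; it is not the actual on-disk chunk format, which wraps the ciphertext in magic numbers and CRCs:

    use openssl::rand::rand_bytes;
    use openssl::symm::{decrypt_aead, encrypt_aead, Cipher};

    /// Encrypt one chunk with AES-256-GCM; returns (iv, tag, ciphertext).
    fn seal_chunk(key: &[u8; 32], plain: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
        let mut iv = vec![0u8; 12]; // 96-bit nonce, the GCM default
        rand_bytes(&mut iv).expect("rng failure");
        let mut tag = vec![0u8; 16];
        let cipher = encrypt_aead(Cipher::aes_256_gcm(), key, Some(&iv), b"", plain, &mut tag)
            .expect("encrypt failed");
        (iv, tag, cipher)
    }

    fn main() {
        let key = [0x42u8; 32]; // in real use, derived from the key file
        let (iv, tag, cipher) = seal_chunk(&key, b"chunk payload");
        let plain = decrypt_aead(Cipher::aes_256_gcm(), &key, Some(&iv), b"", &cipher, &tag)
            .expect("tag mismatch");
        assert_eq!(plain, b"chunk payload");
    }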
@@ -437,22 +565,26 @@ Restoring Data
 ~~~~~~~~~~~~~~

 The regular creation of backups is a necessary step to avoid data
-loss. More important, however, is the restoration. Be sure to perform
-periodic recovery tests to ensure that you can access your data in
+loss. More important, however, is the restoration. It is good practice to perform
+periodic recovery tests to ensure that you can access the data in
 case of problems.

-First, you need to find the snapshot you want to restore. The snapshot
-command gives you a list of all snapshots on the server:
+First, you need to find the snapshot which you want to restore. The snapshot
+command gives a list of all snapshots on the server:

 .. code-block:: console

    # proxmox-backup-client snapshots
-   ...
-   host/elsa/2019-12-03T09:30:15Z | 51788646825 | root.pxar catalog.pcat1 index.json
-   host/elsa/2019-12-03T09:35:01Z | 51790622048 | root.pxar catalog.pcat1 index.json
+   ┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
+   │ snapshot                       │ size        │ files                              │
+   ╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
+   │ host/elsa/2019-12-03T09:30:15Z │ 51788646825 │ root.pxar catalog.pcat1 index.json │
+   ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
+   │ host/elsa/2019-12-03T09:35:01Z │ 51790622048 │ root.pxar catalog.pcat1 index.json │
+   ├────────────────────────────────┼─────────────┼────────────────────────────────────┤
+   ...

-You can also inspect the catalog to find specific files.
+You can inspect the catalog to find specific files.

 .. code-block:: console

@@ -470,9 +602,8 @@ backup.

    # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/

-You can instead simply download the contents of any archive using '-'
-instead of ``/target/path``. This dumps the content to standard
-output:
+To get the contents of any archive you can restore the ``index.json`` file in the
+repository and restore it to '-'. This will dump the content to the standard output.

 .. code-block:: console

@@ -494,20 +625,18 @@ to use the interactive recovery shell.

    ...

 The interactive recovery shell is a minimalistic command line interface that
-utilizes the metadata stored in the catalog for you to quickly list, navigate and
-search files contained within a file archive.
-You can select individual files as well as select files matched by a glob pattern
-for restore.
+utilizes the metadata stored in the catalog to quickly list, navigate and
+search files in a file archive.
+To restore files, you can select them individually or match them with a glob
+pattern.

-The use of the catalog for navigation reduces the overhead otherwise caused by
-network traffic and decryption, as instead of downloading and decrypting
-individual encrypted chunks from the chunk-store to access the metadata, we only
-need to download and decrypt the catalog.
+Using the catalog for navigation reduces the overhead considerably because only
+the catalog needs to be downloaded and, optionally, decrypted.
+The actual chunks are only accessed if the metadata in the catalog is not enough
+or for the actual restore.

 Similar to common UNIX shells ``cd`` and ``ls`` are the commands used to change
-working directory and list directory contents of the archive.
+working directory and list directory contents in the archive.
 ``pwd`` shows the full path of the current working directory with respect to the
 archive root.
@@ -567,7 +696,7 @@ This allows you to access the full content of the archive in a seamless manner.
    load on your host, depending on the operations you perform on the mounted
    filesystem.

-To unmount the filesystem simply use the ``umount`` command on the mountpoint:
+To unmount the filesystem use the ``umount`` command on the mountpoint:

 .. code-block:: console

@@ -579,7 +708,7 @@ Login and Logout
 The client tool prompts you to enter the logon password as soon as you
 want to access the backup server. The server checks your credentials
 and responds with a ticket that is valid for two hours. The client
-tool automatically stores that ticket and use it for further requests
+tool automatically stores that ticket and uses it for further requests
 to this server.

 You can also manually trigger this login/logout using the login and
@@ -590,7 +719,7 @@ logout commands:

    # proxmox-backup-client login
    Password: **********

-To remove the ticket, simply issue a logout:
+To remove the ticket, issue a logout:

 .. code-block:: console

@@ -608,76 +737,78 @@ command:

    # proxmox-backup-client forget <snapshot>


-.. caution:: This command removes all the archives in this backup
-   snapshot so that they are inaccessible and unrecoverable.
+.. caution:: This command removes all archives in this backup
+   snapshot. They will be inaccessible and unrecoverable.


-Such manual removal is sometimes required, but normally the prune
+The manual removal is sometimes required, but normally the prune
 command is used to systematically delete older backups. Prune lets
-you specify which backup snapshots you want to keep. There are the
-following retention options:
+you specify which backup snapshots you want to keep. The
+following retention options are available:

 ``--keep-last <N>``
    Keep the last ``<N>`` backup snapshots.

 ``--keep-hourly <N>``
-   Keep backups for the last ``<N>`` different hours. If there is more than one
-   backup for a single hour, only the latest one is kept.
+   Keep backups for the last ``<N>`` hours. If there is more than one
+   backup for a single hour, only the latest is kept.

 ``--keep-daily <N>``
-   Keep backups for the last ``<N>`` different days. If there is more than one
-   backup for a single day, only the latest one is kept.
+   Keep backups for the last ``<N>`` days. If there is more than one
+   backup for a single day, only the latest is kept.

 ``--keep-weekly <N>``
-   Keep backups for the last ``<N>`` different weeks. If there is more than one
-   backup for a single week, only the latest one is kept.
+   Keep backups for the last ``<N>`` weeks. If there is more than one
+   backup for a single week, only the latest is kept.

-   .. note:: The weeks start on Monday and end on Sunday. The software
-      uses the `ISO week date`_ system and correctly handles weeks at
-      the end of the year.
+   .. note:: Weeks start on Monday and end on Sunday. The software
+      uses the `ISO week date`_ system and handles weeks at
+      the end of the year correctly.

 ``--keep-monthly <N>``
-   Keep backups for the last ``<N>`` different months. If there is more than one
-   backup for a single month, only the latest one is kept.
+   Keep backups for the last ``<N>`` months. If there is more than one
+   backup for a single month, only the latest is kept.

 ``--keep-yearly <N>``
-   Keep backups for the last ``<N>`` different years. If there is more than one
-   backup for a single year, only the latest one is kept.
+   Keep backups for the last ``<N>`` years. If there is more than one
+   backup for a single year, only the latest is kept.

-Those retention options are processed in the order given above. Each
-option covers a specific period of time. We say that backups within
-this period are covered by this option. The next option does not take
-care of already covered backups and only considers older backups.
+The retention options are processed in the order given above. Each option
+only covers backups within its time period. The next option does not take care
+of already covered backups. It will only consider older backups.

-The prune command also looks for unfinished and incomplete backups and
-removes them unless they are newer than the last successful backup. In
-this case, the last failed backup is retained.
+Unfinished and incomplete backups will be removed by the prune command unless
+they are newer than the last successful backup. In this case, the last failed
+backup is retained.

 .. code-block:: console

    # proxmox-backup-client prune <group> --keep-daily 7 --keep-weekly 4 --keep-monthly 3


-You can use the ``--dry-run`` option to test your settings. This just
-shows the list of existing snapshots and what action prune would take
-on that.
+You can use the ``--dry-run`` option to test your settings. This only
+shows the list of existing snapshots and which action prune would take.

 .. code-block:: console

    # proxmox-backup-client prune host/elsa --dry-run --keep-daily 1 --keep-weekly 3
    retention options: --keep-daily 1 --keep-weekly 3
    Testing prune on store "store2" group "host/elsa"
-   host/elsa/2019-12-04T13:20:37Z keep
-   host/elsa/2019-12-03T09:35:01Z remove
-   host/elsa/2019-11-22T11:54:47Z keep
-   host/elsa/2019-11-21T12:36:25Z remove
-   host/elsa/2019-11-10T10:42:20Z keep
+   ┌────────────────────────────────┬──────┐
+   │ snapshot                       │ keep │
+   ╞════════════════════════════════╪══════╡
+   │ host/elsa/2019-12-04T13:20:37Z │    1 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-12-03T09:35:01Z │    0 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-22T11:54:47Z │    1 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-21T12:36:25Z │    0 │
+   ├────────────────────────────────┼──────┤
+   │ host/elsa/2019-11-10T10:42:20Z │    1 │
+   └────────────────────────────────┴──────┘

 .. note:: Neither the ``prune`` command nor the ``forget`` command free space
-   in the chunk-store. The chunk-store still contains the data blocks
-   unless you are performing :ref:`garbage-collection`.
+   in the chunk-store. The chunk-store still contains the data blocks. To free
+   space you need to perform :ref:`garbage-collection`.
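The retention rule spelled out above (each option only covers backups within its time period, and the next option only considers older, not-yet-covered backups) can be captured compactly. Illustrative only, not the server's implementation; snapshots are assumed sorted newest first:

    use std::collections::HashSet;

    /// One retention option: keep the newest snapshot in each of the last
    /// `n` periods. `keep` carries the decisions of higher-priority options;
    /// a snapshot that is already kept still claims its period, so this
    /// option effectively only adds snapshots from older periods.
    fn apply_keep(snapshots: &[i64], keep: &mut [bool], n: usize, period: impl Fn(i64) -> String) {
        let mut periods = HashSet::new();
        for (i, &t) in snapshots.iter().enumerate() {
            if periods.len() >= n {
                break; // this option's time window is exhausted
            }
            if periods.insert(period(t)) && !keep[i] {
                keep[i] = true; // newest snapshot of a fresh period
            }
        }
    }

    fn main() {
        // Timestamps in days, newest first; two snapshots fall on day 100.
        let snaps = [100, 100, 99, 42];
        let mut keep = vec![false; snaps.len()];
        apply_keep(&snaps, &mut keep, 2, |t| format!("day-{}", t)); // --keep-daily 2
        assert_eq!(keep, [true, false, true, false]);
    }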

 .. _garbage-collection:

@@ -687,8 +818,7 @@ Garbage Collection
 ------------------

 The ``prune`` command removes only the backup index files, not the data
 from the data store. This task is left to the garbage collection
-command. It is therefore recommended to carry out garbage collection
-regularly.
+command. It is recommended to carry out garbage collection on a regular basis.

 The garbage collection works in two phases. In the first phase, all
 data blocks that are still in use are marked. In the second phase,
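The two phases described here are a classic mark-and-sweep, schematically (illustrative only; the real garbage collection works on index files and atime updates in the chunk store, and has to cope with concurrent writers):

    use std::collections::HashSet;

    /// Phase 1: walk every index still referenced by a snapshot and mark the
    /// chunks it points to. Phase 2: sweep the store, dropping everything
    /// unmarked. Strings stand in for the real SHA-256 chunk digests.
    fn garbage_collect(indexes: &[Vec<String>], store: &mut Vec<String>) -> usize {
        let mut marked: HashSet<&String> = HashSet::new();
        for index in indexes {
            for digest in index {
                marked.insert(digest); // phase 1: mark
            }
        }
        let before = store.len();
        store.retain(|digest| marked.contains(digest)); // phase 2: sweep
        before - store.len()
    }

    fn main() {
        let indexes = vec![vec!["aaa".to_string(), "bbb".to_string()]];
        let mut store = vec!["aaa".to_string(), "bbb".to_string(), "ccc".to_string()];
        println!("removed {} unused chunk(s)", garbage_collect(&indexes, &mut store)); // 1
    }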
@@ -727,6 +857,41 @@ unused data blocks are removed.
 `Proxmox VE`_ integration
 -------------------------

+You need to define a new storage with type 'pbs' on your `Proxmox VE`_
+node. The following example uses ``store2`` as storage name, and
+assumes the server address is ``localhost``, and you want to connect
+as ``user1@pbs``.
+
+.. code-block:: console
+
+   # pvesm add pbs store2 --server localhost --datastore store2
+   # pvesm set store2 --username user1@pbs --password <secret>
+
+If your backup server uses a self signed certificate, you need to add
+the certificate fingerprint to the configuration. You can get the
+fingerprint by running the following command on the backup server:
+
+.. code-block:: console
+
+   # proxmox-backup-manager cert info |grep Fingerprint
+   Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+Please add that fingerprint to your configuration to establish a trust
+relationship:
+
+.. code-block:: console
+
+   # pvesm set store2 --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
+
+After that you should be able to see storage status with:
+
+.. code-block:: console
+
+   # pvesm status --storage store2
+   Name        Type     Status           Total            Used       Available        %
+   store2       pbs     active      3905109820      1336687816      2568422004   34.23%
+

 .. include:: command-line-tools.rst

docs/conf.py (33 changed lines)

@@ -21,6 +21,21 @@
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))

+# -- Implement custom formatter for code-blocks ---------------------------
+#
+# * use smaller font
+# * avoid space between lines to nicely format utf8 tables
+
+from sphinx.highlighting import PygmentsBridge
+from pygments.formatters.latex import LatexFormatter
+
+class CustomLatexFormatter(LatexFormatter):
+    def __init__(self, **options):
+        super(CustomLatexFormatter, self).__init__(**options)
+        self.verboptions = r"formatcom=\footnotesize\relax\let\strut\empty"
+
+PygmentsBridge.latex_formatter = CustomLatexFormatter
+
 # -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
@@ -53,7 +68,7 @@ rst_epilog = epilog_file.read()

 # General information about the project.
 project = 'Proxmox Backup'
-copyright = '2019, Proxmox Support Team'
+copyright = '2019-2020, Proxmox Support Team'
 author = 'Proxmox Support Team'

 # The version info for the project you're documenting, acts as replacement for
@@ -61,9 +76,9 @@ author = 'Proxmox Support Team'
 # built documents.
 #
 # The short X.Y version.
-version = '1.0'
+version = '0.2'
 # The full version, including alpha/beta/rc tags.
-release = '1.0-1'
+release = '0.2-1'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -251,14 +266,24 @@ htmlhelp_basename = 'ProxmoxBackupdoc'

 # -- Options for LaTeX output ---------------------------------------------

+latex_engine = 'xelatex'
+
 latex_elements = {
+    'fontenc': '\\usepackage{fontspec}',
+
     # The paper size ('letterpaper' or 'a4paper').
     #
     'papersize': 'a4paper',

     # The font size ('10pt', '11pt' or '12pt').
     #
-    'pointsize': '12pt',
+    'pointsize': '10pt',
+
+    'fontpkg': r'''
+    \setmainfont{Open Sans}
+    \setsansfont{Lato}
+    \setmonofont{DejaVu Sans Mono}
+    ''',

     # Additional stuff for the LaTeX preamble.
     #
docs/glossary.rst

@@ -5,24 +5,23 @@ Glossary

 `Virtual machine`_

-   A Virtual machine is a program that can execute an entire
-   operatin system inside an emulated hardware environment.
+   A virtual machine is a program that can execute an entire
+   operating system inside an emulated hardware environment.

 `Container`_

-   A Container is an isolated user space. Programs runs directly on
-   the hosts kernel, but with limited access to the host resources.
+   A container is an isolated user space. Programs run directly on
+   the host's kernel, but with limited access to the host resources.

 Datastore

-   A place to store backups. The current implemenation is
-   file-system based, so this refers to a directory containing the
-   backup data.
+   A place to store backups. A directory which contains the backup data.
+   The current implementation is file-system based.

 `Rust`_

    Rust is a new, fast and memory-efficient system programming
-   language, with no runtime or garbage collector. Rust’s rich type
+   language. It has no runtime or garbage collector. Rust’s rich type
    system and ownership model guarantee memory-safety and
    thread-safety. It can eliminate many classes of bugs
    at compile-time.
@@ -31,11 +30,9 @@ Glossary

    Is a tool that makes it easy to create intelligent and
    beautiful documentation. It was originally created for the
-   Python documentation, and it has excellent facilities for the
+   documentation of the Python programming language. It has excellent facilities for the
    documentation of software projects in a range of languages.

-
-
 `reStructuredText`_

    Is an easy-to-read, what-you-see-is-what-you-get plaintext
@@ -44,8 +41,8 @@
 `FUSE`

    Filesystem in Userspace (`FUSE <https://en.wikipedia.org/wiki/Filesystem_in_Userspace>`_)
-   defines an interface which allows to implement a filesystem in
+   defines an interface which makes it possible to implement a filesystem in
    userspace as opposed to implementing it in the kernel. The fuse
-   kernel driver handles filesystem requests and sends them to an
-   userspace application for reply.
+   kernel driver handles filesystem requests and sends them to a
+   userspace application.

@@ -1,55 +1,50 @@
 Installation
 ============

-`Proxmox Backup`_ is split into a server part and a client part. The
-server part comes with it's own graphical installer, but we also
-ship Debian_ package repositories, so you can easily install those
-packages on any Debian_ based system.
+`Proxmox Backup`_ is split into a server and client part. The server part
+can either be installed with a graphical installer or on top of
+Debian_ from the provided package repository.

 .. include:: package-repositories.rst


 Server installation
 -------------------

-The backup server stores the actual backup data, but also provides a
-web based GUI for various management tasks, for example disk
-management.
+The backup server stores the actual backed up data and provides a web based GUI
+for various management tasks such as disk management.

 .. note:: You always need a backup server. It is not possible to use
    `Proxmox Backup`_ without the server part.

-The server is based on Debian, therefore the disk image (ISO file) provided
-by us includes a complete Debian system ("buster" for version 1.x) as
-well as all necessary backup packages.
+The disk image (ISO file) provided by Proxmox includes a complete Debian system
+("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.

-Using the installer will guide you through the setup, allowing
+The installer will guide you through the setup process and allows
 you to partition the local disk(s), apply basic system configurations
-(e.g. timezone, language, network) and install all required packages.
-Using the provided ISO will get you started in just a few minutes,
-that's why we recommend this method for new and existing users.
+(e.g. timezone, language, network), and installs all required packages.
+The provided ISO will get you started in just a few minutes, and is the
+recommended method for new and existing users.

 Alternatively, `Proxmox Backup`_ server can be installed on top of an
 existing Debian system.

-Using the `Proxmox Backup`_ Installer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Install `Proxmox Backup`_ with the Installer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You can download the ISO from |DOWNLOADS|.
+Download the ISO from |DOWNLOADS|.
 It includes the following:

-* The `Proxmox Backup`_ server installer, which partitions the local
-  disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
-  system.
-
 * Complete operating system (Debian Linux, 64-bit)

+* The `Proxmox Backup`_ server installer, which partitions the local
+  disk(s) with ext4, ext3, xfs or ZFS and installs the operating
+  system.
+
 * Our Linux kernel with ZFS support.

-* Complete toolset for administering backups and all necessary
-  resources
+* Complete tool-set to administer backups and all necessary resources

-* Web based management interface for using the toolset
+* Web based GUI management interface

 .. note:: During the installation process, the complete server
    is used by default and all existing data is removed.

@@ -58,8 +53,8 @@ It includes the following:
 Install `Proxmox Backup`_ server on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages, so you can install it on
-top of a standard Debian installation. After configuring the
+Proxmox ships as a set of Debian packages which can be installed on top of a
+standard Debian installation. After configuring the
 :ref:`sysadmin_package_repositories`, you need to run:

 .. code-block:: console

@@ -67,7 +62,7 @@ top of a standard Debian installation. After configuring the
   # apt-get update
   # apt-get install proxmox-backup-server

-Above code keeps the current (Debian) kernel and installs a minimal
+The commands above keep the current (Debian) kernel and install a minimal
 set of required packages.

 If you want to install the same set of packages as the installer

@@ -78,16 +73,15 @@ does, please use the following:
   # apt-get update
   # apt-get install proxmox-backup

-This installs all required packages, the Proxmox kernel with ZFS_
-support, and a set of commonly useful packages.
+This will install all required packages, the Proxmox kernel with ZFS_
+support, and a set of common and useful packages.

-Installing on top of an existing Debian_ installation looks easy, but
-it presumes that you have correctly installed the base system, and you
-know how you want to configure and use the local storage. Network
-configuration is also completely up to you.
+Installing `Proxmox Backup`_ on top of an existing Debian_ installation looks easy, but
+it presumes that the base system and local storage have been set up correctly.

-In general, this is not trivial, especially when you use LVM_ or
-ZFS_.
+In general this is not trivial, especially when LVM_ or ZFS_ is used.
+
+The network configuration is completely up to you as well.

 Install Proxmox Backup server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -101,9 +95,9 @@ After configuring the
   # apt-get install proxmox-backup-server

 .. caution:: Installing the backup server directly on the hypervisor
-   is not recommended. It is more secure to use a separate physical
-   server to store backups. If the hypervisor server fails, you can
-   still access your backups.
+   is not recommended. It is safer to use a separate physical
+   server to store backups. Should the hypervisor server fail, you can
+   still access the backups.

 Client installation
 -------------------

@@ -111,7 +105,7 @@ Client installation
 Install `Proxmox Backup`_ client on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages, so you can install it on
+Proxmox ships as a set of Debian packages to be installed on
 top of a standard Debian installation. After configuring the
 :ref:`sysadmin_package_repositories`, you need to run:
@@ -1,14 +1,14 @@
 Introduction
 ============

-This documentationm is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
+This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.


 What is Proxmox Backup
 ----------------------

 Proxmox Backup is an enterprise class client-server backup software,
-specially optimized for `Proxmox Virtual Environment`_ to backup
+specially optimized for the `Proxmox Virtual Environment`_ to backup
 :term:`virtual machine`\ s and :term:`container`\ s. It is also
 possible to backup physical hosts.

@@ -24,23 +24,23 @@ Architecture
 ------------

 Proxmox Backup uses a `Client-server model`_. The server is
-responsible to store the backup data, and provides an API to create
-backups and restore data. It is also possible to manage disks and
+responsible to store the backup data and provides an API to create
+backups and restore data. It is possible to manage disks and
 other server side resources using this API.

 A backup client uses this API to access the backed up data,
 i.e. ``proxmox-backup-client`` is a command line tool to create
-backups and restore data. We also deliver an integrated client for
+backups and restore data. We deliver an integrated client for
 QEMU_ with `Proxmox Virtual Environment`_.

 A single backup is allowed to contain several archives. For example,
 when you backup a :term:`virtual machine`, each disk is stored as a
 separate archive inside that backup. The VM configuration also gets an
 extra file. This way, it is easy to access and restore important parts
-of the backup, without having to scan the whole backup.
+of the backup without having to scan the whole backup.
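As an illustration of this layout, the snapshot of a backed up VM could contain archive files like the following (hypothetical datastore path and file names, shown only to visualize the one-archive-per-disk scheme):

.. code-block:: console

    # ls /path/to/datastore/vm/100/2020-01-01T00:00:00Z
    drive-scsi0.img.fidx  index.json.blob  qemu-server.conf.blob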

-Main features
+Main Features
 -------------

 :Proxmox VE: The `Proxmox Virtual Environment`_ is fully

@@ -49,52 +49,52 @@ Main features

 :GUI: We provide a graphical, web based user interface.

-:Deduplication: Incremental backup produces large amounts of duplicate
+:Deduplication: Incremental backups produce large amounts of duplicate
   data. The deduplication layer removes that redundancy and makes
-  inkremental backup small and space efficient.
+  incremental backups small and space efficient.

 :Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
   accuracy and consistency of your backups.

-:Remote Sync: It is possible to efficently synchronize data to remote
-  sites. Only deltas containing new data are transfered.
+:Remote Sync: It is possible to efficiently synchronize data to remote
+  sites. Only deltas containing new data are transferred.

 :Performance: The whole software stack is written in :term:`Rust`,
-  which provides high speed and memory efficiency.
+  to provide high speed and memory efficiency.

 :Compression: Ultra fast Zstandard_ compression is able to compress
   several gigabytes of data per second.

-:Encryption: Backups can be encrypted at client side using AES-256 in
+:Encryption: Backups can be encrypted client-side using AES-256 in
   GCM_ mode. This authenticated encryption mode (AE_) provides very
   high performance on modern hardware.

-:Open Source: No secrets. You have access to the whole source tree.
+:Open Source: No secrets. You have access to all the source code.

-:Support: Commercial support options available from `Proxmox`_.
+:Support: Commercial support options are available from `Proxmox`_.


 Why Backup?
 -----------

-The primary purpose of backup is to protect against data loss. Data
-loss can happen because of faulty hardware, but also by human errors.
+The primary purpose of a backup is to protect against data loss. Data
+loss can be caused by faulty hardware, but also by human error.

 A common mistake is to delete a file or folder which is still
-required. Virtualization can amplify this problem, because it is now
-easy to delete a whole virtual machine by a single button press.
+required. Virtualization can amplify this problem. It is now
+easy to delete a whole virtual machine by pressing a single button.

-Backups can also serve as a toolkit for administrators to temporarily
+Backups can serve as a toolkit for administrators to temporarily
 store data. For example, it is common practice to perform full backups
 before installing major software updates. If something goes wrong, you
-can just restore the previous state.
+can restore the previous state.

 Another reason for backups is legal requirements. Some data must be
-kept in a safe place for several years so that you can access it if
-required by law.
+kept in a safe place for several years by law, so that it can be accessed if
+required.

 Data loss can be very costly as it can severely restrict your
-business. Therefore, make sure that you regularly perform a backup
+business. Therefore, make sure that you perform a backup regularly
 and run restore tests.
@@ -5,12 +5,12 @@ Debian Package Repositories

 All Debian based systems use APT_ as package
 management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found inside
-``/etc/apt/sources.d/``. Updates can be installed directly using
+``/etc/apt/sources.list`` and ``.list`` files found in the
+``/etc/apt/sources.d/`` directory. Updates can be installed directly with
 the ``apt`` command line tool, or via the GUI.

 APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored, and a
+the most preferred source listed first. Empty lines are ignored and a
 ``#`` character anywhere on a line marks the remainder of that line as a
 comment. The information available from the configured sources is
 acquired by ``apt update``.
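For illustration, a minimal repository line of this form could look like the following (hypothetical mirror URL):

.. code-block:: sources.list

    deb http://ftp.debian.org/debian buster main contrib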

@@ -33,7 +33,7 @@ the backup server binaries.
 `Proxmox Backup`_ Enterprise Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This is the default, stable and recommended repository, available for
+This is the default, stable, and recommended repository. It is available for
 all `Proxmox Backup`_ subscription users. It contains the most stable packages,
 and is suitable for production use. The ``pbs-enterprise`` repository is
 enabled by default:

@@ -44,15 +44,13 @@ enabled by default:
  deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise


-As soon as updates are available, the superuser (``root@pam`` user) is
-notified via email about the available new packages. On the GUI, the
-change-log of each package can be viewed (if available), showing all
-details of the update. So you will never miss important security
-fixes.
+To never miss important security fixes, the superuser (``root@pam`` user) is
+notified via email about new packages as soon as they are available. The
+change-log and details of each package can be viewed in the GUI (if available).

 Please note that you need a valid subscription key to access this
-repository. We offer different support levels, and you can find further
-details at https://www.proxmox.com/en/proxmox-backup/pricing.
+repository. More information regarding subscription levels and pricing can be
+found at https://www.proxmox.com/en/proxmox-backup/pricing.

 .. note:: You can disable this repository by commenting out the above
    line using a `#` (at the start of the line). This prevents error

@@ -65,7 +63,7 @@ details at https://www.proxmox.com/en/proxmox-backup/pricing.

 As the name suggests, you do not need a subscription key to access
 this repository. It can be used for testing and non-production
-use. Its not recommended to run on production servers, as these
+use. It is not recommended to use it on production servers, because these
 packages are not always heavily tested and validated.

 We recommend to configure this repository in ``/etc/apt/sources.list``.

@@ -92,9 +90,9 @@ latest packages and is heavily used by developers to test new
 features.

 .. warning:: the ``pbstest`` repository should (as the name implies)
-   only be used for testing new features or bug fixes.
+   only be used to test new features or bug fixes.

-As usual, you can configure this using ``/etc/apt/sources.list`` by
+You can configure this using ``/etc/apt/sources.list`` by
 adding the following line:

 .. code-block:: sources.list
@@ -1,15 +1,15 @@
 Description
 ^^^^^^^^^^^

-``pxar`` is a command line utility used to create and manipulate archives in the
+``pxar`` is a command line utility to create and manipulate archives in the
 :ref:`pxar-format`.
 It is inspired by `casync file archive format
 <http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
-which has a similar use-case.
-The ``.pxar`` format is adapted to fulfill the specific needs of the proxmox
-backup server, for example efficient storage of hardlinks.
+which caters to a similar use-case.
+The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
+Backup Server, for example, efficient storage of hardlinks.
 The format is designed to reduce storage space needed on the server by achieving
-high de-duplication.
+a high level of de-duplication.

 Creating an Archive
 ^^^^^^^^^^^^^^^^^^^

@@ -20,23 +20,23 @@ Run the following command to create an archive of a folder named ``source``:

  # pxar create archive.pxar source

-This will create a new archive called ``archive.pxar`` from the contents of the
+This will create a new archive called ``archive.pxar`` with the contents of the
 ``source`` folder.

 .. NOTE:: ``pxar`` will not overwrite any existing archives. If an archive with
    the same name is already present in the target folder, the creation will
    fail.

-By default, ``pxar`` will skip certain mountpoints and not follow device
+By default, ``pxar`` will skip certain mountpoints and will not follow device
 boundaries. This design decision is based on the primary use case of creating
-archives for backups, where it makes no sense to store the content of certain
+archives for backups. It is sensible to not back up the contents of certain
 temporary or system specific files.
-In order to alter this behavior and follow device boundaries, use the
+To alter this behavior and follow device boundaries, use the
 ``--all-file-systems`` flag.

 It is possible to exclude certain files and/or folders from the archive by
 passing glob match patterns as additional parameters. Whenever a file is matched
-by one of the patterns, you will get a warning saying that this file is skipped
+by one of the patterns, you will get a warning stating that this file is skipped
 and therefore not included in the archive.

 For example, you can exclude all files ending in ``.txt`` from the archive
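A sketch of such an invocation, assuming the glob is passed as an additional parameter exactly as described above (quoted, so the shell does not expand it):

.. code-block:: console

    # pxar create archive.pxar source '**/*.txt'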

@@ -50,7 +50,7 @@ Be aware that the shell itself will try to expand all of the glob patterns befor
 invoking ``pxar``.
 In order to avoid this, all globs have to be quoted correctly.

-It is also possible to pass a list of match pattern to fulfill more complex
+It is possible to pass a list of match patterns to fulfill more complex
 file exclusion/inclusion behavior, although it is recommended to use the
 ``.pxarexclude`` files instead for such cases.

@@ -67,7 +67,7 @@ All the glob pattern are relative to the ``source`` directory.
 previous ones. Permutations of the same patterns lead to different results.

 ``pxar`` will store the list of glob match patterns passed as parameters via the
-command line in a file called ``.pxarexclude-cli`` and store it at the root of
+command line in a file called ``.pxarexclude-cli`` and stores it at the root of
 the archive.
 If a file with this name is already present in the source folder during archive
 creation, this file is not included in the archive and the file containing the

@@ -79,9 +79,9 @@ It is possible to create and place these files in any directory of the filesyste
 tree.
 These files must contain one pattern per line, again later patterns win over
 previous ones.
-The patterns control file exclusion of files present within the given directory
+The patterns control file exclusions of files present within the given directory
 or further below it in the tree.
-The behaviour is the same as described in :ref:`creating-backups`.
+The behavior is the same as described in :ref:`creating-backups`.
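As an illustration, a hypothetical ``.pxarexclude`` file could look like this, assuming that, as with the command line patterns, a pattern prefixed with ``!`` re-includes matches and later lines override earlier ones::

    **/*.log
    !important/keep.log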

 Extracting an Archive
 ^^^^^^^^^^^^^^^^^^^^^

@@ -96,7 +96,7 @@ with the following command:
 If no target is provided, the content of the archive is extracted to the current
 working directory.

-In order to restore only part of an archive or single files and/or folders,
+In order to restore only parts of an archive, single files and/or folders,
 it is possible to pass the corresponding glob match patterns as additional
 parameters or use the patterns stored in a file:
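A minimal sketch of such a call, assuming the match pattern is simply appended after the target (archive and target names taken from the surrounding hunk context):

.. code-block:: console

    # pxar extract etc.pxar /restore/target/etc '**/*.conf'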

@@ -109,8 +109,8 @@ sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
 A path to the file containing match patterns can be specified using the
 ``--files-from`` parameter.

-List the Content of an Archive
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+List the Contents of an Archive
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 To display the files and directories contained in an archive ``archive.pxar``,
 run the following command:

@@ -126,7 +126,7 @@ Mounting an Archive
 ^^^^^^^^^^^^^^^^^^^

 ``pxar`` allows you to mount and inspect the contents of an archive via _`FUSE`.
-In order to mount an archive named ``archive.pxar`` to the mountpoint ``mnt``,
+In order to mount an archive named ``archive.pxar`` to the mountpoint ``/mnt``,
 run the command:

 .. code-block:: console

  # pxar mount archive.pxar /mnt
@@ -1,4 +1,4 @@
-mod access;
+pub mod access;
 pub mod admin;
 pub mod backup;
 pub mod config;
@@ -1,18 +1,33 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};

 use serde_json::{json, Value};

-use proxmox::api::api;
+use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
 use proxmox::api::router::{Router, SubdirMap};
-use proxmox::sortable;
+use proxmox::{sortable, identity};
 use proxmox::{http_err, list_subdirs_api_method};

 use crate::tools;
 use crate::tools::ticket::*;
 use crate::auth_helpers::*;
 use crate::api2::types::*;

+use crate::config::cached_user_info::CachedUserInfo;
+use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+
+pub mod user;
+pub mod domain;
+pub mod acl;
+pub mod role;
+
 fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
+
+    let user_info = CachedUserInfo::new()?;
+
+    if !user_info.is_active_user(&username) {
+        bail!("user account disabled or expired.");
+    }
+
     let ticket_lifetime = tools::ticket::TICKET_LIFETIME;

     if password.starts_with("PBS:") {
@@ -25,27 +40,17 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
         }
     }

-    if username == "root@pam" {
-        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
-        auth.get_handler().set_credentials("root", password);
-        auth.authenticate()?;
-        return Ok(());
-    }
-
-    bail!("inavlid credentials");
+    crate::auth::authenticate_user(username, password)
 }

 #[api(
     input: {
         properties: {
             username: {
-                type: String,
-                description: "User name.",
-                max_length: 64,
+                schema: PROXMOX_USER_ID_SCHEMA,
             },
             password: {
-                type: String,
-                description: "The secret password. This can also be a valid ticket.",
+                schema: PASSWORD_SCHEMA,
             },
         },
     },
@@ -66,6 +71,9 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
         },
     },
+    protected: true,
+    access: {
+        permission: &Permission::World,
+    },
 )]
 /// Create or verify authentication ticket.
 ///
@@ -94,13 +102,72 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
         }
     }

+#[api(
+    input: {
+        properties: {
+            userid: {
+                schema: PROXMOX_USER_ID_SCHEMA,
+            },
+            password: {
+                schema: PASSWORD_SCHEMA,
+            },
+        },
+    },
+    access: {
+        description: "Anybody is allowed to change their own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// Change user password
+///
+/// Each user is allowed to change his own password. Superuser
+/// can change all passwords.
+fn change_password(
+    userid: String,
+    password: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let current_user = rpcenv.get_user()
+        .ok_or_else(|| format_err!("unknown user"))?;
+
+    let mut allowed = userid == current_user;
+
+    if userid == "root@pam" { allowed = true; }
+
+    if !allowed {
+        let user_info = CachedUserInfo::new()?;
+        let privs = user_info.lookup_privs(&current_user, &[]);
+        if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
+    }
+
+    if !allowed {
+        bail!("you are not authorized to change the password.");
+    }
+
+    let (username, realm) = crate::auth::parse_userid(&userid)?;
+    let authenticator = crate::auth::lookup_authenticator(&realm)?;
+    authenticator.store_password(&username, &password)?;
+
+    Ok(Value::Null)
+}
+
 #[sortable]
-const SUBDIRS: SubdirMap = &[
+const SUBDIRS: SubdirMap = &sorted!([
+    ("acl", &acl::ROUTER),
+    (
+        "password", &Router::new()
+            .put(&API_METHOD_CHANGE_PASSWORD)
+    ),
     (
         "ticket", &Router::new()
             .post(&API_METHOD_CREATE_TICKET)
-    )
-];
+    ),
+    ("domains", &domain::ROUTER),
+    ("roles", &role::ROUTER),
+    ("users", &user::ROUTER),
+]);

 pub const ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
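For orientation, the ``password`` entry registered in ``SUBDIRS`` above maps to a ``PUT`` request on the access API. A hypothetical invocation through the REST interface could look like this (host name and ticket/CSRF handling omitted):

.. code-block:: console

    # curl -X PUT https://<host>:8007/api2/json/access/password \
        -d 'userid=john@pbs' -d 'password=newsecret'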
new file: src/api2/access/acl.rs (228 lines)
@@ -0,0 +1,228 @@
use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl;
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};

#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
    path: String,
    ugid: String,
    ugid_type: String,
    propagate: bool,
    roleid: String,
}

fn extract_acl_node_data(
    node: &acl::AclTreeNode,
    path: &str,
    list: &mut Vec<AclListItem>,
    exact: bool,
) {
    for (user, roles) in &node.users {
        for (role, propagate) in roles {
            list.push(AclListItem {
                path: if path.is_empty() { String::from("/") } else { path.to_string() },
                propagate: *propagate,
                ugid_type: String::from("user"),
                ugid: user.to_string(),
                roleid: role.to_string(),
            });
        }
    }
    for (group, roles) in &node.groups {
        for (role, propagate) in roles {
            list.push(AclListItem {
                path: if path.is_empty() { String::from("/") } else { path.to_string() },
                propagate: *propagate,
                ugid_type: String::from("group"),
                ugid: group.to_string(),
                roleid: role.to_string(),
            });
        }
    }
    if exact {
        return;
    }
    for (comp, child) in &node.children {
        let new_path = format!("{}/{}", path, comp);
        extract_acl_node_data(child, &new_path, list, exact);
    }
}

#[api(
    input: {
        properties: {
            path: {
                schema: ACL_PATH_SCHEMA,
                optional: true,
            },
            exact: {
                description: "If set, returns only ACL for the exact path.",
                type: bool,
                optional: true,
                default: false,
            },
        },
    },
    returns: {
        description: "ACL entry list.",
        type: Array,
        items: {
            type: AclListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read Access Control List (ACLs).
pub fn read_acl(
    path: Option<String>,
    exact: bool,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<AclListItem>, Error> {

    //let auth_user = rpcenv.get_user().unwrap();

    let (mut tree, digest) = acl::config()?;

    let mut list: Vec<AclListItem> = Vec::new();
    if let Some(path) = &path {
        if let Some(node) = &tree.find_node(path) {
            extract_acl_node_data(&node, path, &mut list, exact);
        }
    } else {
        extract_acl_node_data(&tree.root, "", &mut list, exact);
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            path: {
                schema: ACL_PATH_SCHEMA,
            },
            role: {
                type: Role,
            },
            propagate: {
                optional: true,
                schema: ACL_PROPAGATE_SCHEMA,
            },
            userid: {
                optional: true,
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            group: {
                optional: true,
                schema: PROXMOX_GROUP_ID_SCHEMA,
            },
            delete: {
                optional: true,
                description: "Remove permissions (instead of adding it).",
                type: bool,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Update Access Control List (ACLs).
pub fn update_acl(
    path: String,
    role: String,
    propagate: Option<bool>,
    userid: Option<String>,
    group: Option<String>,
    delete: Option<bool>,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut tree, expected_digest) = acl::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let propagate = propagate.unwrap_or(true);

    let delete = delete.unwrap_or(false);

    if let Some(ref _group) = group {
        bail!("parameter 'group' - groups are currently not supported.");
    } else if let Some(ref userid) = userid {
        if !delete { // Note: we allow to delete non-existent users
            let user_cfg = crate::config::user::cached_config()?;
            if user_cfg.sections.get(userid).is_none() {
                bail!("no such user.");
            }
        }
    } else {
        bail!("missing 'userid' or 'group' parameter.");
    }

    if !delete { // Note: we allow to delete entries with invalid path
        acl::check_acl_path(&path)?;
    }

    if let Some(userid) = userid {
        if delete {
            tree.delete_user_role(&path, &userid, &role);
        } else {
            tree.insert_user_role(&path, &userid, &role, propagate);
        }
    } else if let Some(group) = group {
        if delete {
            tree.delete_group_role(&path, &group, &role);
        } else {
            tree.insert_group_role(&path, &group, &role, propagate);
        }
    }

    acl::save_config(&tree)?;

    Ok(())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_ACL)
    .put(&API_METHOD_UPDATE_ACL);
new file: src/api2/access/domain.rs (47 lines)
@@ -0,0 +1,47 @@
use anyhow::{Error};

use serde_json::{json, Value};

use proxmox::api::{api, Permission};
use proxmox::api::router::Router;

use crate::api2::types::*;

#[api(
    returns: {
        description: "List of realms.",
        type: Array,
        items: {
            type: Object,
            description: "User configuration (without password).",
            properties: {
                realm: {
                    description: "Realm ID.",
                    type: String,
                },
                comment: {
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                    optional: true,
                },
                default: {
                    description: "Default realm.",
                    type: bool,
                }
            },
        }
    },
    access: {
        description: "Anyone can access this, because we need that list for the login box (before the user is authenticated).",
        permission: &Permission::World,
    }
)]
/// Authentication domain/realm index.
fn list_domains() -> Result<Value, Error> {
    let mut list = Vec::new();
    list.push(json!({ "realm": "pam", "comment": "Linux PAM standard authentication", "default": true }));
    list.push(json!({ "realm": "pbs", "comment": "Proxmox Backup authentication server" }));
    Ok(list.into())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DOMAINS);
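Following directly from the ``json!`` literals above, the realm index returned by ``list_domains`` serializes as::

    [
      { "realm": "pam", "comment": "Linux PAM standard authentication", "default": true },
      { "realm": "pbs", "comment": "Proxmox Backup authentication server" }
    ]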
new file: src/api2/access/role.rs (58 lines)
@@ -0,0 +1,58 @@
use anyhow::Error;

use serde_json::{json, Value};

use proxmox::api::{api, Permission};
use proxmox::api::router::Router;

use crate::api2::types::*;
use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};

#[api(
    returns: {
        description: "List of roles.",
        type: Array,
        items: {
            type: Object,
            description: "User name with description.",
            properties: {
                roleid: {
                    type: Role,
                },
                privs: {
                    type: Array,
                    description: "List of Privileges",
                    items: {
                        type: String,
                        description: "A Privilege",
                    },
                },
                comment: {
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                    optional: true,
                },
            },
        }
    },
    access: {
        permission: &Permission::Anybody,
    }
)]
/// Role list
fn list_roles() -> Result<Value, Error> {
    let mut list = Vec::new();

    for (role, (privs, comment)) in ROLE_NAMES.iter() {
        let mut priv_list = Vec::new();
        for (name, privilege) in PRIVILEGES.iter() {
            if privs & privilege > 0 {
                priv_list.push(name.clone());
            }
        }
        list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
    }
    Ok(list.into())
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ROLES);
new file: src/api2/access/user.rs (295 lines)
@@ -0,0 +1,295 @@
use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::{Schema, StringSchema};

use crate::api2::types::*;
use crate::config::user;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List users (with config digest).",
        type: Array,
        items: { type: user::User },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
    },
)]
/// List all users
pub fn list_users(
    _param: Value,
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<user::User>, Error> {

    let (config, digest) = user::config()?;

    let list = config.convert_to_typed_array("user")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            comment: {
                schema: SINGLE_LINE_COMMENT_SCHEMA,
                optional: true,
            },
            password: {
                schema: PBS_PASSWORD_SCHEMA,
                optional: true,
            },
            enable: {
                schema: user::ENABLE_USER_SCHEMA,
                optional: true,
            },
            expire: {
                schema: user::EXPIRE_USER_SCHEMA,
                optional: true,
            },
            firstname: {
                schema: user::FIRST_NAME_SCHEMA,
                optional: true,
            },
            lastname: {
                schema: user::LAST_NAME_SCHEMA,
                optional: true,
            },
            email: {
                schema: user::EMAIL_SCHEMA,
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Create new user.
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let user: user::User = serde_json::from_value(param)?;

    let (mut config, _digest) = user::config()?;

    if let Some(_) = config.sections.get(&user.userid) {
        bail!("user '{}' already exists.", user.userid);
    }

    let (username, realm) = crate::auth::parse_userid(&user.userid)?;
    let authenticator = crate::auth::lookup_authenticator(&realm)?;

    config.set_data(&user.userid, "user", &user)?;

    user::save_config(&config)?;

    if let Some(password) = password {
        authenticator.store_password(&username, &password)?;
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
        },
    },
    returns: {
        description: "The user configuration (with config digest).",
        type: user::User,
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read user configuration data.
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
    let (config, digest) = user::config()?;
    let user = config.lookup("user", &userid)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(user)
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            password: {
                schema: PBS_PASSWORD_SCHEMA,
                optional: true,
            },
            enable: {
                schema: user::ENABLE_USER_SCHEMA,
                optional: true,
            },
            expire: {
                schema: user::EXPIRE_USER_SCHEMA,
                optional: true,
            },
            firstname: {
                schema: user::FIRST_NAME_SCHEMA,
                optional: true,
            },
            lastname: {
                schema: user::LAST_NAME_SCHEMA,
                optional: true,
            },
            email: {
                schema: user::EMAIL_SCHEMA,
                optional: true,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Update user configuration.
pub fn update_user(
    userid: String,
    comment: Option<String>,
    enable: Option<bool>,
    expire: Option<i64>,
    password: Option<String>,
    firstname: Option<String>,
    lastname: Option<String>,
    email: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = user::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: user::User = config.lookup("user", &userid)?;

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    if let Some(enable) = enable {
        data.enable = if enable { None } else { Some(false) };
    }

    if let Some(expire) = expire {
        data.expire = if expire > 0 { Some(expire) } else { None };
    }

    if let Some(password) = password {
        let (username, realm) = crate::auth::parse_userid(&userid)?;
        let authenticator = crate::auth::lookup_authenticator(&realm)?;
        authenticator.store_password(&username, &password)?;
    }

    if let Some(firstname) = firstname {
        data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
    }

    if let Some(lastname) = lastname {
        data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
    }
    if let Some(email) = email {
        data.email = if email.is_empty() { None } else { Some(email) };
    }

    config.set_data(&userid, "user", &data)?;

    user::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            userid: {
                schema: PROXMOX_USER_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
    },
)]
/// Remove a user from the configuration file.
pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = user::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.sections.get(&userid) {
        Some(_) => { config.sections.remove(&userid); },
        None => bail!("user '{}' does not exist.", userid),
    }

    user::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_USER)
    .put(&API_METHOD_UPDATE_USER)
    .delete(&API_METHOD_DELETE_USER);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_USERS)
    .post(&API_METHOD_CREATE_USER)
    .match_all("userid", &ITEM_ROUTER);
@ -2,14 +2,15 @@ use std::collections::{HashSet, HashMap};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use chrono::{TimeZone, Local};
|
||||
use failure::*;
|
||||
use anyhow::{bail, Error};
|
||||
use futures::*;
|
||||
use hyper::http::request::Parts;
|
||||
use hyper::{header, Body, Response, StatusCode};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
|
||||
use proxmox::api::{
|
||||
api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
|
||||
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
@ -19,8 +20,25 @@ use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||
use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
use crate::config::datastore;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools;
|
||||
use crate::config::acl::{
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_MODIFY,
|
||||
PRIV_DATASTORE_READ,
|
||||
PRIV_DATASTORE_PRUNE,
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
};
|
||||
|
||||
fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
|
||||
let owner = store.get_owner(group)?;
|
||||
if &owner != userid {
|
||||
bail!("backup owner check failed ({} != {})", userid, owner);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
|
||||
|
||||
@ -78,12 +96,23 @@ fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo
|
||||
type: GroupListItem,
|
||||
}
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
|
||||
true),
|
||||
},
|
||||
)]
|
||||
/// List backup groups.
|
||||
fn list_groups(
|
||||
store: String,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<GroupListItem>, Error> {
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
|
||||
@ -97,8 +126,15 @@ fn list_groups(
|
||||
BackupInfo::sort_list(&mut list, false);
|
||||
|
||||
let info = &list[0];
|
||||
|
||||
let group = info.backup_dir.group();
|
||||
|
||||
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
|
||||
if !list_all {
|
||||
let owner = datastore.get_owner(group)?;
|
||||
if owner != username { continue; }
|
||||
}
|
||||
|
||||
let result_item = GroupListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
@ -136,6 +172,12 @@ fn list_groups(
|
||||
type: BackupContent,
|
||||
}
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
|
||||
true),
|
||||
},
|
||||
)]
|
||||
/// List snapshot files.
|
||||
pub fn list_snapshot_files(
|
||||
@ -144,12 +186,20 @@ pub fn list_snapshot_files(
|
||||
backup_id: String,
|
||||
backup_time: i64,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<BackupContent>, Error> {
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
|
||||
|
||||
let mut files = read_backup_index(&datastore, &snapshot)?;
|
||||
|
||||
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
|
||||
@ -184,6 +234,12 @@ pub fn list_snapshot_files(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
|
||||
true),
|
||||
},
|
||||
)]
|
||||
/// Delete backup snapshot.
|
||||
fn delete_snapshot(
|
||||
@ -192,13 +248,20 @@ fn delete_snapshot(
|
||||
backup_id: String,
|
||||
backup_time: i64,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
|
||||
|
||||
datastore.remove_backup_dir(&snapshot)?;
|
||||
|
||||
Ok(Value::Null)
|
||||
@ -227,19 +290,28 @@ fn delete_snapshot(
|
||||
type: SnapshotListItem,
|
||||
}
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
|
||||
true),
|
||||
},
|
||||
)]
|
||||
/// List backup snapshots.
|
||||
pub fn list_snapshots (
|
||||
param: Value,
|
||||
store: String,
|
||||
backup_type: Option<String>,
|
||||
backup_id: Option<String>,
|
||||
_param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<SnapshotListItem>, Error> {
|
||||
|
||||
let store = tools::required_string_param(¶m, "store")?;
|
||||
let backup_type = param["backup-type"].as_str();
|
||||
let backup_id = param["backup-id"].as_str();
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let datastore = DataStore::lookup_datastore(store)?;
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let base_path = datastore.base_path();
|
||||
|
||||
@ -249,13 +321,19 @@ pub fn list_snapshots (
|
||||
|
||||
for info in backup_list {
|
||||
let group = info.backup_dir.group();
|
||||
if let Some(backup_type) = backup_type {
|
||||
if let Some(ref backup_type) = backup_type {
|
||||
if backup_type != group.backup_type() { continue; }
|
||||
}
|
||||
if let Some(backup_id) = backup_id {
|
||||
if let Some(ref backup_id) = backup_id {
|
||||
if backup_id != group.backup_id() { continue; }
|
||||
}
|
||||
|
||||
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
|
||||
if !list_all {
|
||||
let owner = datastore.get_owner(group)?;
|
||||
if owner != username { continue; }
|
||||
}
|
||||
|
||||
let mut result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
@ -291,6 +369,9 @@ pub fn list_snapshots (
|
||||
returns: {
|
||||
type: StorageStatus,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
|
||||
},
|
||||
)]
|
||||
/// Get datastore status.
|
||||
pub fn status(
|
||||
@ -330,50 +411,43 @@ macro_rules! add_common_prune_prameters {
|
||||
(
|
||||
"keep-daily",
|
||||
true,
|
||||
&IntegerSchema::new("Number of daily backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_DAILY,
|
||||
),
|
||||
(
|
||||
"keep-hourly",
|
||||
true,
|
||||
&IntegerSchema::new("Number of hourly backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_HOURLY,
|
||||
),
|
||||
(
|
||||
"keep-last",
|
||||
true,
|
||||
&IntegerSchema::new("Number of backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_LAST,
|
||||
),
|
||||
(
|
||||
"keep-monthly",
|
||||
true,
|
||||
&IntegerSchema::new("Number of monthly backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_MONTHLY,
|
||||
),
|
||||
(
|
||||
"keep-weekly",
|
||||
true,
|
||||
&IntegerSchema::new("Number of weekly backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_WEEKLY,
|
||||
),
|
||||
(
|
||||
"keep-yearly",
|
||||
true,
|
||||
&IntegerSchema::new("Number of yearly backups to keep.")
|
||||
.minimum(1)
|
||||
.schema()
|
||||
&PRUNE_SCHEMA_KEEP_YEARLY,
|
||||
),
|
||||
$( $list2 )*
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
|
||||
"Returns the list of snapshots and a flag indicating if there are kept or removed.",
|
||||
PruneListItem::API_SCHEMA
|
||||
).schema();
|
||||
|
||||
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&prune),
|
||||
&ObjectSchema::new(
|
||||
@ -388,25 +462,36 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
|
||||
],[
|
||||
("store", false, &DATASTORE_SCHEMA),
|
||||
])
|
||||
)
|
||||
))
|
||||
.returns(&API_RETURN_SCHEMA_PRUNE)
|
||||
.access(None, &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
|
||||
true)
|
||||
);
|
||||
|
||||
fn prune(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let store = param["store"].as_str().unwrap();
|
||||
|
||||
let store = tools::required_string_param(&param, "store")?;
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;

let username = rpcenv.get_user().unwrap();
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

let dry_run = param["dry-run"].as_bool().unwrap_or(false);

let group = BackupGroup::new(backup_type, backup_id);

let datastore = DataStore::lookup_datastore(store)?;
let datastore = DataStore::lookup_datastore(&store)?;

let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
if !allowed { check_backup_owner(&datastore, &group, &username)?; }

let prune_options = PruneOptions {
    keep_last: param["keep-last"].as_u64(),
@ -419,22 +504,7 @@ fn prune(

let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
let result = try_block! {
    if !prune_options.keeps_something() {
        worker.log("No prune selection - keeping all files.");
        return Ok(());
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        if dry_run {
            worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        } else {
            worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                store, backup_type, backup_id));
        }
    }
let mut prune_result = Vec::new();

let list = group.list_backups(&datastore.base_path())?;

@ -442,11 +512,46 @@ fn prune(

prune_info.reverse(); // delete older snapshots first

for (info, keep) in prune_info {
let keep_all = !prune_options.keeps_something();

if dry_run {
    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let group = info.backup_dir.group();

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time.timestamp(),
            "keep": keep,
        }));
    }
    return Ok(json!(prune_result));
}

// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;

let result = try_block! {
    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = BackupDir::backup_time_to_string(backup_time);
        let group = info.backup_dir.group();

        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
@ -457,6 +562,13 @@ fn prune(

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time.timestamp(),
            "keep": keep,
        }));

        if !(dry_run || keep) {
            datastore.remove_backup_dir(&info.backup_dir)?;
        }
@ -469,9 +581,9 @@ fn prune(

if let Err(err) = result {
    bail!("prune failed - {}", err);
}
};

Ok(json!(worker.to_string())) // return the UPID
Ok(json!(prune_result))
}
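The reworked handler returns the prune selection itself instead of a worker UPID, and a dry run now short-circuits before any WorkerTask is created. A minimal sketch of the rows it assembles, using only serde_json; the concrete values here are invented, while the field set mirrors the json! calls above:

use serde_json::{json, Value};

// Hypothetical sample row: one entry per snapshot, as pushed into prune_result.
fn sample_prune_row() -> Value {
    json!({
        "backup-type": "vm",        // group.backup_type()
        "backup-id": "100",         // group.backup_id()
        "backup-time": 1585000000,  // backup_time.timestamp()
        "keep": true,               // false means the snapshot is removed
    })
}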

#[api(
@ -485,6 +597,9 @@ fn prune(
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
fn start_garbage_collection(
@ -503,7 +618,7 @@ fn start_garbage_collection(
    "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
    {
        worker.log(format!("starting garbage collection on store {}", store));
        datastore.garbage_collection(worker)
        datastore.garbage_collection(&worker)
    })?;

    Ok(json!(upid_str))
@ -519,7 +634,10 @@ fn start_garbage_collection(
    },
    returns: {
        type: GarbageCollectionStatus,
    }
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
@ -535,16 +653,54 @@ pub fn garbage_collection_status(
    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: {
            description: "Datastore name and description.",
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                comment: {
                    optional: true,
                    schema: SINGLE_LINE_COMMENT_SCHEMA,
                },
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    Ok(config.convert_to_array("store", None, &[]))
    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            let mut entry = json!({ "store": store });
            if let Some(comment) = data["comment"].as_str() {
                entry["comment"] = comment.into();
            }
            list.push(entry);
        }
    }

    Ok(list.into())
}
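The listing filter above is a plain bitmask test: a datastore shows up when the user holds at least one of the audit or backup bits on its ACL path. A compact sketch of that check, with made-up bit values standing in for the real PRIV_* constants from crate::config::acl:

// Hypothetical bit values; the real constants live in crate::config::acl.
const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;
const PRIV_DATASTORE_BACKUP: u64 = 1 << 1;

// A datastore is listed when either privilege bit is set.
fn may_list_datastore(user_privs: u64) -> bool {
    (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0
}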

#[sortable]
@ -560,6 +716,10 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
        ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
    ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

fn download_file(
@ -567,25 +727,31 @@ fn download_file(
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let username = rpcenv.get_user().unwrap();
        let user_info = CachedUserInfo::new()?;
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
            backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);
@ -611,7 +777,7 @@ fn download_file(

pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
@ -619,6 +785,9 @@ pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

fn upload_backup_log(
@ -626,12 +795,11 @@ fn upload_backup_log(
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = "client.log.blob";
@ -642,6 +810,9 @@ fn upload_backup_log(

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let username = rpcenv.get_user().unwrap();
        check_backup_owner(&datastore, backup_dir.group(), &username)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);
@ -724,10 +895,5 @@ const DATASTORE_INFO_ROUTER: Router = Router::new()

pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
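download_file and prune share a two-tier access rule: a datastore-wide privilege grants access outright, and otherwise the request only succeeds when the requesting user owns the backup group (the check_backup_owner call). A simplified sketch of the rule, assuming the owner has already been looked up:

// Hypothetical bit value; the real constant lives in crate::config::acl.
const PRIV_DATASTORE_READ: u64 = 1 << 2;

// Simplified: in the handlers, `owner` comes from the per-group owner
// lookup that check_backup_owner performs against the datastore.
fn may_read_group(user_privs: u64, username: &str, owner: &str) -> bool {
    (user_privs & PRIV_DATASTORE_READ) != 0 || username == owner
}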
@ -1,4 +1,4 @@
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
@ -6,7 +6,7 @@ use hyper::{Body, Response, StatusCode};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

@ -14,6 +14,8 @@ use crate::tools::{self, WrappedReaderStream};
use crate::server::{WorkerTask, H2Service};
use crate::backup::*;
use crate::api2::types::*;
use crate::config::acl::PRIV_DATASTORE_BACKUP;
use crate::config::cached_user_info::CachedUserInfo;

mod environment;
use environment::*;
@ -37,6 +39,10 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
        ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
    ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to check it inside the function body
    Some("The user needs Datastore.Backup privilege on /datastore/{store} and needs to own the backup group."),
    &Permission::Anybody
);

fn upgrade_to_backup_protocol(
@ -50,7 +56,13 @@ fn upgrade_to_backup_protocol(
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

        let username = rpcenv.get_user().unwrap();

        let store = tools::required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
@ -73,10 +85,15 @@ fn upgrade_to_backup_protocol(

        let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

        let username = rpcenv.get_user().unwrap();
        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);
        let owner = datastore.create_backup_group(&backup_group, &username)?;
        // permission check
        if owner != username { // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", username, owner);
        }

        let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
        let backup_dir = BackupDir::new_with_group(backup_group, backup_time);

@ -106,13 +123,12 @@ fn upgrade_to_backup_protocol(
        let abort_future = worker.abort_future();

        let env2 = env.clone();
        let env3 = env.clone();

        let req_fut = req_body
        let mut req_fut = req_body
            .on_upgrade()
            .map_err(Error::from)
            .and_then(move |conn| {
                env3.debug("protocol upgrade done");
                env2.debug("protocol upgrade done");

                let mut http = hyper::server::conn::Http::new();
                http.http2_only(true);
@ -124,36 +140,39 @@ fn upgrade_to_backup_protocol(
                http.serve_connection(conn, service)
                    .map_err(Error::from)
            });
        let abort_future = abort_future
        let mut abort_future = abort_future
            .map(|_| Err(format_err!("task aborted")));

        use futures::future::Either;
        future::select(req_fut, abort_future)
            .map(|res| match res {
                Either::Left((Ok(res), _)) => Ok(res),
                Either::Left((Err(err), _)) => Err(err),
                Either::Right((Ok(res), _)) => Ok(res),
                Either::Right((Err(err), _)) => Err(err),
            })
            .and_then(move |_result| async move {
                env.ensure_finished()?;
        async move {
            let res = select!{
                req = req_fut => req,
                abrt = abort_future => abrt,
            };

            match (res, env.ensure_finished()) {
                (Ok(_), Ok(())) => {
                    env.log("backup finished successfully");
                    Ok(())
            })
            .then(move |result| async move {
                if let Err(err) = result {
                    match env2.ensure_finished() {
                        Ok(()) => {}, // ignore error after finish
                        _ => {
                            env2.log(format!("backup failed: {}", err));
                            env2.log("removing failed backup");
                            env2.remove_backup()?;
                            return Err(err);
                        }
                    }
                }
                },
                (Err(err), Ok(())) => {
                    // ignore errors after finish
                    env.log(format!("backup had errors but finished: {}", err));
                    Ok(())
            })
                },
                (Ok(_), Err(err)) => {
                    env.log(format!("backup ended and finish failed: {}", err));
                    env.log("removing unfinished backup");
                    env.remove_backup()?;
                    Err(err)
                },
                (Err(err), Err(_)) => {
                    env.log(format!("backup failed: {}", err));
                    env.log("removing failed backup");
                    env.remove_backup()?;
                    Err(err)
                },
            }
        }
        })?;

        let response = Response::builder()
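The combinator chain over future::select/Either is replaced by the futures::select! macro plus one exhaustive match over both outcomes (connection result crossed with ensure_finished). That is also why req_fut and abort_future become mut bindings: select! polls fused futures through mutable references. A self-contained sketch of that shape with stand-in futures (futures 0.3):

use futures::{future::FutureExt, pin_mut, select};

async fn race_demo() -> &'static str {
    // Stand-ins: in the handler these are the upgraded HTTP/2 connection
    // future and the worker's abort future.
    let work = async { "finished" }.fuse();
    let abort = futures::future::pending::<&'static str>().fuse();
    pin_mut!(work, abort);
    select! {
        res = work => res,
        res = abort => res,
    }
}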

@ -1,8 +1,8 @@
use failure::*;
use anyhow::{bail, Error};
use std::sync::{Arc, Mutex};
use std::collections::HashMap;

use serde_json::Value;
use serde_json::{json, Value};

use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
@ -80,7 +80,7 @@ impl SharedBackupState {
#[derive(Clone)]
pub struct BackupEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: HashMap<String, Value>,
    result_attributes: Value,
    user: String,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
@ -110,7 +110,7 @@ impl BackupEnvironment {
    };

    Self {
        result_attributes: HashMap::new(),
        result_attributes: json!({}),
        env_type,
        user,
        worker,
@ -480,12 +480,12 @@ impl BackupEnvironment {

impl RpcEnvironment for BackupEnvironment {

    fn set_result_attrib(&mut self, name: &str, value: Value) {
        self.result_attributes.insert(name.into(), value);
    fn result_attrib_mut(&mut self) -> &mut Value {
        &mut self.result_attributes
    }

    fn get_result_attrib(&self, name: &str) -> Option<&Value> {
        self.result_attributes.get(name)
    fn result_attrib(&self) -> &Value {
        &self.result_attributes
    }

    fn env_type(&self) -> RpcEnvironmentType {
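The RpcEnvironment contract changes from per-key get/set accessors to handing out the whole serde_json::Value. Callers can then use plain Value indexing, which is how the `rpcenv["digest"] = ...` assignments in the config handlers later in this diff work. A sketch of both sides, assuming a trait with exactly the two methods shown above:

use serde_json::{json, Value};

// Assumed minimal trait shape, mirroring the two methods above.
trait ResultAttribs {
    fn result_attrib_mut(&mut self) -> &mut Value;
    fn result_attrib(&self) -> &Value;
}

fn set_digest(env: &mut dyn ResultAttribs, hex: &str) {
    // Value's IndexMut auto-creates the "digest" key on a JSON object.
    env.result_attrib_mut()["digest"] = json!(hex);
}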

@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::Body;
use hyper::http::request::Parts;

@ -3,10 +3,12 @@ use proxmox::list_subdirs_api_method;

pub mod datastore;
pub mod remote;
pub mod sync;

const SUBDIRS: SubdirMap = &[
    ("datastore", &datastore::ROUTER),
    ("remote", &remote::ROUTER),
    ("sync", &sync::ROUTER),
];

pub const ROUTER: Router = Router::new()

@ -1,13 +1,15 @@
use std::path::PathBuf;

use failure::*;
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};

#[api(
    input: {
@ -16,23 +18,32 @@ use crate::config::datastore;
    returns: {
        description: "List the configured datastores (with config digest).",
        type: Array,
        items: {
            type: datastore::DataStoreConfig,
        items: { type: datastore::DataStoreConfig },
    },
    access: {
        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// List all datastores
pub fn list_datastores(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {

    let (config, digest) = datastore::config()?;

    Ok(config.convert_to_array("name", Some(&digest), &[]))
    let list = config.convert_to_typed_array("datastore")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

// fixme: impl. const fn get_object_schema(datastore::DataStoreConfig::API_SCHEMA),
// but this needs support for match inside const fn
// see: https://github.com/rust-lang/rust/issues/49146

#[api(
    protected: true,
    input: {
@ -40,18 +51,53 @@ pub fn list_datastores(
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        path: {
            schema: datastore::DIR_NAME_SCHEMA,
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
        },
        "keep-hourly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
        },
        "keep-daily": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_DAILY,
        },
        "keep-weekly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
        },
        "keep-monthly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
        },
        "keep-yearly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
    },
    },
    access: {
        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Create new datastore config.
pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
pub fn create_datastore(param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

@ -59,16 +105,16 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {

    let (mut config, _digest) = datastore::config()?;

    if let Some(_) = config.sections.get(&name) {
        bail!("datastore '{}' already exists.", name);
    if let Some(_) = config.sections.get(&datastore.name) {
        bail!("datastore '{}' already exists.", datastore.name);
    }

    let path: PathBuf = datastore.path.clone().into();

    let backup_user = crate::backup::backup_user()?;
    let _store = ChunkStore::create(&name, path, backup_user.uid, backup_user.gid)?;
    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid)?;

    config.set_data(&name, "datastore", &datastore)?;
    config.set_data(&datastore.name, "datastore", &datastore)?;

    datastore::save_config(&config)?;

@ -87,14 +133,47 @@ pub fn create_datastore(name: String, param: Value) -> Result<(), Error> {
        description: "The datastore configuration (with config digest).",
        type: datastore::DataStoreConfig,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Read a datastore configuration.
pub fn read_datastore(name: String) -> Result<Value, Error> {
pub fn read_datastore(
    name: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreConfig, Error> {
    let (config, digest) = datastore::config()?;
    let mut data = config.lookup_json("datastore", &name)?;
    data.as_object_mut().unwrap()
        .insert("digest".into(), proxmox::tools::digest_to_hex(&digest).into());
    Ok(data)

    let store_config = config.lookup("datastore", &name)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(store_config)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the comment property.
    comment,
    /// Delete the garbage collection schedule.
    gc_schedule,
    /// Delete the prune job schedule.
    prune_schedule,
    /// Delete the keep-last property
    keep_last,
    /// Delete the keep-hourly property
    keep_hourly,
    /// Delete the keep-daily property
    keep_daily,
    /// Delete the keep-weekly property
    keep_weekly,
    /// Delete the keep-monthly property
    keep_monthly,
    /// Delete the keep-yearly property
    keep_yearly,
}

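The enum is wired to the API through serde: rename_all="kebab-case" means the wire strings are "gc-schedule", "keep-last", and so on, while the Rust variants keep their snake_case names. A tiny standalone round trip (not the enum above) showing the mapping:

use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
enum Prop { gc_schedule, keep_last }

fn main() {
    let p: Prop = serde_json::from_str("\"gc-schedule\"").unwrap();
    assert_eq!(p, Prop::gc_schedule);
    assert_eq!(serde_json::to_string(&Prop::keep_last).unwrap(), "\"keep-last\"");
}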
#[api(
@ -108,17 +187,69 @@ pub fn read_datastore(name: String) -> Result<Value, Error> {
        optional: true,
        schema: SINGLE_LINE_COMMENT_SCHEMA,
    },
    "gc-schedule": {
        optional: true,
        schema: GC_SCHEDULE_SCHEMA,
    },
    "prune-schedule": {
        optional: true,
        schema: PRUNE_SCHEDULE_SCHEMA,
    },
    "keep-last": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_LAST,
    },
    "keep-hourly": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_HOURLY,
    },
    "keep-daily": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_DAILY,
    },
    "keep-weekly": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_WEEKLY,
    },
    "keep-monthly": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_MONTHLY,
    },
    "keep-yearly": {
        optional: true,
        schema: PRUNE_SCHEMA_KEEP_YEARLY,
    },
    delete: {
        description: "List of properties to delete.",
        type: Array,
        optional: true,
        items: {
            type: DeletableProperty,
        }
    },
    digest: {
        optional: true,
        schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
    },
    },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Create new datastore config.
/// Update datastore config.
pub fn update_datastore(
    name: String,
    comment: Option<String>,
    gc_schedule: Option<String>,
    prune_schedule: Option<String>,
    keep_last: Option<u64>,
    keep_hourly: Option<u64>,
    keep_daily: Option<u64>,
    keep_weekly: Option<u64>,
    keep_monthly: Option<u64>,
    keep_yearly: Option<u64>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

@ -134,6 +265,22 @@ pub fn update_datastore(

    let mut data: datastore::DataStoreConfig = config.lookup("datastore", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => { data.comment = None; },
                DeletableProperty::gc_schedule => { data.gc_schedule = None; },
                DeletableProperty::prune_schedule => { data.prune_schedule = None; },
                DeletableProperty::keep_last => { data.keep_last = None; },
                DeletableProperty::keep_hourly => { data.keep_hourly = None; },
                DeletableProperty::keep_daily => { data.keep_daily = None; },
                DeletableProperty::keep_weekly => { data.keep_weekly = None; },
                DeletableProperty::keep_monthly => { data.keep_monthly = None; },
                DeletableProperty::keep_yearly => { data.keep_yearly = None; },
            }
        }
    }

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
@ -143,6 +290,16 @@ pub fn update_datastore(
        }
    }

    if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
    if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }

    if keep_last.is_some() { data.keep_last = keep_last; }
    if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
    if keep_daily.is_some() { data.keep_daily = keep_daily; }
    if keep_weekly.is_some() { data.keep_weekly = keep_weekly; }
    if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
    if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }

    config.set_data(&name, "datastore", &data)?;

    datastore::save_config(&config)?;
@ -157,16 +314,27 @@ pub fn update_datastore(
        name: {
            schema: DATASTORE_SCHEMA,
        },
        digest: {
            optional: true,
            schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
        },
    },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Remove a datastore configuration.
pub fn delete_datastore(name: String) -> Result<(), Error> {
pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

    // fixme: locking ?
    // fixme: check digest ?
    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, _digest) = datastore::config()?;
    let (mut config, expected_digest) = datastore::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.sections.get(&name) {
        Some(_) => { config.sections.remove(&name); },
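delete_datastore (and the other mutating config handlers in this diff) now take an optional digest, turning each write into a compare-and-swap on the config file: the client echoes back the digest it read, and the write is rejected if the file changed in between. The guard itself reduces to a byte comparison; a sketch, assuming sha256 digests as 32-byte arrays like hex_to_digest produces:

use anyhow::{bail, Error};

// Sketch: reject the write when the caller's digest no longer matches
// the digest of the configuration currently on disk.
fn check_digest(client: &[u8; 32], on_disk: &[u8; 32]) -> Result<(), Error> {
    if client != on_disk {
        bail!("detected modified configuration - file changed by other user?");
    }
    Ok(())
}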
@ -1,10 +1,12 @@
use failure::*;
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::remote;
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};

#[api(
    input: {
@ -37,6 +39,9 @@ use crate::config::remote;
        },
    },
    },
    access: {
        permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
    },
)]
/// List all remotes
pub fn list_remotes(
@ -78,6 +83,9 @@ pub fn list_remotes(
        },
    },
    },
    access: {
        permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_MODIFY, false),
    },
)]
/// Create new remote.
pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
@ -111,6 +119,9 @@ pub fn create_remote(name: String, param: Value) -> Result<(), Error> {
        description: "The remote configuration (with config digest).",
        type: remote::Remote,
    },
    access: {
        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
    }
)]
/// Read remote configuration data.
pub fn read_remote(name: String) -> Result<Value, Error> {
@ -121,6 +132,17 @@ pub fn read_remote(name: String) -> Result<Value, Error> {
    Ok(data)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the comment property.
    comment,
    /// Delete the fingerprint property.
    fingerprint,
}

#[api(
    protected: true,
    input: {
@ -148,12 +170,23 @@ pub fn read_remote(name: String) -> Result<Value, Error> {
        optional: true,
        schema: CERT_FINGERPRINT_SHA256_SCHEMA,
    },
    delete: {
        description: "List of properties to delete.",
        type: Array,
        optional: true,
        items: {
            type: DeletableProperty,
        }
    },
    digest: {
        optional: true,
        schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
    },
    },
    },
    access: {
        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
    },
)]
/// Update remote configuration.
pub fn update_remote(
@ -163,6 +196,7 @@ pub fn update_remote(
    userid: Option<String>,
    password: Option<String>,
    fingerprint: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

@ -177,6 +211,15 @@ pub fn update_remote(

    let mut data: remote::Remote = config.lookup("remote", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => { data.comment = None; },
                DeletableProperty::fingerprint => { data.fingerprint = None; },
            }
        }
    }

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
@ -189,7 +232,6 @@ pub fn update_remote(
    if let Some(userid) = userid { data.userid = userid; }
    if let Some(password) = password { data.password = password; }

    // fixme: how to delete a fingerprint?
    if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }

    config.set_data(&name, "remote", &data)?;
@ -208,6 +250,9 @@ pub fn update_remote(
        },
    },
    },
    access: {
        permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_MODIFY, false),
    },
)]
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String) -> Result<(), Error> {
src/api2/config/sync.rs (new file, 277 lines)
@ -0,0 +1,277 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Router, RpcEnvironment};

use crate::api2::types::*;
use crate::config::sync::{self, SyncJobConfig};

// fixme: add access permissions

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs.",
        type: Array,
        items: { type: sync::SyncJobConfig },
    },
)]
/// List all sync jobs
pub fn list_sync_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {

    let (config, digest) = sync::config()?;

    let list = config.convert_to_typed_array("sync")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
            remote: {
                schema: REMOTE_ID_SCHEMA,
            },
            "remote-store": {
                schema: DATASTORE_SCHEMA,
            },
            "remove-vanished": {
                schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
                optional: true,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            schedule: {
                optional: true,
                schema: GC_SCHEDULE_SCHEMA,
            },
        },
    },
)]
/// Create a new sync job.
pub fn create_sync_job(param: Value) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;

    let (mut config, _digest) = sync::config()?;

    if let Some(_) = config.sections.get(&sync_job.id) {
        bail!("job '{}' already exists.", sync_job.id);
    }

    config.set_data(&sync_job.id, "sync", &sync_job)?;

    sync::save_config(&config)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    returns: {
        description: "The sync job configuration.",
        type: sync::SyncJobConfig,
    },
)]
/// Read a sync job configuration.
pub fn read_sync_job(
    id: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<SyncJobConfig, Error> {
    let (config, digest) = sync::config()?;

    let sync_job = config.lookup("sync", &id)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(sync_job)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the comment property.
    comment,
    /// Delete the job schedule.
    schedule,
    /// Delete the remove-vanished flag.
    remove_vanished,
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            store: {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
            remote: {
                schema: REMOTE_ID_SCHEMA,
                optional: true,
            },
            "remote-store": {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
            "remove-vanished": {
                schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
                optional: true,
            },
            comment: {
                optional: true,
                schema: SINGLE_LINE_COMMENT_SCHEMA,
            },
            schedule: {
                optional: true,
                schema: GC_SCHEDULE_SCHEMA,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Update sync job config.
pub fn update_sync_job(
    id: String,
    store: Option<String>,
    remote: Option<String>,
    remote_store: Option<String>,
    remove_vanished: Option<bool>,
    comment: Option<String>,
    schedule: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    // pass/compare digest
    let (mut config, expected_digest) = sync::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: sync::SyncJobConfig = config.lookup("sync", &id)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => { data.comment = None; },
                DeletableProperty::schedule => { data.schedule = None; },
                DeletableProperty::remove_vanished => { data.remove_vanished = None; },
            }
        }
    }

    if let Some(comment) = comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    if let Some(store) = store { data.store = store; }
    if let Some(remote) = remote { data.remote = remote; }
    if let Some(remote_store) = remote_store { data.remote_store = remote_store; }

    if schedule.is_some() { data.schedule = schedule; }
    if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }

    config.set_data(&id, "sync", &data)?;

    sync::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Remove a sync job configuration
pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = sync::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.sections.get(&id) {
        Some(_) => { config.sections.remove(&id); },
        None => bail!("job '{}' does not exist.", id),
    }

    sync::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_SYNC_JOB)
    .put(&API_METHOD_UPDATE_SYNC_JOB)
    .delete(&API_METHOD_DELETE_SYNC_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .post(&API_METHOD_CREATE_SYNC_JOB)
    .match_all("name", &ITEM_ROUTER);
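Every mutating handler in these config modules opens the same advisory lock before its read-modify-write: `let _lock = crate::tools::open_file_locked(...)` holds the guard for the rest of the function. The binding name matters in Rust: `let _lock = ...` keeps the guard (and therefore the lock) alive until the end of scope, whereas `let _ = ...` would drop it immediately. A minimal illustration with a stand-in guard type:

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) { println!("lock released"); }
}

fn main() {
    let _lock = Guard; // lives until the end of main
    let _ = Guard;     // dropped right here: prints "lock released" immediately
    println!("critical section");
    // `_lock` is released only after this line.
}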
@ -3,17 +3,19 @@ use proxmox::list_subdirs_api_method;

pub mod tasks;
mod time;
mod network;
pub mod network;
pub mod dns;
mod syslog;
mod journal;
mod services;
mod status;
mod rrd;

pub const SUBDIRS: SubdirMap = &[
    ("dns", &dns::ROUTER),
    ("journal", &journal::ROUTER),
    ("network", &network::ROUTER),
    ("rrd", &rrd::ROUTER),
    ("services", &services::ROUTER),
    ("status", &status::ROUTER),
    ("syslog", &syslog::ROUTER),
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use failure::*;
|
||||
use anyhow::{Error};
|
||||
use lazy_static::lazy_static;
|
||||
use openssl::sha;
|
||||
use regex::Regex;
|
||||
use serde_json::{json, Value};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
|
||||
static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[allow(non_camel_case_types)]
|
||||
/// Deletable property name
|
||||
pub enum DeletableProperty {
|
||||
/// Delete first nameserver entry
|
||||
dns1,
|
||||
/// Delete second nameserver entry
|
||||
dns2,
|
||||
/// Delete third nameserver entry
|
||||
dns3,
|
||||
}
|
||||
|
||||
pub fn read_etc_resolv_conf() -> Result<Value, Error> {
|
||||
|
||||
let mut result = json!({});
|
||||
@ -34,6 +47,8 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
|
||||
concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
|
||||
}
|
||||
|
||||
let mut options = String::new();
|
||||
|
||||
for line in data.lines() {
|
||||
|
||||
if let Some(caps) = DOMAIN_REGEX.captures(&line) {
|
||||
@ -44,16 +59,69 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
|
||||
let nameserver = &caps[1];
|
||||
let id = format!("dns{}", nscount);
|
||||
result[id] = Value::from(nameserver);
|
||||
} else {
|
||||
if !options.is_empty() { options.push('\n'); }
|
||||
options.push_str(line);
|
||||
}
|
||||
}
|
||||
|
||||
if !options.is_empty() {
|
||||
result["options"] = options.into();
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn update_dns(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
description: "Update DNS settings.",
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
search: {
|
||||
schema: SEARCH_DOMAIN_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
dns1: {
|
||||
optional: true,
|
||||
schema: FIRST_DNS_SERVER_SCHEMA,
|
||||
},
|
||||
dns2: {
|
||||
optional: true,
|
||||
schema: SECOND_DNS_SERVER_SCHEMA,
|
||||
},
|
||||
dns3: {
|
||||
optional: true,
|
||||
schema: THIRD_DNS_SERVER_SCHEMA,
|
||||
},
|
||||
delete: {
|
||||
description: "List of properties to delete.",
|
||||
type: Array,
|
||||
optional: true,
|
||||
items: {
|
||||
type: DeletableProperty,
|
||||
}
|
||||
},
|
||||
digest: {
|
||||
optional: true,
|
||||
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_MODIFY, false),
|
||||
}
|
||||
)]
|
||||
/// Update DNS settings
|
||||
pub fn update_dns(
|
||||
search: Option<String>,
|
||||
dns1: Option<String>,
|
||||
dns2: Option<String>,
|
||||
dns3: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
digest: Option<String>,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
lazy_static! {
|
||||
@ -62,33 +130,41 @@ fn update_dns(
|
||||
|
||||
let _guard = MUTEX.lock();
|
||||
|
||||
let search = crate::tools::required_string_param(¶m, "search")?;
|
||||
let mut config = read_etc_resolv_conf()?;
|
||||
let old_digest = config["digest"].as_str().unwrap();
|
||||
|
||||
let raw = file_get_contents(RESOLV_CONF_FN)?;
|
||||
let old_digest = proxmox::tools::digest_to_hex(&sha::sha256(&raw));
|
||||
|
||||
if let Some(digest) = param["digest"].as_str() {
|
||||
crate::tools::assert_if_modified(&old_digest, &digest)?;
|
||||
if let Some(digest) = digest {
|
||||
crate::tools::assert_if_modified(old_digest, &digest)?;
|
||||
}
|
||||
|
||||
let old_data = String::from_utf8(raw)?;
|
||||
if let Some(delete) = delete {
|
||||
for delete_prop in delete {
|
||||
let config = config.as_object_mut().unwrap();
|
||||
match delete_prop {
|
||||
DeletableProperty::dns1 => { config.remove("dns1"); },
|
||||
DeletableProperty::dns2 => { config.remove("dns2"); },
|
||||
DeletableProperty::dns3 => { config.remove("dns3"); },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut data = format!("search {}\n", search);
|
||||
if let Some(search) = search { config["search"] = search.into(); }
|
||||
if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
|
||||
if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
|
||||
if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }
|
||||
|
||||
let mut data = String::new();
|
||||
|
||||
if let Some(search) = config["search"].as_str() {
|
||||
data.push_str(&format!("search {}\n", search));
|
||||
}
|
||||
for opt in &["dns1", "dns2", "dns3"] {
|
||||
if let Some(server) = param[opt].as_str() {
|
||||
if let Some(server) = config[opt].as_str() {
|
||||
data.push_str(&format!("nameserver {}\n", server));
|
||||
}
|
||||
}
|
||||
|
||||
// append other data
|
||||
lazy_static! {
|
||||
static ref SKIP_REGEX: Regex = Regex::new(r"^(search|domain|nameserver)\s+").unwrap();
|
||||
}
|
||||
for line in old_data.lines() {
|
||||
if SKIP_REGEX.is_match(line) { continue; }
|
||||
data.push_str(line);
|
||||
data.push('\n');
|
||||
if let Some(options) = config["options"].as_str() {
|
||||
data.push_str(options);
|
||||
}
|
||||
|
||||
replace_file(RESOLV_CONF_FN, data.as_bytes(), CreateOptions::new())?;
|
||||
@ -96,7 +172,45 @@ fn update_dns(
|
||||
Ok(Value::Null)
|
||||
}
|
||||
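The rewritten update_dns is a full read-modify-write over /etc/resolv.conf: parse the current file into a JSON object, apply deletions and then updates, and re-render the search/nameserver lines, with unmanaged lines carried along through the "options" blob. A reduced sketch of just the render step, mirroring the push_str loop above:

use serde_json::json;

fn render_resolv_conf(config: &serde_json::Value) -> String {
    let mut data = String::new();
    if let Some(search) = config["search"].as_str() {
        data.push_str(&format!("search {}\n", search));
    }
    for opt in &["dns1", "dns2", "dns3"] {
        if let Some(server) = config[opt].as_str() {
            data.push_str(&format!("nameserver {}\n", server));
        }
    }
    if let Some(options) = config["options"].as_str() {
        data.push_str(options);
    }
    data
}

fn main() {
    let cfg = json!({ "search": "example.org", "dns1": "192.0.2.53" });
    assert_eq!(render_resolv_conf(&cfg), "search example.org\nnameserver 192.0.2.53\n");
}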

fn get_dns(
#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "Returns DNS server IPs and search domain.",
        type: Object,
        properties: {
            digest: {
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
            search: {
                optional: true,
                schema: SEARCH_DOMAIN_SCHEMA,
            },
            dns1: {
                optional: true,
                schema: FIRST_DNS_SERVER_SCHEMA,
            },
            dns2: {
                optional: true,
                schema: SECOND_DNS_SERVER_SCHEMA,
            },
            dns3: {
                optional: true,
                schema: THIRD_DNS_SERVER_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "dns"], PRIV_SYS_AUDIT, false),
    }
)]
/// Read DNS settings.
pub fn get_dns(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
@ -105,41 +219,6 @@ fn get_dns(
    read_etc_resolv_conf()
}

#[sortable]
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_dns),
            &ObjectSchema::new(
                "Read DNS settings.",
                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
            )
        ).returns(
            &ObjectSchema::new(
                "Returns DNS server IPs and search domain.",
                &sorted!([
                    ("digest", false, &PROXMOX_CONFIG_DIGEST_SCHEMA),
                    ("search", true, &SEARCH_DOMAIN_SCHEMA),
                    ("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
                    ("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
                    ("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
                ]),
            ).schema()
        )
    )
    .put(
        &ApiMethod::new(
            &ApiHandler::Sync(&update_dns),
            &ObjectSchema::new(
                "Returns DNS server IPs and search domain.",
                &sorted!([
                    ("node", false, &NODE_SCHEMA),
                    ("search", false, &SEARCH_DOMAIN_SCHEMA),
                    ("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
                    ("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
                    ("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
                    ("digest", true, &PROXMOX_CONFIG_DIGEST_SCHEMA),
                ]),
            )
        ).protected(true)
    );
    .get(&API_METHOD_GET_DNS)
    .put(&API_METHOD_UPDATE_DNS);
@ -1,12 +1,13 @@
use std::process::{Command, Stdio};

use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};
use std::io::{BufRead,BufReader};

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_AUDIT;

#[api(
    protected: true,
@ -53,6 +54,9 @@ use crate::api2::types::*;
        description: "Line text.",
    },
    },
    access: {
        permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read syslog entries.
fn get_journal(
@ -1,28 +1,671 @@
use failure::*;
use serde_json::{json, Value};
use anyhow::{Error, bail};
use serde_json::{Value, to_value};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::ObjectSchema;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string;

use crate::config::network::{self, NetworkConfig};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::*;
use crate::server::{WorkerTask};

fn get_network_config(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    Ok(json!({}))
fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
    let value = parse_property_string(&list, &NETWORK_INTERFACE_ARRAY_SCHEMA)?;
    Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
}
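split_interface_list leans on proxmox's property-string support: parse_property_string validates the list against NETWORK_INTERFACE_ARRAY_SCHEMA, and the handler then flattens the resulting Value into plain Strings. Expected behavior, sketched as a hypothetical test (the exact separators accepted depend on that schema):

// Hypothetical test; assumes the schema accepts comma-separated names.
#[test]
fn split_interface_list_flattens_property_string() {
    let ports = split_interface_list("ens18,ens19").unwrap();
    assert_eq!(ports, vec!["ens18".to_string(), "ens19".to_string()]);
}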

pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_network_config),
            &ObjectSchema::new(
                "Read network configuration.",
                &[ ("node", false, &NODE_SCHEMA) ],
            )
        )
    );
fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {

    let current_gateway_v4 = config.interfaces.iter()
        .find(|(_, interface)| interface.gateway.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v4) = current_gateway_v4 {
        if current_gateway_v4 != iface {
            bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
        }
    }
    Ok(())
}

fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {

    let current_gateway_v6 = config.interfaces.iter()
        .find(|(_, interface)| interface.gateway6.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v6) = current_gateway_v6 {
        if current_gateway_v6 != iface {
            bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
        }
    }
    Ok(())
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "List network devices (with config digest).",
        type: Array,
        items: {
            type: Interface,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_AUDIT, false),
    },
)]
/// List all network devices
pub fn list_network_devices(
    _param: Value,
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, digest) = network::config()?;
    let digest = proxmox::tools::digest_to_hex(&digest);

    let mut list = Vec::new();

    for (iface, interface) in config.interfaces.iter() {
        if iface == "lo" { continue; } // do not list lo
        let mut item: Value = to_value(interface)?;
        item["digest"] = digest.clone().into();
        item["iface"] = iface.to_string().into();
        list.push(item);
    }

    let diff = network::changes()?;
    if !diff.is_empty() {
        rpcenv["changes"] = diff.into();
    }

    Ok(list.into())
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
        },
    },
    returns: {
        description: "The network interface configuration (with config digest).",
        type: Interface,
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{name}"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {

    let (config, digest) = network::config()?;

    let interface = config.lookup(&iface)?;

    let mut data: Value = to_value(interface)?;
    data["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(data)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            "type": {
                description: "Interface type.",
                type: NetworkInterfaceType,
                optional: true,
            },
            autostart: {
                description: "Autostart interface.",
                type: bool,
                optional: true,
            },
            method: {
                type: NetworkConfigMethod,
                optional: true,
            },
            method6: {
                type: NetworkConfigMethod,
                optional: true,
            },
            comments: {
                description: "Comments (inet, may span multiple lines)",
                type: String,
                optional: true,
            },
            comments6: {
                description: "Comments (inet6, may span multiple lines)",
                type: String,
                optional: true,
            },
            cidr: {
                schema: CIDR_V4_SCHEMA,
                optional: true,
            },
            cidr6: {
                schema: CIDR_V6_SCHEMA,
                optional: true,
            },
            gateway: {
                schema: IP_V4_SCHEMA,
                optional: true,
            },
            gateway6: {
                schema: IP_V6_SCHEMA,
                optional: true,
            },
            mtu: {
                description: "Maximum Transmission Unit.",
                optional: true,
                minimum: 46,
                maximum: 65535,
                default: 1500,
            },
            bridge_ports: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            bridge_vlan_aware: {
                description: "Enable bridge vlan support.",
                type: bool,
                optional: true,
            },
            bond_mode: {
                type: LinuxBondMode,
                optional: true,
            },
            slaves: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create network interface configuration.
pub fn create_interface(
    iface: String,
    autostart: Option<bool>,
    method: Option<NetworkConfigMethod>,
    method6: Option<NetworkConfigMethod>,
    comments: Option<String>,
    comments6: Option<String>,
    cidr: Option<String>,
    gateway: Option<String>,
    cidr6: Option<String>,
    gateway6: Option<String>,
    mtu: Option<u64>,
    bridge_ports: Option<String>,
    bridge_vlan_aware: Option<bool>,
    bond_mode: Option<LinuxBondMode>,
    slaves: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let interface_type = crate::tools::required_string_param(&param, "type")?;
    let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, _digest) = network::config()?;

    if config.interfaces.contains_key(&iface) {
        bail!("interface '{}' already exists", iface);
    }

    let mut interface = Interface::new(iface.clone());
    interface.interface_type = interface_type;

    if let Some(autostart) = autostart { interface.autostart = autostart; }
    if method.is_some() { interface.method = method; }
    if method6.is_some() { interface.method6 = method6; }
    if mtu.is_some() { interface.mtu = mtu; }
    if comments.is_some() { interface.comments = comments; }
    if comments6.is_some() { interface.comments6 = comments6; }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        check_duplicate_gateway_v4(&config, &iface)?;
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        check_duplicate_gateway_v6(&config, &iface)?;
        interface.gateway6 = Some(gateway6);
    }

    match interface_type {
        NetworkInterfaceType::Bridge => {
            if let Some(ports) = bridge_ports {
                let ports = split_interface_list(&ports)?;
                interface.set_bridge_ports(ports)?;
            }
            if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
        }
        NetworkInterfaceType::Bond => {
            if bond_mode.is_some() { interface.bond_mode = bond_mode; }
            if let Some(slaves) = slaves {
                let slaves = split_interface_list(&slaves)?;
                interface.set_bond_slaves(slaves)?;
            }
        }
        _ => bail!("creating network interface type '{:?}' is not supported", interface_type),
    }

    if interface.cidr.is_some() || interface.gateway.is_some() {
        interface.method = Some(NetworkConfigMethod::Static);
    } else if interface.method.is_none() {
        interface.method = Some(NetworkConfigMethod::Manual);
    }

    if interface.cidr6.is_some() || interface.gateway6.is_some() {
        interface.method6 = Some(NetworkConfigMethod::Static);
    } else if interface.method6.is_none() {
        interface.method6 = Some(NetworkConfigMethod::Manual);
    }

    config.interfaces.insert(iface, interface);

    network::save_config(&config)?;

    Ok(())
}
|
||||
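// A minimal standalone sketch (not part of this patch) of the address-family
// split used above: CIDR parameters go through network::parse_cidr, while
// bare gateway addresses are classified by the presence of ':', which every
// textual IPv6 address contains and no IPv4 dotted-quad does.
#[cfg(test)]
mod address_family_sketch {
    fn is_ipv6(addr: &str) -> bool {
        addr.contains(':')
    }

    #[test]
    fn heuristic_matches_std_parser() {
        assert!(!is_ipv6("192.168.1.1"));
        assert!(is_ipv6("fd00::1"));
        // the heuristic never parses, which is why the cidr/cidr6 fields
        // use a real parser instead of this shortcut:
        assert!("fd00::1".parse::<std::net::IpAddr>().is_ok());
    }
}
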
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the IPv4 address property.
    cidr,
    /// Delete the IPv6 address property.
    cidr6,
    /// Delete the IPv4 gateway property.
    gateway,
    /// Delete the IPv6 gateway property.
    gateway6,
    /// Delete the whole IPv4 configuration entry.
    method,
    /// Delete the whole IPv6 configuration entry.
    method6,
    /// Delete IPv4 comments.
    comments,
    /// Delete IPv6 comments.
    comments6,
    /// Delete the MTU setting.
    mtu,
    /// Delete the autostart flag.
    autostart,
    /// Delete bridge ports (set to 'none').
    bridge_ports,
    /// Delete the bridge-vlan-aware flag.
    bridge_vlan_aware,
    /// Delete bond slaves (set to 'none').
    slaves,
}

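// Because the enum derives Serialize/Deserialize and keeps its non-camel-case
// variant names, a delete list arrives on the wire as plain lowercase strings.
// A hedged sketch of how such a list deserializes:
#[cfg(test)]
mod deletable_property_sketch {
    use super::DeletableProperty;

    #[test]
    fn parses_lowercase_names() {
        let v = serde_json::json!(["gateway", "mtu", "bridge_vlan_aware"]);
        let props: Vec<DeletableProperty> = serde_json::from_value(v).unwrap();
        assert_eq!(props.len(), 3);
    }
}
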
#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            "type": {
                description: "Interface type. If specified, it needs to match the current type.",
                type: NetworkInterfaceType,
                optional: true,
            },
            autostart: {
                description: "Autostart interface.",
                type: bool,
                optional: true,
            },
            method: {
                type: NetworkConfigMethod,
                optional: true,
            },
            method6: {
                type: NetworkConfigMethod,
                optional: true,
            },
            comments: {
                description: "Comments (inet, may span multiple lines)",
                type: String,
                optional: true,
            },
            comments6: {
                description: "Comments (inet6, may span multiple lines)",
                type: String,
                optional: true,
            },
            cidr: {
                schema: CIDR_V4_SCHEMA,
                optional: true,
            },
            cidr6: {
                schema: CIDR_V6_SCHEMA,
                optional: true,
            },
            gateway: {
                schema: IP_V4_SCHEMA,
                optional: true,
            },
            gateway6: {
                schema: IP_V6_SCHEMA,
                optional: true,
            },
            mtu: {
                description: "Maximum Transmission Unit.",
                optional: true,
                minimum: 46,
                maximum: 65535,
                default: 1500,
            },
            bridge_ports: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            bridge_vlan_aware: {
                description: "Enable bridge vlan support.",
                type: bool,
                optional: true,
            },
            bond_mode: {
                type: LinuxBondMode,
                optional: true,
            },
            slaves: {
                schema: NETWORK_INTERFACE_LIST_SCHEMA,
                optional: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Update network interface config.
pub fn update_interface(
    iface: String,
    autostart: Option<bool>,
    method: Option<NetworkConfigMethod>,
    method6: Option<NetworkConfigMethod>,
    comments: Option<String>,
    comments6: Option<String>,
    cidr: Option<String>,
    gateway: Option<String>,
    cidr6: Option<String>,
    gateway6: Option<String>,
    mtu: Option<u64>,
    bridge_ports: Option<String>,
    bridge_vlan_aware: Option<bool>,
    bond_mode: Option<LinuxBondMode>,
    slaves: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
    if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }

    let interface = config.lookup_mut(&iface)?;

    if let Some(interface_type) = param.get("type") {
        let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.clone())?;
        if interface_type != interface.interface_type {
            bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
        }
    }

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::cidr => { interface.cidr = None; },
                DeletableProperty::cidr6 => { interface.cidr6 = None; },
                DeletableProperty::gateway => { interface.gateway = None; },
                DeletableProperty::gateway6 => { interface.gateway6 = None; },
                DeletableProperty::method => { interface.method = None; },
                DeletableProperty::method6 => { interface.method6 = None; },
                DeletableProperty::comments => { interface.comments = None; },
                DeletableProperty::comments6 => { interface.comments6 = None; },
                DeletableProperty::mtu => { interface.mtu = None; },
                DeletableProperty::autostart => { interface.autostart = false; },
                DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
                DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
                DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
            }
        }
    }

    if let Some(autostart) = autostart { interface.autostart = autostart; }
    if method.is_some() { interface.method = method; }
    if method6.is_some() { interface.method6 = method6; }
    if mtu.is_some() { interface.mtu = mtu; }
    if let Some(ports) = bridge_ports {
        let ports = split_interface_list(&ports)?;
        interface.set_bridge_ports(ports)?;
    }
    if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
    if let Some(slaves) = slaves {
        let slaves = split_interface_list(&slaves)?;
        interface.set_bond_slaves(slaves)?;
    }
    if bond_mode.is_some() { interface.bond_mode = bond_mode; }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        interface.gateway6 = Some(gateway6);
    }

    if comments.is_some() { interface.comments = comments; }
    if comments6.is_some() { interface.comments6 = comments6; }

    if interface.cidr.is_some() || interface.gateway.is_some() {
        interface.method = Some(NetworkConfigMethod::Static);
    } else {
        interface.method = Some(NetworkConfigMethod::Manual);
    }

    if interface.cidr6.is_some() || interface.gateway6.is_some() {
        interface.method6 = Some(NetworkConfigMethod::Static);
    } else {
        interface.method6 = Some(NetworkConfigMethod::Manual);
    }

    network::save_config(&config)?;

    Ok(())
}

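// Sketch of the read-modify-write flow the optional digest enables: a client
// reads the config (and its digest), edits, and sends the digest back so a
// concurrent modification turns into an error instead of a lost update.
// `read_interface`/`http_put` below are hypothetical client-side names:
//
//   let (iface, digest) = read_interface("vmbr0")?;
//   http_put("/nodes/{node}/network/vmbr0", json!({
//       "comments": "uplink bridge",
//       "digest": digest,   // rejected if the file changed meanwhile
//   }))?;
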
#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            iface: {
                schema: NETWORK_INTERFACE_NAME_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Remove network interface configuration.
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let _interface = config.lookup(&iface)?; // check if interface exists

    config.interfaces.remove(&iface);

    network::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
    },
)]
/// Reload network configuration (requires ifupdown2).
pub async fn reload_network_config(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    network::assert_ifupdown2_installed()?;

    let username = rpcenv.get_user().unwrap();

    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {

        let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);

        network::network_reload()?;
        Ok(())
    })?;

    Ok(upid_str)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "network", "interfaces"], PRIV_SYS_MODIFY, false),
    },
)]
/// Revert network configuration (rm /etc/network/interfaces.new).
pub fn revert_network_config() -> Result<(), Error> {

    let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);

    Ok(())
}

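// The two handlers above implement a stage/commit/revert cycle: edits land in
// NETWORK_INTERFACES_NEW_FILENAME, reload renames that file over the live one,
// and revert just deletes the staged copy. A generic sketch of the pattern,
// with the paths taken from the doc comments above:
use std::io::Write;

fn stage(contents: &str) -> std::io::Result<()> {
    std::fs::File::create("/etc/network/interfaces.new")?
        .write_all(contents.as_bytes())
}

fn commit() -> std::io::Result<()> {
    // rename is atomic within one filesystem, so readers never observe
    // a half-written interfaces file
    std::fs::rename("/etc/network/interfaces.new", "/etc/network/interfaces")
}

fn revert() -> std::io::Result<()> {
    std::fs::remove_file("/etc/network/interfaces.new")
}
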
const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_INTERFACE)
    .put(&API_METHOD_UPDATE_INTERFACE)
    .delete(&API_METHOD_DELETE_INTERFACE);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_NETWORK_DEVICES)
    .put(&API_METHOD_RELOAD_NETWORK_CONFIG)
    .post(&API_METHOD_CREATE_INTERFACE)
    .delete(&API_METHOD_REVERT_NETWORK_CONFIG)
    .match_all("iface", &ITEM_ROUTER);

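// Resulting endpoint layout (inferred from the two routers above; the mount
// point under /nodes/{node}/network comes from the parent node router):
//   GET    network          -> list_network_devices
//   POST   network          -> create_interface
//   PUT    network          -> reload_network_config
//   DELETE network          -> revert_network_config
//   GET    network/{iface}  -> read_interface
//   PUT    network/{iface}  -> update_interface
//   DELETE network/{iface}  -> delete_interface
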
46
src/api2/node/rrd.rs
Normal file
@@ -0,0 +1,46 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, Router};

use crate::api2::types::*;

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            timeframe: {
                type: RRDTimeFrameResolution,
            },
            cf: {
                type: RRDMode,
            },
        },
    },
)]
/// Read node stats
fn get_node_stats(
    timeframe: RRDTimeFrameResolution,
    cf: RRDMode,
    _param: Value,
) -> Result<Value, Error> {

    crate::rrd::extract_data(
        "host",
        &[
            "cpu", "iowait",
            "memtotal", "memused",
            "swaptotal", "swapused",
            "netin", "netout",
            "roottotal", "rootused",
            "loadavg",
        ],
        timeframe,
        cf,
    )
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_NODE_STATS);
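// Hedged usage sketch: the handler can be exercised directly, assuming the
// RRDTimeFrameResolution/RRDMode enums expose variants along these lines
// (the variant names are assumptions, not taken from this diff):
fn node_stats_sketch() -> Result<serde_json::Value, anyhow::Error> {
    get_node_stats(
        RRDTimeFrameResolution::Hour,
        RRDMode::Average,
        serde_json::Value::Null,
    )
}
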
@@ -1,15 +1,15 @@
use std::process::{Command, Stdio};

use failure::*;
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

use crate::api2::types::*;
use crate::tools;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};

static SERVICE_NAME_LIST: [&str; 7] = [
    "proxmox-backup",
@@ -91,11 +91,45 @@ fn json_service_state(service: &str, status: Value) -> Value {
    Value::Null
}


#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "Returns a list of systemd services.",
        type: Array,
        items: {
            description: "Service details.",
            properties: {
                service: {
                    schema: SERVICE_ID_SCHEMA,
                },
                name: {
                    type: String,
                    description: "systemd service name.",
                },
                desc: {
                    type: String,
                    description: "systemd service description.",
                },
                state: {
                    type: String,
                    description: "systemd service 'SubState'.",
                },
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services"], PRIV_SYS_AUDIT, false),
    },
)]
/// Service list.
fn list_services(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let mut list = vec![];
@@ -115,21 +149,36 @@ fn list_services(
    Ok(Value::from(list))
}

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            service: {
                schema: SERVICE_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read service properties.
fn get_service_state(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    service: String,
    _param: Value,
) -> Result<Value, Error> {

    let service = tools::required_string_param(&param, "service")?;
    let service = service.as_str();

    if !SERVICE_NAME_LIST.contains(&service) {
        bail!("unknown service name '{}'", service);
    }

    let status = get_full_service_state(service)?;
    let status = get_full_service_state(&service)?;

    Ok(json_service_state(service, status))
    Ok(json_service_state(&service, status))
}

fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
@@ -158,61 +207,117 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
    Ok(Value::Null)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            service: {
                schema: SERVICE_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Start service.
fn start_service(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    service: String,
    _param: Value,
) -> Result<Value, Error> {

    let service = tools::required_string_param(&param, "service")?;

    log::info!("starting service {}", service);

    run_service_command(service, "start")
    run_service_command(&service, "start")
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            service: {
                schema: SERVICE_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Stop service.
fn stop_service(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    service: String,
    _param: Value,
) -> Result<Value, Error> {

    let service = tools::required_string_param(&param, "service")?;

    log::info!("stopping service {}", service);

    run_service_command(service, "stop")
    run_service_command(&service, "stop")
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            service: {
                schema: SERVICE_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Restart service.
fn restart_service(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    service: String,
    _param: Value,
) -> Result<Value, Error> {

    let service = tools::required_string_param(&param, "service")?;

    log::info!("re-starting service {}", service);

    if service == "proxmox-backup-proxy" {
    if &service == "proxmox-backup-proxy" {
        // special case, avoid aborting running tasks
        run_service_command(service, "reload")
        run_service_command(&service, "reload")
    } else {
        run_service_command(service, "restart")
        run_service_command(&service, "restart")
    }
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            service: {
                schema: SERVICE_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false),
    },
)]
/// Reload service.
fn reload_service(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    service: String,
    _param: Value,
) -> Result<Value, Error> {

    let service = tools::required_string_param(&param, "service")?;

    log::info!("reloading service {}", service);

    run_service_command(service, "reload")
    run_service_command(&service, "reload")
}

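// The restart handler special-cases proxmox-backup-proxy with "reload" so the
// proxy serving this very request is not killed mid-task. A minimal sketch of
// the underlying call (run_service_command's body is elided from this hunk,
// so its exact shape here is an assumption):
use std::process::Command;

fn systemctl(cmd: &str, service: &str) -> Result<(), anyhow::Error> {
    let status = Command::new("systemctl").arg(cmd).arg(service).status()?;
    anyhow::ensure!(status.success(), "systemctl {} {} failed", cmd, service);
    Ok(())
}
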
@@ -221,111 +326,33 @@ const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
    .schema();

#[sortable]
const SERVICE_SUBDIRS: SubdirMap = &[
const SERVICE_SUBDIRS: SubdirMap = &sorted!([
    (
        "reload", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&reload_service),
                    &ObjectSchema::new(
                        "Reload service.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("service", false, &SERVICE_ID_SCHEMA),
                        ]),
                    )
                ).protected(true)
            )
            .post(&API_METHOD_RELOAD_SERVICE)
    ),
    (
        "restart", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&restart_service),
                    &ObjectSchema::new(
                        "Restart service.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("service", false, &SERVICE_ID_SCHEMA),
                        ]),
                    )
                ).protected(true)
            )
            .post(&API_METHOD_RESTART_SERVICE)
    ),
    (
        "start", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&start_service),
                    &ObjectSchema::new(
                        "Start service.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("service", false, &SERVICE_ID_SCHEMA),
                        ]),
                    )
                ).protected(true)
            )
            .post(&API_METHOD_START_SERVICE)
    ),
    (
        "state", &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&get_service_state),
                    &ObjectSchema::new(
                        "Read service properties.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("service", false, &SERVICE_ID_SCHEMA),
                        ]),
                    )
                )
            )
            .get(&API_METHOD_GET_SERVICE_STATE)
    ),
    (
        "stop", &Router::new()
            .post(
                &ApiMethod::new(
                    &ApiHandler::Sync(&stop_service),
                    &ObjectSchema::new(
                        "Stop service.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("service", false, &SERVICE_ID_SCHEMA),
                        ]),
                    )
                ).protected(true)
            )
            .post(&API_METHOD_STOP_SERVICE)
    ),
];
]);
const SERVICE_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SERVICE_SUBDIRS))
    .subdirs(SERVICE_SUBDIRS);

#[sortable]
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&list_services),
            &ObjectSchema::new(
                "Service list.",
                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
            )
        ).returns(
            &ArraySchema::new(
                "Returns a list of systemd services.",
                &ObjectSchema::new(
                    "Service details.",
                    &sorted!([
                        ("service", false, &SERVICE_ID_SCHEMA),
                        ("name", false, &StringSchema::new("systemd service name.").schema()),
                        ("desc", false, &StringSchema::new("systemd service description.").schema()),
                        ("state", false, &StringSchema::new("systemd service 'SubState'.").schema()),
                    ]),
                ).schema()
            ).schema()
        )
    )
    .get(&API_METHOD_LIST_SERVICES)
    .match_all("service", &SERVICE_ROUTER);

@@ -1,12 +1,14 @@
use failure::*;
use std::process::Command;

use anyhow::{Error, format_err, bail};
use serde_json::{json, Value};

use proxmox::sys::linux::procfs;

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, SubdirMap};
use proxmox::list_subdirs_api_method;
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};

#[api(
    input: {
@@ -44,7 +46,10 @@ use crate::api2::types::*;
            optional: true,
        },
    }
}
    },
    access: {
        permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read node memory, CPU and (root) disk usage
fn get_usage(
@@ -66,12 +71,49 @@ fn get_usage(
    }))
}

pub const USAGE_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_USAGE);
#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            command: {
                type: NodePowerCommand,
            },
        }
    },
    access: {
        permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_POWER_MANAGEMENT, false),
    },
)]
/// Reboot or shutdown the node.
fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {

    let systemctl_command = match command {
        NodePowerCommand::Reboot => "reboot",
        NodePowerCommand::Shutdown => "poweroff",
    };

    let output = Command::new("/bin/systemctl")
        .arg(systemctl_command)
        .output()
        .map_err(|err| format_err!("failed to execute systemctl - {}", err))?;

    if !output.status.success() {
        match output.status.code() {
            Some(code) => {
                let msg = String::from_utf8(output.stderr)
                    .map(|m| if m.is_empty() { String::from("no error message") } else { m })
                    .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
                bail!("systemctl failed with status code: {} - {}", code, msg);
            }
            None => bail!("systemctl terminated by signal"),
        }
    }
    Ok(())
}

pub const SUBDIRS: SubdirMap = &[
    ("usage", &USAGE_ROUTER),
];
pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
    .get(&API_METHOD_GET_USAGE)
    .post(&API_METHOD_REBOOT_OR_SHUTDOWN);

@@ -1,11 +1,12 @@
use std::process::{Command, Stdio};

use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl::PRIV_SYS_AUDIT;

fn dump_journal(
    start: Option<u64>,
@@ -122,12 +123,15 @@ fn dump_journal(
    }
},
},
    access: {
        permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false),
    },
)]
/// Read syslog entries.
fn get_syslog(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (count, lines) = dump_journal(
@@ -137,7 +141,7 @@ fn get_syslog(
        param["until"].as_str(),
        param["service"].as_str())?;

    rpcenv.set_result_attrib("total", Value::from(count));
    rpcenv["total"] = Value::from(count);

    Ok(json!(lines))
}

@@ -1,26 +1,96 @@
use std::fs::File;
use std::io::{BufRead, BufReader};

use failure::*;
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::{identity, list_subdirs_api_method, sortable};

use crate::tools;
use crate::api2::types::*;
use crate::server::{self, UPID};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;

fn get_task_status(

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        },
    },
    returns: {
        description: "Task status information.",
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
            pid: {
                type: i64,
                description: "The Unix PID.",
            },
            pstart: {
                type: u64,
                description: "The Unix process start time from `/proc/pid/stat`",
            },
            starttime: {
                type: i64,
                description: "The task start time (Epoch)",
            },
            "type": {
                type: String,
                description: "Worker type (arbitrary ASCII string)",
            },
            id: {
                type: String,
                optional: true,
                description: "Worker ID (arbitrary ASCII string)",
            },
            user: {
                type: String,
                description: "The user who started the task.",
            },
            status: {
                type: String,
                description: "'running' or 'stopped'",
            },
            exitstatus: {
                type: String,
                optional: true,
                description: "'OK', 'Error: <msg>', or 'unknown'.",
            },
        },
    },
    access: {
        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// Get task status.
async fn get_task_status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let upid = extract_upid(&param)?;

    let username = rpcenv.get_user().unwrap();

    if username != upid.username {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }

    let mut result = json!({
        "upid": param["upid"],
        "node": upid.node,
@@ -32,7 +102,7 @@ fn get_task_status(
        "user": upid.username,
    });

    if crate::server::worker_is_active(&upid) {
    if crate::server::worker_is_active(&upid).await? {
        result["status"] = Value::from("running");
    } else {
        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
@@ -50,14 +120,54 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
    upid_str.parse::<UPID>()
}

fn read_task_log(
#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
            "test-status": {
                type: bool,
                optional: true,
                description: "Test task status, and set result attribute \"active\" accordingly.",
            },
            start: {
                type: u64,
                optional: true,
                description: "Start at this line.",
                default: 0,
            },
            limit: {
                type: u64,
                optional: true,
                description: "Only list this amount of lines.",
                default: 50,
            },
        },
    },
    access: {
        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// Read task log.
async fn read_task_log(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let upid = extract_upid(&param)?;

    let username = rpcenv.get_user().unwrap();

    if username != upid.username {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }

    let test_status = param["test-status"].as_bool().unwrap_or(false);

    let start = param["start"].as_u64().unwrap_or(0);
@@ -89,28 +199,50 @@ fn read_task_log(
        }
    }

    rpcenv.set_result_attrib("total", Value::from(count));
    rpcenv["total"] = Value::from(count);

    if test_status {
        let active = crate::server::worker_is_active(&upid);
        rpcenv.set_result_attrib("active", Value::from(active));
        let active = crate::server::worker_is_active(&upid).await?;
        rpcenv["active"] = Value::from(active);
    }

    Ok(json!(lines))
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        },
    },
    access: {
        description: "Users can stop their own tasks, or need Sys.Modify on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// Try to stop a task.
fn stop_task(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let upid = extract_upid(&param)?;

    if crate::server::worker_is_active(&upid) {
        server::abort_worker_async(upid);
    let username = rpcenv.get_user().unwrap();

    if username != upid.username {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
    }

    server::abort_worker_async(upid);

    Ok(Value::Null)
}

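// The same owner-or-privileged gate appears in get_task_status, read_task_log
// and stop_task above. A sketch of factoring it out (check_task_access is a
// hypothetical helper, not part of this patch):
fn check_task_access(username: &str, upid: &UPID, required_priv: u64) -> Result<(), Error> {
    if username != upid.username {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(username, &["system", "tasks"], required_priv, false)?;
    }
    Ok(())
}
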
@@ -140,11 +272,13 @@ fn stop_task(
                type: bool,
                description: "Only list running tasks.",
                optional: true,
                default: false,
            },
            errors: {
                type: bool,
                description: "Only list erroneous tasks.",
                optional: true,
                default: false,
            },
            userfilter: {
                optional: true,
@@ -158,18 +292,26 @@ fn stop_task(
        type: Array,
        items: { type: TaskListItem },
    },
    access: {
        description: "Users can only see their own tasks, unless they have Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// List tasks.
pub fn list_tasks(
    start: u64,
    limit: u64,
    errors: bool,
    running: bool,
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let start = param["start"].as_u64().unwrap_or(0);
    let limit = param["limit"].as_u64().unwrap_or(50);
    let errors = param["errors"].as_bool().unwrap_or(false);
    let running = param["running"].as_bool().unwrap_or(false);
    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

    let store = param["store"].as_str();

@@ -182,6 +324,8 @@ pub fn list_tasks(
    let mut count = 0;

    for info in list.iter() {
        if !list_all && info.upid.username != username { continue; }

        let mut entry = TaskListItem {
            upid: info.upid_str.clone(),
            node: "localhost".to_string(),
@@ -238,79 +382,28 @@ pub fn list_tasks(
        if (result.len() as u64) < limit { result.push(entry); };
    }

    rpcenv.set_result_attrib("total", Value::from(count));
    rpcenv["total"] = Value::from(count);

    Ok(result)
}

#[sortable]
const UPID_API_SUBDIRS: SubdirMap = &[
const UPID_API_SUBDIRS: SubdirMap = &sorted!([
    (
        "log", &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&read_task_log),
                    &ObjectSchema::new(
                        "Read task log.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ( "test-status",
                              true,
                              &BooleanSchema::new(
                                  "Test task status, and set result attribute \"active\" accordingly."
                              ).schema()
                            ),
                            ("upid", false, &UPID_SCHEMA),
                            ("start", true, &IntegerSchema::new("Start at this line.")
                                .minimum(0)
                                .default(0)
                                .schema()
                            ),
                            ("limit", true, &IntegerSchema::new("Only list this amount of lines.")
                                .minimum(0)
                                .default(50)
                                .schema()
                            ),
                        ]),
                    )
                )
            )
            .get(&API_METHOD_READ_TASK_LOG)
    ),
    (
        "status", &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&get_task_status),
                    &ObjectSchema::new(
                        "Get task status.",
                        &sorted!([
                            ("node", false, &NODE_SCHEMA),
                            ("upid", false, &UPID_SCHEMA),
                        ]),
            .get(&API_METHOD_GET_TASK_STATUS)
                    )
                )
            )
    )
];
]);

#[sortable]
pub const UPID_API_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
    .delete(
        &ApiMethod::new(
            &ApiHandler::Sync(&stop_task),
            &ObjectSchema::new(
                "Try to stop a task.",
                &sorted!([
                    ("node", false, &NODE_SCHEMA),
                    ("upid", false, &UPID_SCHEMA),
                ]),
            )
        ).protected(true)
    )
    .delete(&API_METHOD_STOP_TASK)
    .subdirs(&UPID_API_SUBDIRS);

#[sortable]
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TASKS)
    .match_all("upid", &UPID_API_ROUTER);

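// The visibility rule in list_tasks, isolated into a pure function
// (task_visible is a hypothetical helper, not part of this patch):
fn task_visible(has_sys_audit: bool, task_owner: &str, username: &str) -> bool {
    has_sys_audit || task_owner == username
}
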
@@ -1,14 +1,11 @@
use std::mem::{self, MaybeUninit};

use chrono::prelude::*;
use failure::*;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::{api, Router, Permission};
use proxmox::tools::fs::{file_read_firstline, replace_file, CreateOptions};

use crate::config::acl::PRIV_SYS_MODIFY;
use crate::api2::types::*;

fn read_etc_localtime() -> Result<String, Error> {
@@ -18,34 +15,48 @@ fn read_etc_localtime() -> Result<String, Error> {
    }

    // otherwise guess from the /etc/localtime symlink
    let mut buf = MaybeUninit::<[u8; 64]>::uninit();
    let len = unsafe {
        libc::readlink(
            "/etc/localtime".as_ptr() as *const _,
            buf.as_mut_ptr() as *mut _,
            mem::size_of_val(&buf),
        )
    };
    if len <= 0 {
        bail!("failed to guess timezone");
    }
    let len = len as usize;
    let buf = unsafe {
        (*buf.as_mut_ptr())[len] = 0;
        buf.assume_init()
    };
    let link = std::str::from_utf8(&buf[..len])?;
    let link = std::fs::read_link("/etc/localtime")
        .map_err(|err| format_err!("failed to guess timezone - {}", err))?;

    let link = link.to_string_lossy();
    match link.rfind("/zoneinfo/") {
        Some(pos) => Ok(link[(pos + 10)..].to_string()),
        None => Ok(link.to_string()),
    }
}

fn get_time(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "Returns server time and timezone.",
        properties: {
            timezone: {
                schema: TIME_ZONE_SCHEMA,
            },
            time: {
                type: i64,
                description: "Seconds since 1970-01-01 00:00:00 UTC.",
                minimum: 1_297_163_644,
            },
            localtime: {
                type: i64,
                description: "Seconds since 1970-01-01 00:00:00 UTC. (local time)",
                minimum: 1_297_163_644,
            },
        }
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Read server time and time zone settings.
fn get_time(_param: Value) -> Result<Value, Error> {
    let datetime = Local::now();
    let offset = datetime.offset();
    let time = datetime.timestamp();
@@ -58,13 +69,28 @@ fn get_time(
    }))
}

#[api(
    protected: true,
    reload_timezone: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            timezone: {
                schema: TIME_ZONE_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "time"], PRIV_SYS_MODIFY, false),
    },
)]
/// Set time zone
fn set_timezone(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
    timezone: String,
    _param: Value,
) -> Result<Value, Error> {
    let timezone = crate::tools::required_string_param(&param, "timezone")?;

    let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));

    if !path.exists() {
@@ -81,45 +107,6 @@ fn set_timezone(
    Ok(Value::Null)
}

#[sortable]
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_time),
            &ObjectSchema::new(
                "Read server time and time zone settings.",
                &sorted!([ ("node", false, &NODE_SCHEMA) ]),
            )
        ).returns(
            &ObjectSchema::new(
                "Returns server time and timezone.",
                &sorted!([
                    ("timezone", false, &StringSchema::new("Time zone").schema()),
                    ("time", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.")
                        .minimum(1_297_163_644)
                        .schema()
                    ),
                    ("localtime", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC. (local time)")
                        .minimum(1_297_163_644)
                        .schema()
                    ),
                ]),
            ).schema()
        )
    )
    .put(
        &ApiMethod::new(
            &ApiHandler::Sync(&set_timezone),
            &ObjectSchema::new(
                "Set time zone.",
                &sorted!([
                    ("node", false, &NODE_SCHEMA),
                    ("timezone", false, &StringSchema::new(
                        "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
                        .schema()
                    ),
                ]),
            )
        ).protected(true).reload_timezone(true)
    );

    .get(&API_METHOD_GET_TIME)
    .put(&API_METHOD_SET_TIMEZONE);
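// The rewrite above replaces a manual unsafe libc::readlink with
// std::fs::read_link; a self-contained equivalent of the timezone guess:
fn guess_timezone() -> Result<String, std::io::Error> {
    let link = std::fs::read_link("/etc/localtime")?;
    let link = link.to_string_lossy();
    Ok(match link.rfind("/zoneinfo/") {
        // strip everything up to and including "/zoneinfo/"
        Some(pos) => link[(pos + "/zoneinfo/".len())..].to_string(),
        None => link.to_string(),
    })
}
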
401
src/api2/pull.rs
@@ -1,371 +1,19 @@
//! Sync datastore from remote server

use failure::*;
use serde_json::json;
use std::convert::TryFrom;
use std::sync::Arc;
use std::collections::HashMap;
use std::io::{Seek, SeekFrom};
use chrono::{Utc, TimeZone};
use anyhow::{format_err, Error};

use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};

use crate::server::{WorkerTask};
use crate::backup::*;
use crate::client::*;
use crate::config::remote;
use crate::backup::DataStore;
use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
use crate::api2::types::*;

// fixme: implement filters
// fixme: delete vanished groups
// Todo: correctly lock backup groups

async fn pull_index_chunks<I: IndexFile>(
    _worker: &WorkerTask,
    chunk_reader: &mut RemoteChunkReader,
    target: Arc<DataStore>,
    index: I,
) -> Result<(), Error> {


    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let chunk_exists = target.cond_touch_chunk(digest, false)?;
        if chunk_exists {
            //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
            continue;
        }
        //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
        let chunk = chunk_reader.read_raw_chunk(&digest)?;

        target.insert_chunk(&chunk, &digest)?;
    }

    Ok(())
}

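// Illustration of the dedup step in pull_index_chunks: the target store is
// content-addressed by chunk digest, so only digests it does not already hold
// are fetched (a generic sketch, not the crate's actual types):
use std::collections::HashSet;

fn chunks_to_fetch<'a>(
    index: &'a [[u8; 32]],
    locally_present: &HashSet<[u8; 32]>,
) -> Vec<&'a [u8; 32]> {
    index.iter().filter(|digest| !locally_present.contains(*digest)).collect()
}
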
async fn download_manifest(
    reader: &BackupReader,
    filename: &std::path::Path,
) -> Result<std::fs::File, Error> {

    let tmp_manifest_file = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .read(true)
        .open(&filename)?;

    let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;

    tmp_manifest_file.seek(SeekFrom::Start(0))?;

    Ok(tmp_manifest_file)
}

async fn pull_single_archive(
    worker: &WorkerTask,
    reader: &BackupReader,
    chunk_reader: &mut RemoteChunkReader,
    tgt_store: Arc<DataStore>,
    snapshot: &BackupDir,
    archive_name: &str,
) -> Result<(), Error> {

    let mut path = tgt_store.base_path();
    path.push(snapshot.relative_path());
    path.push(archive_name);

    let mut tmp_path = path.clone();
    tmp_path.set_extension("tmp");

    worker.log(format!("sync archive {}", archive_name));
    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .read(true)
        .open(&tmp_path)?;

    let tmpfile = reader.download(archive_name, tmpfile).await?;

    match archive_type(archive_name)? {
        ArchiveType::DynamicIndex => {
            let index = DynamicIndexReader::new(tmpfile)
                .map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;

            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
        }
        ArchiveType::FixedIndex => {
            let index = FixedIndexReader::new(tmpfile)
                .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;

            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
        }
        ArchiveType::Blob => { /* nothing to do */ }
    }
    if let Err(err) = std::fs::rename(&tmp_path, &path) {
        bail!("Atomic rename file {:?} failed - {}", path, err);
    }
    Ok(())
}

async fn pull_snapshot(
    worker: &WorkerTask,
    reader: Arc<BackupReader>,
    tgt_store: Arc<DataStore>,
    snapshot: &BackupDir,
) -> Result<(), Error> {

    let mut manifest_name = tgt_store.base_path();
    manifest_name.push(snapshot.relative_path());
    manifest_name.push(MANIFEST_BLOB_NAME);

    let mut tmp_manifest_name = manifest_name.clone();
    tmp_manifest_name.set_extension("tmp");

    let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
    let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
    tmp_manifest_blob.verify_crc()?;

    if manifest_name.exists() {
        let manifest_blob = proxmox::try_block!({
            let mut manifest_file = std::fs::File::open(&manifest_name)
                .map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;

            let manifest_blob = DataBlob::load(&mut manifest_file)?;
            manifest_blob.verify_crc()?;
            Ok(manifest_blob)
        }).map_err(|err: Error| {
            format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
        })?;

        if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
            return Ok(()); // nothing changed
        }
    }

    let manifest = BackupManifest::try_from(tmp_manifest_blob)?;

    let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());

    for item in manifest.files() {
        let mut path = tgt_store.base_path();
        path.push(snapshot.relative_path());
        path.push(&item.filename);

        if path.exists() {
            match archive_type(&item.filename)? {
                ArchiveType::DynamicIndex => {
                    let index = DynamicIndexReader::open(&path)?;
                    let (csum, size) = index.compute_csum();
                    match manifest.verify_file(&item.filename, &csum, size) {
                        Ok(_) => continue,
                        Err(err) => {
                            worker.log(format!("detected changed file {:?} - {}", path, err));
                        }
                    }
                }
                ArchiveType::FixedIndex => {
                    let index = FixedIndexReader::open(&path)?;
                    let (csum, size) = index.compute_csum();
                    match manifest.verify_file(&item.filename, &csum, size) {
                        Ok(_) => continue,
                        Err(err) => {
                            worker.log(format!("detected changed file {:?} - {}", path, err));
                        }
                    }
                }
                ArchiveType::Blob => {
                    let mut tmpfile = std::fs::File::open(&path)?;
                    let (csum, size) = compute_file_csum(&mut tmpfile)?;
                    match manifest.verify_file(&item.filename, &csum, size) {
                        Ok(_) => continue,
                        Err(err) => {
                            worker.log(format!("detected changed file {:?} - {}", path, err));
                        }
                    }
                }
            }
        }

        pull_single_archive(
            worker,
            &reader,
            &mut chunk_reader,
            tgt_store.clone(),
            snapshot,
            &item.filename,
        ).await?;
    }

    if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
        bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
    }

    // cleanup - remove stale files
    tgt_store.cleanup_backup_dir(snapshot, &manifest)?;

    Ok(())
}

pub async fn pull_snapshot_from(
    worker: &WorkerTask,
    reader: Arc<BackupReader>,
    tgt_store: Arc<DataStore>,
    snapshot: &BackupDir,
) -> Result<(), Error> {

    let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?;

    if is_new {
        worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));

        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
                worker.log(format!("cleanup error - {}", cleanup_err));
            }
            return Err(err);
        }
    } else {
        worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
    }

    Ok(())
}

pub async fn pull_group(
    worker: &WorkerTask,
    client: &HttpClient,
    src_repo: &BackupRepository,
    tgt_store: Arc<DataStore>,
    group: &BackupGroup,
    delete: bool,
) -> Result<(), Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());

    let args = json!({
        "backup-type": group.backup_type(),
        "backup-id": group.backup_id(),
    });

    let mut result = client.get(&path, Some(args)).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;

    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));

    let auth_info = client.login().await?;
    let fingerprint = client.fingerprint();

    let last_sync = tgt_store.last_successful_backup(group)?;

    let mut remote_snapshots = std::collections::HashSet::new();

    for item in list {
        let backup_time = Utc.timestamp(item.backup_time, 0);
        remote_snapshots.insert(backup_time);

        if let Some(last_sync_time) = last_sync {
            if last_sync_time > backup_time { continue; }
        }

        let options = HttpClientOptions::new()
            .password(Some(auth_info.ticket.clone()))
            .fingerprint(fingerprint.clone());

        let new_client = HttpClient::new(src_repo.host(), src_repo.user(), options)?;

        let reader = BackupReader::start(
            new_client,
            None,
            src_repo.store(),
            &item.backup_type,
            &item.backup_id,
            backup_time,
            true,
        ).await?;

        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);

        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
    }

    if delete {
        let local_list = group.list_backups(&tgt_store.base_path())?;
        for info in local_list {
            let backup_time = info.backup_dir.backup_time();
            if remote_snapshots.contains(&backup_time) { continue; }
            worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
            tgt_store.remove_backup_dir(&info.backup_dir)?;
        }
    }

    Ok(())
}

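// The snapshot-selection rule in pull_group, reduced to a pure function over
// epoch timestamps (a sketch; the real code compares chrono DateTimes):
fn needs_pull(last_sync: Option<i64>, remote_backup_time: i64) -> bool {
    match last_sync {
        Some(t) => remote_backup_time >= t,
        None => true, // no local backup yet: pull everything
    }
}
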
pub async fn pull_store(
    worker: &WorkerTask,
    client: &HttpClient,
    src_repo: &BackupRepository,
    tgt_store: Arc<DataStore>,
    delete: bool,
) -> Result<(), Error> {

    let path = format!("api2/json/admin/datastore/{}/groups", src_repo.store());

    let mut result = client.get(&path, None).await?;

    let mut list: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?;

    list.sort_unstable_by(|a, b| {
        let type_order = a.backup_type.cmp(&b.backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a.backup_id.cmp(&b.backup_id)
        } else {
            type_order
        }
    });

    let mut errors = false;

    let mut new_groups = std::collections::HashSet::new();

    for item in list {
        let group = BackupGroup::new(&item.backup_type, &item.backup_id);
        if let Err(err) = pull_group(worker, client, src_repo, tgt_store.clone(), &group, delete).await {
            worker.log(format!("sync group {}/{} failed - {}", item.backup_type, item.backup_id, err));
            errors = true;
            // do not stop here, instead continue
        }
        new_groups.insert(group);
    }

    if delete {
        let result: Result<(), Error> = proxmox::try_block!({
            let local_groups = BackupGroup::list_groups(&tgt_store.base_path())?;
            for local_group in local_groups {
                if new_groups.contains(&local_group) { continue; }
                worker.log(format!("delete vanished group '{}/{}'", local_group.backup_type(), local_group.backup_id()));
                if let Err(err) = tgt_store.remove_backup_group(&local_group) {
                    worker.log(err.to_string());
                    errors = true;
                }
            }
            Ok(())
        });
        if let Err(err) = result {
            worker.log(format!("error during cleanup: {}", err));
            errors = true;
use crate::config::{
    remote,
    acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
    cached_user_info::CachedUserInfo,
};
        }
    }

    if errors {
        bail!("sync failed with some errors.");
    }

    Ok(())
}

#[api(
    input: {
@@ -379,28 +27,42 @@ pub async fn pull_store(
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        delete: {
            description: "Delete vanished backups. This removes the local copy if the remote backup was deleted.",
            type: Boolean,
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
            default: true,
        },
        },
    },
    access: {
        // Note: the used parameters are not URI parameters, so we need to test inside the function body
        description: r###"The user needs Datastore.Backup privilege on '/datastore/{store}',
and needs to own the backup group. Remote.Read is required on '/remote/{remote}/{remote-store}'.
The delete flag additionally requires the Datastore.Prune privilege on '/datastore/{store}'.
"###,
        permission: &Permission::Anybody,
    },
)]
/// Sync store from other repository
async fn pull (
    store: String,
    remote: String,
    remote_store: String,
    delete: Option<bool>,
    remove_vanished: Option<bool>,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let delete = delete.unwrap_or(true);
    let username = rpcenv.get_user().unwrap();
    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(&username, &["remote", &remote, &remote_store], PRIV_REMOTE_READ, false)?;

    let delete = remove_vanished.unwrap_or(true);

    if delete {
        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_PRUNE, false)?;
    }

    let tgt_store = DataStore::lookup_datastore(&store)?;

@@ -423,10 +85,7 @@ async fn pull (

    worker.log(format!("sync datastore '{}' start", store));

    // explicitly create a shared lock to prevent GC on newly created chunks
    let _shared_store_lock = tgt_store.try_shared_chunk_store_lock()?;

    pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete).await?;
    pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;

    worker.log(format!("sync datastore '{}' end", store));

@@ -1,5 +1,5 @@
//use chrono::{Local, TimeZone};
use failure::*;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::header::{self, HeaderValue, UPGRADE};
use hyper::http::request::Parts;
@@ -7,7 +7,7 @@ use hyper::{Body, Response, StatusCode};
use serde_json::Value;

use proxmox::{sortable, identity};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment};
use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::*;
use proxmox::http_err;

@@ -15,6 +15,8 @@ use crate::api2::types::*;
use crate::backup::*;
use crate::server::{WorkerTask, H2Service};
use crate::tools;
use crate::config::acl::PRIV_DATASTORE_READ;
use crate::config::cached_user_info::CachedUserInfo;

mod environment;
use environment::*;
@@ -29,18 +31,16 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &StringSchema::new("Backup type.")
                .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
                .schema()
            ),
            ("backup-id", false, &StringSchema::new("Backup ID.").schema()),
            ("backup-time", false, &IntegerSchema::new("Backup time (Unix epoch.)")
                .minimum(1_547_797_308)
                .schema()
            ),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
        ]),
    )
).access(
    // Note: parameter 'store' is not a URI parameter, so we need to test inside the function body
    Some("The user needs Datastore.Read privilege on /datastore/{store}."),
    &Permission::Anybody
);

fn upgrade_to_backup_reader_protocol(
@@ -54,7 +54,12 @@ fn upgrade_to_backup_reader_protocol(
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

        let username = rpcenv.get_user().unwrap();
        let store = tools::required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
@@ -75,7 +80,6 @@ fn upgrade_to_backup_reader_protocol(
            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
        }

        let username = rpcenv.get_user().unwrap();
        let env_type = rpcenv.env_type();

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
@ -1,8 +1,7 @@
|
||||
//use failure::*;
|
||||
//use anyhow::{bail, format_err, Error};
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde_json::Value;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
|
||||
|
||||
@ -16,7 +15,7 @@ use crate::server::formatter::*;
|
||||
#[derive(Clone)]
|
||||
pub struct ReaderEnvironment {
|
||||
env_type: RpcEnvironmentType,
|
||||
result_attributes: HashMap<String, Value>,
|
||||
result_attributes: Value,
|
||||
user: String,
|
||||
pub debug: bool,
|
||||
pub formatter: &'static OutputFormatter,
|
||||
@ -37,7 +36,7 @@ impl ReaderEnvironment {
|
||||
|
||||
|
||||
Self {
|
||||
result_attributes: HashMap::new(),
|
||||
result_attributes: json!({}),
|
||||
env_type,
|
||||
user,
|
||||
worker,
|
||||
@ -61,12 +60,12 @@ impl ReaderEnvironment {
|
||||
|
||||
impl RpcEnvironment for ReaderEnvironment {
|
||||
|
||||
fn set_result_attrib(&mut self, name: &str, value: Value) {
|
||||
self.result_attributes.insert(name.into(), value);
|
||||
fn result_attrib_mut(&mut self) -> &mut Value {
|
||||
&mut self.result_attributes
|
||||
}
|
||||
|
||||
fn get_result_attrib(&self, name: &str) -> Option<&Value> {
|
||||
self.result_attributes.get(name)
|
||||
fn result_attrib(&self) -> &Value {
|
||||
&self.result_attributes
|
||||
}
|
||||
|
||||
fn env_type(&self) -> RpcEnvironmentType {
|
||||
|
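
The RpcEnvironment change above swaps per-key accessors for direct access to a serde_json::Value, so call sites build result attributes the same way they build any other JSON. A small sketch of how a caller migrates (the "upid" key is illustrative):

    use serde_json::json;

    // before: env.set_result_attrib("upid", json!(upid_str));
    // after: index into the Value handed out by the environment;
    // result_attributes starts as json!({}), so object indexing works
    fn set_upid(env: &mut dyn RpcEnvironment, upid_str: &str) {
        env.result_attrib_mut()["upid"] = json!(upid_str);
    }
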
@@ -1,16 +1,39 @@
-use failure::*;
+use anyhow::{Error};
use serde_json::{json, Value};

-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
-use proxmox::api::schema::ObjectSchema;
+use proxmox::api::{api, Router, Permission};

use crate::tools;
+use crate::config::acl::PRIV_SYS_AUDIT;

-fn get_subscription(
-    _param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+#[api(
+    returns: {
+        description: "Subscription status.",
+        properties: {
+            status: {
+                type: String,
+                description: "'NotFound', 'active' or 'inactive'."
+            },
+            message: {
+                type: String,
+                description: "Human readable problem description.",
+            },
+            serverid: {
+                type: String,
+                description: "The unique server ID.",
+            },
+            url: {
+                type: String,
+                description: "URL to Web Shop.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Read subscription info.
+fn get_subscription(_param: Value) -> Result<Value, Error> {

    let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
    Ok(json!({
@@ -22,9 +45,4 @@ fn get_subscription(
}

pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_subscription),
-            &ObjectSchema::new("Read subscription info.", &[])
-        )
-    );
+    .get(&API_METHOD_GET_SUBSCRIPTION);
@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, schema::*};
@@ -25,11 +25,22 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }

#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

const_regex!{
-    pub IP_FORMAT_REGEX = IPRE!();
    pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
    pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
    pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
    pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
    pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
    pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");

    pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
    pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?

@@ -54,14 +65,24 @@ const_regex!{

    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");

    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");

    pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

    pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
}
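
A quick sanity check of what the new anchored CIDR patterns accept (a hypothetical test; IPV4RE!/IPV6RE! come from the proxmox crate's shared regexes, and behaviour is assumed to match the PVE equivalents):

    #[test]
    fn cidr_regex_examples() {
        assert!(CIDR_V4_REGEX.is_match("192.168.0.0/24"));
        assert!(!CIDR_V4_REGEX.is_match("192.168.0.0"));   // prefix length required
        assert!(CIDR_V6_REGEX.is_match("fd00::/8"));
        assert!(CIDR_REGEX.is_match("10.0.0.0/8"));        // accepts either family
    }
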
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);

pub const IP_V4_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&IP_V4_REGEX);

pub const IP_V6_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&IP_V6_REGEX);

pub const IP_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&IP_FORMAT_REGEX);
+    ApiStringFormat::Pattern(&IP_REGEX);

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
@@ -87,10 +108,39 @@ pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);

pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);

pub const PASSWORD_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PASSWORD_REGEX);

pub const ACL_PATH_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&ACL_PATH_REGEX);

pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const CIDR_V4_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&CIDR_V4_REGEX);

pub const CIDR_V6_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&CIDR_V6_REGEX);

pub const CIDR_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&CIDR_REGEX);


pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(1)
    .max_length(1024)
    .schema();

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
    "X509 certificate fingerprint (sha256)."
@@ -142,6 +192,68 @@ pub const THIRD_DNS_SERVER_SCHEMA: Schema =
    .format(&IP_FORMAT)
    .schema();

pub const IP_V4_SCHEMA: Schema =
    StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

pub const IP_V6_SCHEMA: Schema =
    StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

pub const IP_SCHEMA: Schema =
    StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

pub const CIDR_V4_SCHEMA: Schema =
    StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

pub const CIDR_V6_SCHEMA: Schema =
    StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
    .format(&CIDR_FORMAT)
    .max_length(43)
    .schema();

pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
    "Access control path.")
    .format(&ACL_PATH_FORMAT)
    .min_length(1)
    .max_length(128)
    .schema();

pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
    "Allow to propagate (inherit) permissions.")
    .default(true)
    .schema();

pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
    "Type of 'ugid' property.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("user", "User"),
        EnumEntry::new("group", "Group")]))
    .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
    StringSchema::new("Backup archive name.")
        .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -149,7 +261,10 @@ pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =

pub const BACKUP_TYPE_SCHEMA: Schema =
    StringSchema::new("Backup type.")
-        .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
+        .format(&ApiStringFormat::Enum(&[
+            EnumEntry::new("vm", "Virtual Machine Backup"),
+            EnumEntry::new("ct", "Container Backup"),
+            EnumEntry::new("host", "Host Backup")]))
        .schema();
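
The EnumEntry change pairs each allowed value with a description instead of listing bare strings, so generated API documentation can explain the values. Validation is unchanged: input is still matched against the keys. A hedged sketch (assuming the proxmox schema crate's parse_simple_value helper behaves as its name suggests):

    // accepted: the value matches an entry key
    parse_simple_value("vm", &BACKUP_TYPE_SCHEMA)?;
    // rejected: "lxc" is not one of the defined enumeration keys
    assert!(parse_simple_value("lxc", &BACKUP_TYPE_SCHEMA).is_err());
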
pub const BACKUP_ID_SCHEMA: Schema =
@@ -172,12 +287,33 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .max_length(32)
    .schema();

pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run garbage collection job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .schema();

pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run prune job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .schema();

pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Delete vanished backups. This remove the local copy if the remote backup was deleted.")
    .default(true)
    .schema();

pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .schema();
@@ -202,6 +338,12 @@ pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
    .max_length(64)
    .schema();

pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
    .format(&PROXMOX_GROUP_ID_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();


// Complex type definitions

@@ -271,6 +413,60 @@ pub struct SnapshotListItem {
    pub size: Option<u64>,
}

#[api(
    properties: {
        "backup-type": {
            schema: BACKUP_TYPE_SCHEMA,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
        },
        "backup-time": {
            schema: BACKUP_TIME_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Prune result.
pub struct PruneListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    pub backup_time: i64,
    /// Keep snapshot
    pub keep: bool,
}

pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new(
    "Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new(
    "Number of hourly backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new(
    "Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new(
    "Number of monthly backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new(
    "Number of weekly backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
    "Number of yearly backups to keep.")
    .minimum(1)
    .schema();

#[api(
    properties: {
        "filename": {
@@ -313,6 +509,10 @@ pub struct GarbageCollectionStatus {
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
+    /// Sum of pending bytes (pending removal - kept for safety).
+    pub pending_bytes: u64,
+    /// Number of pending chunks (pending removal - kept for safety).
+    pub pending_chunks: usize,
}

impl Default for GarbageCollectionStatus {
@@ -325,6 +525,8 @@ impl Default for GarbageCollectionStatus {
            disk_chunks: 0,
            removed_bytes: 0,
            removed_chunks: 0,
+            pending_bytes: 0,
+            pending_chunks: 0,
        }
    }
}
@@ -373,10 +575,224 @@ pub struct TaskListItem {
    pub status: Option<String>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
    /// Restart the server
    Reboot,
    /// Shutdown the server
    Shutdown,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    balance_rr = 0,
    /// Active-backup policy
    active_backup = 1,
    /// XOR policy
    balance_xor = 2,
    /// Broadcast policy
    broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    //#[serde(rename = "802.3ad")]
    ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    balance_tlb = 5,
    /// Adaptive load balancing
    balance_alb = 6,
}
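
Because of the kebab-case rename, the on-the-wire names differ from the Rust identifiers. A small sketch of the expected serialization (assuming serde_json; note the commented-out rename means 802.3ad currently serializes from the identifier, not the standard name):

    fn bond_mode_names() -> Result<(), serde_json::Error> {
        // rename_all = "kebab-case" turns underscores into hyphens on the wire
        assert_eq!(serde_json::to_string(&LinuxBondMode::active_backup)?, "\"active-backup\"");
        // without the rename attribute this stays the mechanical translation
        assert_eq!(serde_json::to_string(&LinuxBondMode::ieee802_3ad)?, "\"ieee802-3ad\"");
        Ok(())
    }
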

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}

pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(libc::IFNAMSIZ-1)
    .schema();

pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
    .schema();

pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of network devices, comma separated.")
    .format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
    .schema();
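
PropertyString lets a plain string field carry a typed array: the comma separated value is split and each element is validated against the inner schema. A sketch of the intended round trip (assuming the proxmox schema crate exposes a parse helper along these lines):

    // "ens18,ens19" passes because each element matches
    // NETWORK_INTERFACE_NAME_SCHEMA; the result is a JSON array.
    let value = parse_property_string("ens18,ens19", &NETWORK_INTERFACE_ARRAY_SCHEMA)?;
    assert_eq!(value, serde_json::json!(["ens18", "ens19"]));
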

#[api(
    properties: {
        name: {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
        },
        "type": {
            type: NetworkInterfaceType,
        },
        method: {
            type: NetworkConfigMethod,
            optional: true,
        },
        method6: {
            type: NetworkConfigMethod,
            optional: true,
        },
        cidr: {
            schema: CIDR_V4_SCHEMA,
            optional: true,
        },
        cidr6: {
            schema: CIDR_V6_SCHEMA,
            optional: true,
        },
        gateway: {
            schema: IP_V4_SCHEMA,
            optional: true,
        },
        gateway6: {
            schema: IP_V6_SCHEMA,
            optional: true,
        },
        options: {
            description: "Option list (inet)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        options6: {
            description: "Option list (inet6)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        comments: {
            description: "Comments (inet, may span multiple lines)",
            type: String,
            optional: true,
        },
        comments6: {
            description: "Comments (inet6, may span multiple lines)",
            type: String,
            optional: true,
        },
        bridge_ports: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        slaves: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        bond_mode: {
            type: LinuxBondMode,
            optional: true,
        }
    }
)]
#[derive(Debug, Serialize, Deserialize)]
/// Network Interface configuration
pub struct Interface {
    /// Autostart interface
    #[serde(rename = "autostart")]
    pub autostart: bool,
    /// Interface is active (UP)
    pub active: bool,
    /// Interface name
    pub name: String,
    /// Interface type
    #[serde(rename = "type")]
    pub interface_type: NetworkInterfaceType,
    #[serde(skip_serializing_if="Option::is_none")]
    pub method: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub method6: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// IPv4 address with netmask
    pub cidr: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// IPv4 gateway
    pub gateway: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// IPv6 address with netmask
    pub cidr6: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// IPv6 gateway
    pub gateway6: Option<String>,

    #[serde(skip_serializing_if="Vec::is_empty")]
    pub options: Vec<String>,
    #[serde(skip_serializing_if="Vec::is_empty")]
    pub options6: Vec<String>,

    #[serde(skip_serializing_if="Option::is_none")]
    pub comments: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comments6: Option<String>,

    #[serde(skip_serializing_if="Option::is_none")]
    /// Maximum Transmission Unit
    pub mtu: Option<u64>,

    #[serde(skip_serializing_if="Option::is_none")]
    pub bridge_ports: Option<Vec<String>>,
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if="Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,

    #[serde(skip_serializing_if="Option::is_none")]
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
}

// Regression tests

#[test]
-fn test_cert_fingerprint_schema() -> Result<(), Error> {
+fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

    let schema = CERT_FINGERPRINT_SHA256_SCHEMA;

@@ -417,7 +833,7 @@ fn test_cert_fingerprint_schema() -> Result<(), Error> {
}

#[test]
-fn test_proxmox_user_id_schema() -> Result<(), Error> {
+fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

    let schema = PROXMOX_USER_ID_SCHEMA;

@@ -462,3 +878,31 @@ fn test_proxmox_user_id_schema() -> Result<(), Error> {

    Ok(())
}

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum RRDMode {
    /// Maximum
    Max,
    /// Average
    Average,
}


#[api()]
#[repr(u64)]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RRDTimeFrameResolution {
    /// 1 min => last 70 minutes
    Hour = 60,
    /// 30 min => last 35 hours
    Day = 60*30,
    /// 3 hours => about 8 days
    Week = 60*180,
    /// 12 hours => last 35 days
    Month = 60*720,
    /// 1 week => last 490 days
    Year = 60*10080,
}
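
The doc comments encode a simple relation: each resolution keeps on the order of 70 data points, so the covered span is resolution times 70. A quick sketch of that arithmetic (the 70-point count is inferred from the comments, not from code shown here):

    fn covered_span_secs(res: RRDTimeFrameResolution) -> u64 {
        // e.g. Hour: 60 s * 70 points = 4200 s = 70 minutes,
        //      Year: 604800 s (1 week) * 70 points = 490 days
        (res as u64) * 70
    }
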

@@ -1,7 +1,7 @@
-use failure::*;
+use anyhow::{Error};
use serde_json::{json, Value};

-use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::ObjectSchema;

pub const PROXMOX_PKG_VERSION: &str =
@@ -31,6 +31,6 @@ pub const ROUTER: Router = Router::new()
        &ApiMethod::new(
            &ApiHandler::Sync(&get_version),
            &ObjectSchema::new("Proxmox Backup Server API version.", &[])
-        )
+        ).access(None, &Permission::Anybody)
    );

src/auth.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
//! Proxmox Backup Server Authentication
//!
//! This library contains helper to authenticate users.

use std::process::{Command, Stdio};
use std::io::Write;
use std::ffi::{CString, CStr};

use base64;
use anyhow::{bail, format_err, Error};
use serde_json::json;

pub trait ProxmoxAuthenticator {
    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
    fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
}

pub struct PAM();

impl ProxmoxAuthenticator for PAM {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
        let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
        auth.get_handler().set_credentials(username, password);
        auth.authenticate()?;
        return Ok(());
    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
        let mut child = Command::new("passwd")
            .arg(username)
            .stdin(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;

        // Note: passwd reads password twice from stdin (for verify)
        writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;

        let output = child.wait_with_output()
            .or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;

        if !output.status.success() {
            bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
        }

        Ok(())
    }
}

pub struct PBS();

pub fn crypt(password: &[u8], salt: &str) -> Result<String, Error> {

    #[link(name="crypt")]
    extern "C" {
        #[link_name = "crypt"]
        fn __crypt(key: *const libc::c_char, salt: *const libc::c_char) -> * mut libc::c_char;
    }

    let salt = CString::new(salt)?;
    let password = CString::new(password)?;

    let res = unsafe {
        CStr::from_ptr(
            __crypt(
                password.as_c_str().as_ptr(),
                salt.as_c_str().as_ptr()
            )
        )
    };
    Ok(String::from(res.to_str()?))
}


pub fn encrypt_pw(password: &str) -> Result<String, Error> {

    let salt = proxmox::sys::linux::random_data(8)?;
    let salt = format!("$5${}$", base64::encode_config(&salt, base64::CRYPT));

    crypt(password.as_bytes(), &salt)
}

pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
    let verify = crypt(password.as_bytes(), enc_password)?;
    if &verify != enc_password {
        bail!("invalid credentials");
    }
    Ok(())
}
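
verify_crypt_pw works because crypt() reuses the full stored hash as the salt argument: libcrypt parses the "$5$...$" prefix out of it and ignores the trailing hash part, so re-hashing the candidate password reproduces the stored value exactly. A hedged round-trip sketch:

    fn password_roundtrip() -> Result<(), Error> {
        // encrypt_pw picks a random salt, so every call yields a new hash...
        let enc = encrypt_pw("hunter2")?;
        assert!(enc.starts_with("$5$"));   // SHA-256 crypt scheme

        // ...but re-hashing with the stored value as salt reproduces it
        verify_crypt_pw("hunter2", &enc)?;
        assert!(verify_crypt_pw("wrong", &enc).is_err());
        Ok(())
    }
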

const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";

impl ProxmoxAuthenticator for PBS {

    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
        let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        match data[username].as_str() {
            None => bail!("no password set"),
            Some(enc_password) => verify_crypt_pw(password, enc_password)?,
        }
        Ok(())
    }

    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
        let enc_password = encrypt_pw(password)?;
        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
        data[username] = enc_password.into();

        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
        let options = proxmox::tools::fs::CreateOptions::new()
            .perm(mode)
            .owner(nix::unistd::ROOT)
            .group(nix::unistd::Gid::from_raw(0));

        let data = serde_json::to_vec_pretty(&data)?;
        proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;

        Ok(())
    }
}

pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
    let data: Vec<&str> = userid.rsplitn(2, '@').collect();

    if data.len() != 2 {
        bail!("userid '{}' has no realm", userid);
    }
    Ok((data[1].to_owned(), data[0].to_owned()))
}
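
rsplitn splits from the right, so only the last '@' separates the realm, while '@' may still appear in the user name. A small sketch of the resulting behaviour:

    fn userid_examples() -> Result<(), Error> {
        // returns (username, realm)
        assert_eq!(parse_userid("root@pam")?, ("root".to_string(), "pam".to_string()));
        // only the right-most '@' is the realm separator
        assert_eq!(parse_userid("j.doe@example@pbs")?,
                   ("j.doe@example".to_string(), "pbs".to_string()));
        assert!(parse_userid("no-realm").is_err());
        Ok(())
    }
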

/// Lookup the autenticator for the specified realm
pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
    match realm {
        "pam" => Ok(Box::new(PAM())),
        "pbs" => Ok(Box::new(PBS())),
        _ => bail!("unknown realm '{}'", realm),
    }
}

/// Authenticate users
pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
    let (username, realm) = parse_userid(userid)?;

    lookup_authenticator(&realm)?
        .authenticate_user(&username, password)
}

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;

use openssl::rsa::{Rsa};

@@ -103,7 +103,7 @@
//!
//! Not sure if this is better. TODO

-use failure::*;
+use anyhow::{bail, Error};

// Note: .pcat1 => Proxmox Catalog Format version 1
pub const CATALOG_NAME: &str = "catalog.pcat1.didx";

@@ -1,6 +1,6 @@
use crate::tools;

-use failure::*;
+use anyhow::{bail, format_err, Error};
use regex::Regex;
use std::os::unix::io::RawFd;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
use std::fmt;
use std::ffi::{CStr, CString, OsStr};
use std::os::unix::ffi::OsStrExt;

@@ -7,7 +7,7 @@ use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};

use chrono::{Utc, offset::TimeZone};
-use failure::*;
+use anyhow::{bail, format_err, Error};
use nix::sys::stat::{Mode, SFlag};

use proxmox::api::{cli::*, *};
@@ -140,7 +140,9 @@ impl Shell {
                continue;
            }
        };
-        let _ = handle_command(helper.cmd_def(), "", args, None);
+
+        let rpcenv = CliEnvironment::new();
+        let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);
        self.rl.add_history_entry(line);
        self.update_prompt()?;
    }

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use std::sync::Arc;
use std::io::Read;

@@ -1,7 +1,7 @@
use std::sync::Arc;
use std::io::Write;

-use failure::*;
+use anyhow::{Error};

use super::CryptConfig;
use crate::tools::borrow::Tied;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};

use std::path::{Path, PathBuf};
use std::io::Write;
@@ -157,8 +157,8 @@ impl ChunkStore {

        let (chunk_path, _digest_str) = self.chunk_path(digest);

-        const UTIME_NOW: i64 = ((1 << 30) - 1);
-        const UTIME_OMIT: i64 = ((1 << 30) - 2);
+        const UTIME_NOW: i64 = (1 << 30) - 1;
+        const UTIME_OMIT: i64 = (1 << 30) - 2;

        let times: [libc::timespec; 2] = [
            libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
@@ -289,9 +289,9 @@ impl ChunkStore {

    pub fn sweep_unused_chunks(
        &self,
-        oldest_writer: Option<i64>,
+        oldest_writer: i64,
        status: &mut GarbageCollectionStatus,
-        worker: Arc<WorkerTask>,
+        worker: &WorkerTask,
    ) -> Result<(), Error> {
        use nix::sys::stat::fstatat;

@@ -299,10 +299,8 @@ impl ChunkStore {

        let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)

-        if let Some(stamp) = oldest_writer {
-            if stamp < min_atime {
-                min_atime = stamp;
-            }
+        if oldest_writer < min_atime {
+            min_atime = oldest_writer;
        }

        min_atime -= 300; // add 5 mins gap for safety
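
The effective cutoff is "one day ago, or the oldest still-running writer, whichever is older, minus a five-minute safety gap". A worked sketch of the computation (values are illustrative):

    fn gc_atime_cutoff(now: i64, oldest_writer: i64) -> i64 {
        // at least 24h in the past, because relatime only updates
        // the atime once per day
        let mut min_atime = now - 3600 * 24;
        // an even older writer pushes the cutoff further back
        if oldest_writer < min_atime {
            min_atime = oldest_writer;
        }
        // 5 minute safety gap
        min_atime - 300
    }
    // e.g. gc_atime_cutoff(1_000_000, 999_000) == 1_000_000 - 86_400 - 300
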
@@ -316,6 +314,7 @@ impl ChunkStore {
            worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
        }

+        worker.fail_on_abort()?;
        tools::fail_on_shutdown()?;

        let (dirfd, entry) = match entry {
@@ -338,10 +337,9 @@ impl ChunkStore {
        let lock = self.mutex.lock();

        if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
-            let age = now - stat.st_atime;
-            //println!("FOUND {} {:?}", age/(3600*24), filename);
            if stat.st_atime < min_atime {
-                println!("UNLINK {} {:?}", age/(3600*24), filename);
+                //let age = now - stat.st_atime;
+                //println!("UNLINK {} {:?}", age/(3600*24), filename);
                let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
                if res != 0 {
                    let err = nix::Error::last();
@@ -354,11 +352,16 @@ impl ChunkStore {
                }
                status.removed_chunks += 1;
                status.removed_bytes += stat.st_size as u64;
+            } else {
+                if stat.st_atime < oldest_writer {
+                    status.pending_chunks += 1;
+                    status.pending_bytes += stat.st_size as u64;
+                } else {
+                    status.disk_chunks += 1;
+                    status.disk_bytes += stat.st_size as u64;
+                }
            }
        }
        drop(lock);
    }

@@ -2,7 +2,7 @@ use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::BytesMut;
-use failure::*;
+use anyhow::{Error};
use futures::ready;
use futures::stream::{Stream, TryStream};

@@ -6,7 +6,7 @@
//! See the Wikipedia Artikel for [Authenticated
//! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
//! for a short introduction.
-use failure::*;
+use anyhow::{bail, Error};
use openssl::pkcs5::pbkdf2_hmac;
use openssl::hash::MessageDigest;
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
use std::sync::Arc;
use std::io::{Read, BufRead};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use std::sync::Arc;
use std::io::Write;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
use std::convert::TryInto;

use proxmox::tools::io::{ReadExt, WriteExt};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
use std::sync::Arc;
use std::io::{Read, BufReader};
use proxmox::tools::io::ReadExt;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use std::sync::Arc;
use std::io::{Write, Seek, SeekFrom};
use proxmox::tools::io::WriteExt;

@@ -1,9 +1,9 @@
use std::collections::{HashSet, HashMap};
-use std::io;
+use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

-use failure::*;
+use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use chrono::{DateTime, Utc};

@@ -236,18 +236,80 @@ impl DataStore {
        }
    }

-    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
+    /// Returns the backup owner.
+    ///
+    /// The backup owner is the user who first created the backup group.
+    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
+        let mut full_path = self.base_path();
+        full_path.push(backup_group.group_path());
+        full_path.push("owner");
+        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
+        Ok(owner.trim_end().to_string()) // remove trailing newline
+    }
+
+    /// Set the backup owner.
+    pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
+        let mut path = self.base_path();
+        path.push(backup_group.group_path());
+        path.push("owner");
+
+        let mut open_options = std::fs::OpenOptions::new();
+        open_options.write(true);
+        open_options.truncate(true);
+
+        if force {
+            open_options.create(true);
+        } else {
+            open_options.create_new(true);
+        }
+
+        let mut file = open_options.open(&path)
+            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
+
+        write!(file, "{}\n", userid)
+            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
+
+        Ok(())
+    }
+
+    /// Create a backup group if it does not already exists.
+    ///
+    /// And set the owner to 'userid'. If the group already exists, it returns the
+    /// current owner (instead of setting the owner).
+    pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {

        // create intermediate path first:
-        let mut full_path = self.base_path();
-        full_path.push(backup_dir.group().group_path());
+        let base_path = self.base_path();
+
+        let mut full_path = base_path.clone();
+        full_path.push(backup_group.backup_type());
        std::fs::create_dir_all(&full_path)?;

+        full_path.push(backup_group.backup_id());
+
        // create the last component now
        match std::fs::create_dir(&full_path) {
+            Ok(_) => {
+                self.set_owner(backup_group, userid, false)?;
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok(owner)
+            }
+            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok(owner)
+            }
+            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
+        }
+    }
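
Together these helpers implement first-writer-wins ownership: creating a group claims it, and re-creating it returns the existing owner unchanged. A hedged sketch of the caller-side check that backup/pull paths can build on (function shape and error text are illustrative):

    fn check_backup_owner(
        store: &DataStore,
        group: &BackupGroup,
        username: &str,
    ) -> Result<(), Error> {
        // claims ownership for new groups, otherwise returns the current owner
        let owner = store.create_backup_group(group, username)?;
        if owner != username {
            bail!("backup group '{}/{}' is owned by '{}'",
                  group.backup_type(), group.backup_id(), owner);
        }
        Ok(())
    }
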

+    /// Creates a new backup snapshot inside a BackupGroup
+    ///
+    /// The BackupGroup directory needs to exist.
+    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
@@ -290,12 +352,14 @@ impl DataStore {
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
+        worker: &WorkerTask,
    ) -> Result<(), Error> {

        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
+            worker.fail_on_abort()?;
+            tools::fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if let Err(err) = self.chunk_store.touch_chunk(digest) {
@@ -306,21 +370,22 @@ impl DataStore {
        Ok(())
    }

-    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {

+            worker.fail_on_abort()?;
+            tools::fail_on_shutdown()?;
+
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex {
                    let index = self.open_fixed_reader(&path)?;
-                    self.index_mark_used_chunks(index, &path, status)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
                } else if archive_type == ArchiveType::DynamicIndex {
                    let index = self.open_dynamic_reader(&path)?;
-                    self.index_mark_used_chunks(index, &path, status)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
                }
            }
        }
@@ -332,26 +397,36 @@ impl DataStore {
        self.last_gc_status.lock().unwrap().clone()
    }

-    pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
+    pub fn garbage_collection_running(&self) -> bool {
+        if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
+    }
+
+    pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

-            let oldest_writer = self.chunk_store.oldest_writer();
+            let now = unsafe { libc::time(std::ptr::null_mut()) };
+
+            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(worker.to_string());

            worker.log("Start GC phase1 (mark used chunks)");

-            self.mark_used_chunks(&mut gc_status)?;
+            self.mark_used_chunks(&mut gc_status, &worker)?;

            worker.log("Start GC phase2 (sweep unused chunks)");
-            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, worker.clone())?;
+            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;

            worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
            worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
+            if gc_status.pending_bytes > 0 {
+                worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
+            }

            worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));

            if gc_status.index_data_bytes > 0 {

@@ -5,7 +5,7 @@ use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;

-use failure::*;
+use anyhow::{bail, format_err, Error};

use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
@@ -275,7 +275,7 @@ struct ChunkCacher<'a, S> {
}

impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
-    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, failure::Error> {
+    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
        let (start, end, digest) = self.index.chunk_info(index)?;
        self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
    }

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
use std::convert::TryInto;
use std::io::{Seek, SeekFrom};

@@ -3,7 +3,7 @@ use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::{Bytes, BytesMut};
-use failure::*;
+use anyhow::{format_err, Error};
use futures::*;

/// Trait to get digest list from index files

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};

use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
use std::convert::TryFrom;
use std::path::Path;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use std::sync::Arc;

use super::datastore::*;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

// chacha20-poly1305

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox::api::{*, cli::*};

@@ -83,7 +83,8 @@ fn main() -> Result<(), Error> {

    let args = shellword_split(&line)?;

-    let _ = handle_command(helper.cmd_def(), "", args, None);
+    let rpcenv = CliEnvironment::new();
+    let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);

    rl.add_history_entry(line);
}

@@ -1,6 +1,6 @@
use std::io::Write;

-use failure::*;
+use anyhow::{Error};

use chrono::{DateTime, Utc};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox::api::format::*;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox::api::format::*;
use proxmox::api::cli::*;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox::api::format::dump_api;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};

use std::thread;
use std::path::PathBuf;

@@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

-use failure::*;
+use anyhow::{Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;
use tokio::net::TcpStream;

@@ -2,7 +2,7 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

-use failure::*;
+use anyhow::{format_err, Error};
use futures::future::TryFutureExt;
use futures::stream::Stream;

@@ -1,6 +1,6 @@
use std::sync::Arc;

-use failure::*;
+use anyhow::{format_err, Error};
use futures::*;
use hyper::{Request, Response, Body};
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use futures::*;

// Simple H2 server to test H2 speed with h2client.rs

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{bail, Error};
use futures::*;

use proxmox::try_block;
@@ -34,6 +34,8 @@ async fn run() -> Result<(), Error> {

    config::update_self_signed_cert(false)?;

+    proxmox_backup::rrd::create_rrdb_dir()?;
+
    if let Err(err) = generate_auth_key() {
        bail!("unable to generate auth key - {}", err);
    }
@@ -45,7 +47,7 @@ async fn run() -> Result<(), Error> {
    let _ = csrf_secret(); // load with lazy_static

    let config = server::ApiConfig::new(
-        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED);
+        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PRIVILEGED)?;

    let rest_server = RestServer::new(config);
@ -1,4 +1,4 @@
|
||||
use failure::*;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::unistd::{fork, ForkResult, pipe};
|
||||
use std::os::unix::io::RawFd;
|
||||
use chrono::{Local, DateTime, Utc, TimeZone};
|
||||
@ -1424,11 +1424,30 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
||||
param["backup-type"] = group.backup_type().into();
|
||||
param["backup-id"] = group.backup_id().into();
|
||||
|
||||
let result = client.post(&path, Some(param)).await?;
|
||||
let mut result = client.post(&path, Some(param)).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
view_task_result(client, result, &output_format).await?;
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.sortby("backup-type", false)
|
||||
.sortby("backup-id", false)
|
||||
.sortby("backup-time", false)
|
||||
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
|
||||
.column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
|
||||
.column(ColumnConfig::new("keep"))
|
||||
;
|
||||
|
||||
let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;
|
||||
|
||||
let mut data = result["data"].take();
|
||||
|
||||
format_and_print_result_full(&mut data, info, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
@ -1852,7 +1871,9 @@ fn key_mgmt_cli() -> CliCommandMap {
|
||||
|
||||
const KDF_SCHEMA: Schema =
|
||||
StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
|
||||
.format(&ApiStringFormat::Enum(&["scrypt", "none"]))
|
||||
.format(&ApiStringFormat::Enum(&[
|
||||
EnumEntry::new("scrypt", "SCrypt"),
|
||||
EnumEntry::new("none", "Do not encrypt the key")]))
|
||||
.default("scrypt")
|
||||
.schema();
|
||||
|
||||
@ -2400,7 +2421,8 @@ fn main() {
|
||||
.insert("catalog", catalog_mgmt_cli())
|
||||
.insert("task", task_mgmt_cli());
|
||||
|
||||
run_cli_command(cmd_def, Some(|future| {
|
||||
let rpcenv = CliEnvironment::new();
|
||||
run_cli_command(cmd_def, rpcenv, Some(|future| {
|
||||
proxmox_backup::tools::runtime::main(future)
|
||||
}));
|
||||
}
|
||||
|
@ -1,19 +1,20 @@
|
||||
use std::path::PathBuf;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use failure::*;
|
||||
use anyhow::{format_err, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment};
|
||||
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::tools;
|
||||
use proxmox_backup::config::{self, remote::{self, Remote}};
|
||||
use proxmox_backup::config;
|
||||
use proxmox_backup::api2::{self, types::* };
|
||||
use proxmox_backup::client::*;
|
||||
use proxmox_backup::tools::ticket::*;
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
|
||||
mod proxmox_backup_manager;
|
||||
use proxmox_backup_manager::*;
|
||||
|
||||
async fn view_task_result(
|
||||
client: HttpClient,
|
||||
result: Value,
|
||||
@ -51,88 +52,6 @@ fn connect() -> Result<HttpClient, Error> {
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List configured remotes.
|
||||
fn list_remotes(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let info = &api2::config::remote::API_METHOD_LIST_REMOTES;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("name"))
|
||||
.column(ColumnConfig::new("host"))
|
||||
.column(ColumnConfig::new("userid"))
|
||||
.column(ColumnConfig::new("fingerprint"))
|
||||
.column(ColumnConfig::new("comment"));
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
fn remote_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
|
||||
.insert(
|
||||
"create",
|
||||
// fixme: howto handle password parameter?
|
||||
CliCommand::new(&api2::config::remote::API_METHOD_CREATE_REMOTE)
|
||||
.arg_param(&["name"])
|
||||
)
|
||||
.insert(
|
||||
"update",
|
||||
CliCommand::new(&api2::config::remote::API_METHOD_UPDATE_REMOTE)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("name", config::remote::complete_remote_name)
|
||||
)
|
||||
.insert(
|
||||
"remove",
|
||||
CliCommand::new(&api2::config::remote::API_METHOD_DELETE_REMOTE)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("name", config::remote::complete_remote_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
fn datastore_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&api2::config::datastore::API_METHOD_LIST_DATASTORES))
|
||||
.insert("create",
|
||||
CliCommand::new(&api2::config::datastore::API_METHOD_CREATE_DATASTORE)
|
||||
.arg_param(&["name", "path"])
|
||||
)
|
||||
.insert("update",
|
||||
CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("name", config::datastore::complete_datastore_name)
|
||||
)
|
||||
.insert("remove",
|
||||
CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("name", config::datastore::complete_datastore_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@@ -328,97 +247,6 @@ fn task_mgmt_cli() -> CommandLineInterface {
    cmd_def.into()
}

fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
    let mut parts = Vec::new();
    for entry in name.entries() {
        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
    }
    Ok(parts.join(", "))
}

#[api]
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {

    let cert_path = PathBuf::from(configdir!("/proxy.pem"));

    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;

    let cert = openssl::x509::X509::from_pem(&cert_pem)?;

    println!("Subject: {}", x509name_to_string(cert.subject_name())?);

    if let Some(san) = cert.subject_alt_names() {
        for name in san.iter() {
            if let Some(v) = name.dnsname() {
                println!(" DNS:{}", v);
            } else if let Some(v) = name.ipaddress() {
                println!(" IP:{:?}", v);
            } else if let Some(v) = name.email() {
                println!(" EMAIL:{}", v);
            } else if let Some(v) = name.uri() {
                println!(" URI:{}", v);
            }
        }
    }

    println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
    println!("Validity:");
    println!(" Not Before: {}", cert.not_before());
    println!(" Not After : {}", cert.not_after());

    let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
    let fp_string = proxmox::tools::digest_to_hex(&fp);
    let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
        .collect::<Vec<&str>>().join(":");

    println!("Fingerprint (sha256): {}", fp_string);

    let pubkey = cert.public_key()?;
    println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
    println!("Public key bits: {}", pubkey.bits());

    Ok(())
}

#[api(
    input: {
        properties: {
            force: {
                description: "Force generation of new SSL certificate.",
                type: Boolean,
                optional: true,
            },
        }
    },
)]
/// Update node certificates and generate all needed files/directories.
fn update_certs(force: Option<bool>) -> Result<(), Error> {

    config::create_configdir()?;

    if let Err(err) = generate_auth_key() {
        bail!("unable to generate auth key - {}", err);
    }

    if let Err(err) = generate_csrf_key() {
        bail!("unable to generate csrf key - {}", err);
    }

    config::update_self_signed_cert(force.unwrap_or(false))?;

    Ok(())
}

fn cert_mgmt_cli() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("info", CliCommand::new(&API_METHOD_CERT_INFO))
        .insert("update", CliCommand::new(&API_METHOD_UPDATE_CERTS));

    cmd_def.into()
}

// fixme: avoid API redefinition
#[api(
    input: {
@@ -478,10 +306,15 @@ async fn pull_datastore(
fn main() {

    let cmd_def = CliCommandMap::new()
        .insert("acl", acl_commands())
        .insert("datastore", datastore_commands())
        .insert("dns", dns_commands())
        .insert("network", network_commands())
        .insert("user", user_commands())
        .insert("remote", remote_commands())
        .insert("garbage-collection", garbage_collection_commands())
        .insert("cert", cert_mgmt_cli())
        .insert("sync-job", sync_job_commands())
        .insert("task", task_mgmt_cli())
        .insert(
            "pull",
@@ -492,7 +325,10 @@ fn main() {
            .completion_cb("remote-store", complete_remote_datastore_name)
        );

-    proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def));
+    let mut rpcenv = CliEnvironment::new();
+    rpcenv.set_user(Some(String::from("root@pam")));
+
+    proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}

// shell completion helper
@@ -502,9 +338,9 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String

    let _ = proxmox::try_block!({
        let remote = param.get("remote").ok_or_else(|| format_err!("no remote"))?;
-        let (remote_config, _digest) = remote::config()?;
+        let (remote_config, _digest) = config::remote::config()?;

-        let remote: Remote = remote_config.lookup("remote", &remote)?;
+        let remote: config::remote::Remote = remote_config.lookup("remote", &remote)?;

        let options = HttpClientOptions::new()
            .password(Some(remote.password.clone()))
@@ -1,6 +1,6 @@
use std::sync::Arc;

-use failure::*;
+use anyhow::{bail, format_err, Error};
use futures::*;
use hyper;
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
@@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
    let _ = csrf_secret(); // load with lazy_static

    let mut config = ApiConfig::new(
-        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC);
+        buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

    // add default dirs which includes jquery and bootstrap
    // my $base = '/usr/share/libpve-http-server-perl';
@@ -107,6 +107,9 @@ async fn run() -> Result<(), Error> {
        bail!("unable to start daemon - {}", err);
    }

+    start_task_scheduler();
+    start_stat_generator();
+
    server.await?;
    log::info!("server shutting down, waiting for active workers to complete");
    proxmox_backup::server::last_worker_future().await?;
@@ -114,3 +117,606 @@ async fn run() -> Result<(), Error> {

    Ok(())
}

fn start_stat_generator() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_stat_generator());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}

fn start_task_scheduler() {
    let abort_future = server::shutdown_future();
    let future = Box::pin(run_task_scheduler());
    let task = futures::future::select(future, abort_future);
    tokio::spawn(task.map(|_| ()));
}
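
Both starter functions above use the same shutdown pattern: the long-running loop is raced against the server's shared shutdown future, and whichever side finishes first cancels the other (the loser of select() is simply dropped). A minimal standalone sketch of that pattern, with illustrative names that are not part of this patch:

    use futures::future::select;

    // Sketch: drive `work` until `shutdown` resolves, whichever happens first.
    // select() requires Unpin futures, hence the Box::pin.
    async fn run_until_shutdown(
        work: impl std::future::Future<Output = ()>,
        shutdown: impl std::future::Future<Output = ()>,
    ) {
        select(Box::pin(work), Box::pin(shutdown)).await;
    }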

use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH};

fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
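
next_minute() rounds the epoch clock up to the next full minute and converts the difference into a monotonic Instant for the timer. A standalone check of the rounding (the helper name here is illustrative):

    use std::time::Duration;

    // Same computation as next_minute(), on fixed inputs.
    fn delay_for(epoch_now_secs: u64) -> Duration {
        let epoch_next_secs = (epoch_now_secs / 60 + 1) * 60;
        Duration::from_secs(epoch_next_secs - epoch_now_secs)
    }

    fn main() {
        assert_eq!(delay_for(125), Duration::from_secs(55)); // 125 s -> next boundary at 180 s
        assert_eq!(delay_for(180), Duration::from_secs(60)); // exactly on a boundary: wait a full minute
    }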

async fn run_task_scheduler() {

    let mut count: usize = 0;

    loop {
        count += 1;

        let delay_target = match next_minute() { // try to run every minute
            Ok(d) => d,
            Err(err) => {
                eprintln!("task scheduler: compute next minute failed - {}", err);
                tokio::time::delay_until(tokio::time::Instant::from_std(Instant::now() + Duration::from_secs(60))).await;
                continue;
            }
        };

        if count > 2 { // wait 1..2 minutes before starting
            match schedule_tasks().catch_unwind().await {
                Err(panic) => {
                    match panic.downcast::<&str>() {
                        Ok(msg) => {
                            eprintln!("task scheduler panic: {}", msg);
                        }
                        Err(_) => {
                            eprintln!("task scheduler panic - unknown type");
                        }
                    }
                }
                Ok(Err(err)) => {
                    eprintln!("task scheduler failed - {:?}", err);
                }
                Ok(Ok(_)) => {}
            }
        }

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }
}

async fn schedule_tasks() -> Result<(), Error> {

    schedule_datastore_garbage_collection().await;
    schedule_datastore_prune().await;
    schedule_datastore_sync_jobs().await;

    Ok(())
}

fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {

    let list = proxmox_backup::server::read_task_list()?;

    let mut last: Option<&server::UPID> = None;

    for entry in list.iter() {
        if entry.upid.worker_type == worker_type {
            if let Some(ref id) = entry.upid.worker_id {
                if id == worker_id {
                    match last {
                        Some(ref upid) => {
                            if upid.starttime < entry.upid.starttime {
                                last = Some(&entry.upid)
                            }
                        }
                        None => {
                            last = Some(&entry.upid)
                        }
                    }
                }
            }
        }
    }

    Ok(last.cloned())
}
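
lookup_last_worker scans the whole task list and keeps the entry with the greatest starttime for the given worker type and id. The nested loops can also be written with iterator adapters; a sketch (assuming the task list element type is TaskListInfo, and ignoring how ties on starttime are broken):

    // Same selection as the loop above, minus the error plumbing.
    fn lookup_last_worker_alt(
        list: &[proxmox_backup::server::TaskListInfo],
        worker_type: &str,
        worker_id: &str,
    ) -> Option<proxmox_backup::server::UPID> {
        list.iter()
            .filter(|entry| entry.upid.worker_type == worker_type)
            .filter(|entry| entry.upid.worker_id.as_deref() == Some(worker_id))
            .max_by_key(|entry| entry.upid.starttime)
            .map(|entry| entry.upid.clone())
    }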


async fn schedule_datastore_garbage_collection() {

    use proxmox_backup::backup::DataStore;
    use proxmox_backup::server::{UPID, WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.gc_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        if datastore.garbage_collection_running() { continue; }

        let worker_type = "garbage_collection";

        let stat = datastore.last_gc_status();
        let last = if let Some(upid_str) = stat.upid {
            match upid_str.parse::<UPID>() {
                Ok(upid) => upid.starttime,
                Err(err) => {
                    eprintln!("unable to parse upid '{}' - {}", upid_str, err);
                    continue;
                }
            }
        } else {
            match lookup_last_worker(worker_type, &store) {
                Ok(Some(upid)) => upid.starttime,
                Ok(None) => 0,
                Err(err) => {
                    eprintln!("lookup_last_job_start failed: {}", err);
                    continue;
                }
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };
        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("starting garbage collection on store {}", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                datastore.garbage_collection(&worker)
            }
        ) {
            eprintln!("unable to start garbage collection on store {} - {}", store2, err);
        }
    }
}
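
All three schedule_* functions gate on the same calendar-event check: parse the configured schedule, find the start time of the last matching worker, compute the next planned run, and bail out unless that time has passed. Condensed into one helper, as a sketch using the same calls as above (not part of the patch):

    use std::time::{SystemTime, UNIX_EPOCH};
    use anyhow::Error;
    use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

    // Returns true when a job whose last run started at `last` (epoch secs) is due now.
    fn is_due(event_str: &str, last: i64) -> Result<bool, Error> {
        let event = parse_calendar_event(event_str)?;
        let next = compute_next_event(&event, last, false)?;
        let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() as i64;
        Ok(next <= now)
    }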

async fn schedule_datastore_prune() {

    use proxmox_backup::backup::{
        PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::datastore::{self, DataStoreConfig};
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", store, err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore '{}' config from_value failed - {}", store, err);
                continue;
            }
        };

        let event_str = match store_config.prune_schedule {
            Some(event_str) => event_str,
            None => continue,
        };

        let prune_options = PruneOptions {
            keep_last: store_config.keep_last,
            keep_hourly: store_config.keep_hourly,
            keep_daily: store_config.keep_daily,
            keep_weekly: store_config.keep_weekly,
            keep_monthly: store_config.keep_monthly,
            keep_yearly: store_config.keep_yearly,
        };

        if !prune_options.keeps_something() { // no prune settings - keep all
            continue;
        }

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_prune_job_still_running { continue; }

        let worker_type = "prune";

        let last = match lookup_last_worker(worker_type, &store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let store2 = store.clone();

        if let Err(err) = WorkerTask::new_thread(
            worker_type,
            Some(store.clone()),
            "backup@pam",
            false,
            move |worker| {
                worker.log(format!("Starting datastore prune on store \"{}\"", store));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("retention options: {}", prune_options.cli_options_string()));

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                        store, group.backup_type(), group.backup_id()));

                    for (info, keep) in prune_info {
                        worker.log(format!(
                            "{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(), group.backup_id(),
                            BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

                        if !keep {
                            datastore.remove_backup_dir(&info.backup_dir)?;
                        }
                    }
                }

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
        }
    }
}
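
The keeps_something() guard above is load-bearing: with no keep-* option configured, compute_prune_info would mark every snapshot for removal, so such stores are skipped instead of pruned. Sketch, assuming PruneOptions has exactly the six keep fields set above:

    use proxmox_backup::backup::PruneOptions;

    let opts = PruneOptions {
        keep_last: None,
        keep_hourly: None,
        keep_daily: None,
        keep_weekly: None,
        keep_monthly: None,
        keep_yearly: None,
    };
    assert!(!opts.keeps_something()); // nothing configured -> store is skipped, not emptied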

async fn schedule_datastore_sync_jobs() {

    use proxmox_backup::{
        backup::DataStore,
        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
        server::{ WorkerTask },
        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
        tools::systemd::time::{ parse_calendar_event, compute_next_event },
    };

    let config = match sync::config() {
        Err(err) => {
            eprintln!("unable to read sync job config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    let remote_config = match remote::config() {
        Err(err) => {
            eprintln!("unable to read remote config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (job_id, (_, job_config)) in config.sections {
        let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("sync job config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match job_config.schedule {
            Some(ref event_str) => event_str.clone(),
            None => continue,
        };

        let event = match parse_calendar_event(&event_str) {
            Ok(event) => event,
            Err(err) => {
                eprintln!("unable to parse schedule '{}' - {}", event_str, err);
                continue;
            }
        };

        //fixme: if last_sync_job_still_running { continue; }

        let worker_type = "sync";

        let last = match lookup_last_worker(worker_type, &job_config.store) {
            Ok(Some(upid)) => upid.starttime,
            Ok(None) => 0,
            Err(err) => {
                eprintln!("lookup_last_job_start failed: {}", err);
                continue;
            }
        };

        let next = match compute_next_event(&event, last, false) {
            Ok(next) => next,
            Err(err) => {
                eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                continue;
            }
        };

        let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(epoch_now) => epoch_now.as_secs() as i64,
            Err(err) => {
                eprintln!("query system time failed - {}", err);
                continue;
            }
        };
        if next > now { continue; }

        let job_id2 = job_id.clone();

        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
                continue;
            }
        };

        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
            Ok(remote) => remote,
            Err(err) => {
                eprintln!("remote_config lookup failed: {}", err);
                continue;
            }
        };

        let username = String::from("backup@pam");

        let delete = job_config.remove_vanished.unwrap_or(true);

        if let Err(err) = WorkerTask::spawn(
            worker_type,
            Some(job_config.store.clone()),
            &username.clone(),
            false,
            move |worker| async move {
                worker.log(format!("Starting datastore sync job '{}'", job_id));
                worker.log(format!("task triggered by schedule '{}'", event_str));
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                    job_config.store, job_config.remote, job_config.remote_store));

                let options = HttpClientOptions::new()
                    .password(Some(remote.password.clone()))
                    .fingerprint(remote.fingerprint.clone());

                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
                let _auth_info = client.login() // make sure we can auth
                    .await
                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;

                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);

                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;

                Ok(())
            }
        ) {
            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
        }
    }
}

async fn run_stat_generator() {

    loop {
        let delay_target = Instant::now() + Duration::from_secs(10);

        generate_host_stats().await;

        tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
    }

}

fn rrd_update_gauge(name: &str, value: f64) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}

fn rrd_update_derive(name: &str, value: f64) {
    use proxmox_backup::rrd;
    if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive) {
        eprintln!("rrd::update_value '{}' failed - {}", name, err);
    }
}
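
The two wrappers differ only in the RRD data-source type: a Gauge stores the submitted value as an absolute level, while a Derive treats it as an ever-growing counter and records its per-second rate of change. That matches how generate_host_stats() below uses them:

    rrd_update_gauge("host/memused", 123456789.0);  // absolute level, stored as-is
    rrd_update_derive("host/netin", 987654321.0);   // monotonic byte counter -> stored as a rate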

async fn generate_host_stats() {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    proxmox_backup::tools::runtime::block_in_place(move || {

        match read_proc_stat() {
            Ok(stat) => {
                rrd_update_gauge("host/cpu", stat.cpu);
                rrd_update_gauge("host/iowait", stat.iowait_percent);
            }
            Err(err) => {
                eprintln!("read_proc_stat failed - {}", err);
            }
        }

        match read_meminfo() {
            Ok(meminfo) => {
                rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
                rrd_update_gauge("host/memused", meminfo.memused as f64);
                rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
                rrd_update_gauge("host/swapused", meminfo.swapused as f64);
            }
            Err(err) => {
                eprintln!("read_meminfo failed - {}", err);
            }
        }

        match read_proc_net_dev() {
            Ok(netdev) => {
                use proxmox_backup::config::network::is_physical_nic;
                let mut netin = 0;
                let mut netout = 0;
                for item in netdev {
                    if !is_physical_nic(&item.device) { continue; }
                    netin += item.receive;
                    netout += item.send;
                }
                rrd_update_derive("host/netin", netin as f64);
                rrd_update_derive("host/netout", netout as f64);
            }
            Err(err) => {
                eprintln!("read_proc_net_dev failed - {}", err);
            }
        }

        match read_loadavg() {
            Ok(loadavg) => {
                rrd_update_gauge("host/loadavg", loadavg.0 as f64);
            }
            Err(err) => {
                eprintln!("read_loadavg failed - {}", err);
            }
        }

        match disk_usage(std::path::Path::new("/")) {
            Ok((total, used, _avail)) => {
                rrd_update_gauge("host/roottotal", total as f64);
                rrd_update_gauge("host/rootused", used as f64);
            }
            Err(err) => {
                eprintln!("read root disk_usage failed - {}", err);
            }
        }

        match datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or(Vec::new());

                for config in datastore_list {
                    match disk_usage(std::path::Path::new(&config.path)) {
                        Ok((total, used, _avail)) => {
                            let rrd_key = format!("datastore/{}/total", config.name);
                            rrd_update_gauge(&rrd_key, total as f64);
                            let rrd_key = format!("datastore/{}/used", config.name);
                            rrd_update_gauge(&rrd_key, used as f64);
                        }
                        Err(err) => {
                            eprintln!("read disk_usage on {:?} failed - {}", config.path, err);
                        }
                    }
                }
            }
            Err(err) => {
                eprintln!("read datastore config failed - {}", err);
            }
        }

    });
}

// Returns (total, used, avail)
fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;

    Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
}
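
A minimal caller of disk_usage(), for reference (sketch). Note that used is derived from f_bfree while avail comes from f_bavail, so used + avail can be smaller than total by the root-reserved blocks:

    // Sketch: report root filesystem usage via the helper above.
    fn print_root_usage() -> Result<(), Error> {
        let (total, used, avail) = disk_usage(std::path::Path::new("/"))?;
        println!("total={} used={} avail={}", total, used, avail);
        Ok(())
    }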
src/bin/proxmox_backup_manager/acl.rs (new file)
@@ -0,0 +1,69 @@
use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2;

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Access Control list.
fn list_acls(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::access::acl::API_METHOD_READ_ACL;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    fn render_ugid(value: &Value, record: &Value) -> Result<String, Error> {
        if value.is_null() { return Ok(String::new()); }
        let ugid = value.as_str().unwrap();
        let ugid_type = record["ugid_type"].as_str().unwrap();

        if ugid_type == "user" {
            Ok(ugid.to_string())
        } else if ugid_type == "group" {
            Ok(format!("@{}", ugid))
        } else {
            bail!("render_ugid: got unknown ugid_type");
        }
    }

    let options = default_table_format_options()
        .column(ColumnConfig::new("ugid").renderer(render_ugid))
        .column(ColumnConfig::new("path"))
        .column(ColumnConfig::new("propagate"))
        .column(ColumnConfig::new("roleid"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn acl_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("list", CliCommand::new(&&API_METHOD_LIST_ACLS))
        .insert(
            "update",
            CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
                .arg_param(&["path", "role"])
                .completion_cb("userid", config::user::complete_user_name)
                .completion_cb("path", config::datastore::complete_acl_path)
        );

    cmd_def.into()
}
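
render_ugid flattens the two principal types into one column: user ids print verbatim, group ids get an @ prefix. Since the renderer is local to list_acls, the calls below are purely illustrative:

    use serde_json::json;

    let rec = json!({"ugid": "admin", "ugid_type": "group"});
    assert_eq!(render_ugid(&rec["ugid"], &rec).unwrap(), "@admin");

    let rec = json!({"ugid": "root@pam", "ugid_type": "user"});
    assert_eq!(render_ugid(&rec["ugid"], &rec).unwrap(), "root@pam");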
src/bin/proxmox_backup_manager/cert.rs (new file)
@@ -0,0 +1,100 @@
use std::path::PathBuf;

use anyhow::{bail, Error};

use proxmox::api::{api, cli::*};

use proxmox_backup::config;
use proxmox_backup::configdir;
use proxmox_backup::auth_helpers::*;

fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
    let mut parts = Vec::new();
    for entry in name.entries() {
        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
    }
    Ok(parts.join(", "))
}

#[api]
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {

    let cert_path = PathBuf::from(configdir!("/proxy.pem"));

    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;

    let cert = openssl::x509::X509::from_pem(&cert_pem)?;

    println!("Subject: {}", x509name_to_string(cert.subject_name())?);

    if let Some(san) = cert.subject_alt_names() {
        for name in san.iter() {
            if let Some(v) = name.dnsname() {
                println!(" DNS:{}", v);
            } else if let Some(v) = name.ipaddress() {
                println!(" IP:{:?}", v);
            } else if let Some(v) = name.email() {
                println!(" EMAIL:{}", v);
            } else if let Some(v) = name.uri() {
                println!(" URI:{}", v);
            }
        }
    }

    println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
    println!("Validity:");
    println!(" Not Before: {}", cert.not_before());
    println!(" Not After : {}", cert.not_after());

    let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
    let fp_string = proxmox::tools::digest_to_hex(&fp);
    let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
        .collect::<Vec<&str>>().join(":");

    println!("Fingerprint (sha256): {}", fp_string);

    let pubkey = cert.public_key()?;
    println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
    println!("Public key bits: {}", pubkey.bits());

    Ok(())
}
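
The fingerprint formatting in cert_info() splits the hex digest into byte pairs and joins them with colons. The same chunking on a fixed input (sketch):

    let hex = "deadbeef";
    let pretty = hex.as_bytes().chunks(2)
        .map(|v| std::str::from_utf8(v).unwrap())
        .collect::<Vec<&str>>()
        .join(":");
    assert_eq!(pretty, "de:ad:be:ef");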

#[api(
    input: {
        properties: {
            force: {
                description: "Force generation of new SSL certificate.",
                type: Boolean,
                optional: true,
            },
        }
    },
)]
/// Update node certificates and generate all needed files/directories.
fn update_certs(force: Option<bool>) -> Result<(), Error> {

    config::create_configdir()?;

    if let Err(err) = generate_auth_key() {
        bail!("unable to generate auth key - {}", err);
    }

    if let Err(err) = generate_csrf_key() {
        bail!("unable to generate csrf key - {}", err);
    }

    config::update_self_signed_cert(force.unwrap_or(false))?;

    Ok(())
}

pub fn cert_mgmt_cli() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("info", CliCommand::new(&API_METHOD_CERT_INFO))
        .insert("update", CliCommand::new(&API_METHOD_UPDATE_CERTS));

    cmd_def.into()
}
src/bin/proxmox_backup_manager/datastore.rs (new file)
@@ -0,0 +1,97 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Datastore list.
fn list_datastores(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::datastore::API_METHOD_LIST_DATASTORES;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("name"))
        .column(ColumnConfig::new("path"))
        .column(ColumnConfig::new("comment"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            name: {
                schema: DATASTORE_SCHEMA,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show datastore configuration
fn show_datastore(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::datastore::API_METHOD_READ_DATASTORE;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options();
    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn datastore_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES))
        .insert("show",
            CliCommand::new(&API_METHOD_SHOW_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
        )
        .insert("create",
            CliCommand::new(&api2::config::datastore::API_METHOD_CREATE_DATASTORE)
                .arg_param(&["name", "path"])
        )
        .insert("update",
            CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
                .completion_cb("gc-schedule", config::datastore::complete_calendar_event)
                .completion_cb("prune-schedule", config::datastore::complete_calendar_event)
        )
        .insert("remove",
            CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
        );

    cmd_def.into()
}
src/bin/proxmox_backup_manager/dns.rs (new file)
@@ -0,0 +1,57 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::api2;

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Read DNS settings
fn get_dns(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    param["node"] = "localhost".into();

    let info = &api2::node::dns::API_METHOD_GET_DNS;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("search"))
        .column(ColumnConfig::new("dns1"))
        .column(ColumnConfig::new("dns2"))
        .column(ColumnConfig::new("dns3"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn dns_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert(
            "get",
            CliCommand::new(&API_METHOD_GET_DNS)
        )
        .insert(
            "set",
            CliCommand::new(&api2::node::dns::API_METHOD_UPDATE_DNS)
                .fixed_param("node", String::from("localhost"))
        );

    cmd_def.into()
}
src/bin/proxmox_backup_manager/mod.rs (new file)
@@ -0,0 +1,16 @@
mod acl;
pub use acl::*;
mod cert;
pub use cert::*;
mod datastore;
pub use datastore::*;
mod dns;
pub use dns::*;
mod network;
pub use network::*;
mod remote;
pub use remote::*;
mod sync;
pub use sync::*;
mod user;
pub use user::*;
src/bin/proxmox_backup_manager/network.rs (new file)
@@ -0,0 +1,162 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2;

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Network device list.
fn list_network_devices(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    param["node"] = "localhost".into();

    let info = &api2::node::network::API_METHOD_LIST_NETWORK_DEVICES;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    if let Value::String(ref diff) = rpcenv["changes"] {
        if output_format == "text" {
            eprintln!("pending changes:\n{}\n", diff);
        }
    }

    fn render_address(_value: &Value, record: &Value) -> Result<String, Error> {
        let mut text = String::new();

        if let Some(cidr) = record["cidr"].as_str() {
            text.push_str(cidr);
        }
        if let Some(cidr) = record["cidr6"].as_str() {
            if !text.is_empty() { text.push('\n'); }
            text.push_str(cidr);
        }

        Ok(text)
    }

    fn render_ports(_value: &Value, record: &Value) -> Result<String, Error> {
        let mut text = String::new();

        if let Some(ports) = record["bridge_ports"].as_array() {
            let list: Vec<&str> = ports.iter().filter_map(|v| v.as_str()).collect();
            text.push_str(&list.join(" "));
        }
        if let Some(slaves) = record["slaves"].as_array() {
            let list: Vec<&str> = slaves.iter().filter_map(|v| v.as_str()).collect();
            text.push_str(&list.join(" "));
        }

        Ok(text)
    }

    fn render_gateway(_value: &Value, record: &Value) -> Result<String, Error> {
        let mut text = String::new();

        if let Some(gateway) = record["gateway"].as_str() {
            text.push_str(gateway);
        }
        if let Some(gateway) = record["gateway6"].as_str() {
            if !text.is_empty() { text.push('\n'); }
            text.push_str(gateway);
        }

        Ok(text)
    }

    let options = default_table_format_options()
        .column(ColumnConfig::new("name"))
        .column(ColumnConfig::new("type").header("type"))
        .column(ColumnConfig::new("autostart"))
        .column(ColumnConfig::new("method"))
        .column(ColumnConfig::new("method6"))
        .column(ColumnConfig::new("cidr").header("address").renderer(render_address))
        .column(ColumnConfig::new("gateway").header("gateway").renderer(render_gateway))
        .column(ColumnConfig::new("bridge_ports").header("ports/slaves").renderer(render_ports));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

#[api()]
/// Show pending configuration changes (diff)
fn pending_network_changes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    param["node"] = "localhost".into();

    let info = &api2::node::network::API_METHOD_LIST_NETWORK_DEVICES;
    let _data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    if let Value::String(ref diff) = rpcenv["changes"] {
        println!("{}", diff);
    }

    Ok(Value::Null)
}

pub fn network_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert(
            "list",
            CliCommand::new(&API_METHOD_LIST_NETWORK_DEVICES)
        )
        .insert(
            "changes",
            CliCommand::new(&API_METHOD_PENDING_NETWORK_CHANGES)
        )
        .insert(
            "create",
            CliCommand::new(&api2::node::network::API_METHOD_CREATE_INTERFACE)
                .fixed_param("node", String::from("localhost"))
                .arg_param(&["iface"])
                .completion_cb("iface", config::network::complete_interface_name)
                .completion_cb("bridge_ports", config::network::complete_port_list)
                .completion_cb("slaves", config::network::complete_port_list)
        )
        .insert(
            "update",
            CliCommand::new(&api2::node::network::API_METHOD_UPDATE_INTERFACE)
                .fixed_param("node", String::from("localhost"))
                .arg_param(&["iface"])
                .completion_cb("iface", config::network::complete_interface_name)
                .completion_cb("bridge_ports", config::network::complete_port_list)
                .completion_cb("slaves", config::network::complete_port_list)
        )
        .insert(
            "remove",
            CliCommand::new(&api2::node::network::API_METHOD_DELETE_INTERFACE)
                .fixed_param("node", String::from("localhost"))
                .arg_param(&["iface"])
                .completion_cb("iface", config::network::complete_interface_name)
        )
        .insert(
            "revert",
            CliCommand::new(&api2::node::network::API_METHOD_REVERT_NETWORK_CONFIG)
                .fixed_param("node", String::from("localhost"))
        )
        .insert(
            "reload",
            CliCommand::new(&api2::node::network::API_METHOD_RELOAD_NETWORK_CONFIG)
                .fixed_param("node", String::from("localhost"))
        );

    cmd_def.into()
}
src/bin/proxmox_backup_manager/remote.rs (new file)
@@ -0,0 +1,102 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List configured remotes.
fn list_remotes(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::remote::API_METHOD_LIST_REMOTES;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("name"))
        .column(ColumnConfig::new("host"))
        .column(ColumnConfig::new("userid"))
        .column(ColumnConfig::new("fingerprint"))
        .column(ColumnConfig::new("comment"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            name: {
                schema: REMOTE_ID_SCHEMA,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show remote configuration
fn show_remote(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::remote::API_METHOD_READ_REMOTE;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options();
    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn remote_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("list", CliCommand::new(&&API_METHOD_LIST_REMOTES))
        .insert(
            "show",
            CliCommand::new(&API_METHOD_SHOW_REMOTE)
                .arg_param(&["name"])
                .completion_cb("name", config::remote::complete_remote_name)
        )
        .insert(
            "create",
            // fixme: how to handle password parameter?
            CliCommand::new(&api2::config::remote::API_METHOD_CREATE_REMOTE)
                .arg_param(&["name"])
        )
        .insert(
            "update",
            CliCommand::new(&api2::config::remote::API_METHOD_UPDATE_REMOTE)
                .arg_param(&["name"])
                .completion_cb("name", config::remote::complete_remote_name)
        )
        .insert(
            "remove",
            CliCommand::new(&api2::config::remote::API_METHOD_DELETE_REMOTE)
                .arg_param(&["name"])
                .completion_cb("name", config::remote::complete_remote_name)
        );

    cmd_def.into()
}
src/bin/proxmox_backup_manager/sync.rs (new file)
@@ -0,0 +1,106 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Sync job list.
fn list_sync_jobs(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::sync::API_METHOD_LIST_SYNC_JOBS;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("id"))
        .column(ColumnConfig::new("store"))
        .column(ColumnConfig::new("remote"))
        .column(ColumnConfig::new("remote-store"))
        .column(ColumnConfig::new("schedule"))
        .column(ColumnConfig::new("comment"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show sync job configuration
fn show_sync_job(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::config::sync::API_METHOD_READ_SYNC_JOB;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options();
    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn sync_job_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("list", CliCommand::new(&API_METHOD_LIST_SYNC_JOBS))
        .insert("show",
            CliCommand::new(&API_METHOD_SHOW_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", config::sync::complete_sync_job_id)
        )
        .insert("create",
            CliCommand::new(&api2::config::sync::API_METHOD_CREATE_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", config::sync::complete_sync_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("remote", config::remote::complete_remote_name)
                .completion_cb("remote-store", crate::complete_remote_datastore_name)
        )
        .insert("update",
            CliCommand::new(&api2::config::sync::API_METHOD_UPDATE_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", config::sync::complete_sync_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("remote-store", crate::complete_remote_datastore_name)
        )
        .insert("remove",
            CliCommand::new(&api2::config::sync::API_METHOD_DELETE_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", config::sync::complete_sync_job_id)
        );

    cmd_def.into()
}
src/bin/proxmox_backup_manager/user.rs (new file)
@@ -0,0 +1,75 @@
use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::tools;
use proxmox_backup::api2;

#[api(
    input: {
        properties: {
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List configured users.
fn list_users(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let info = &api2::access::user::API_METHOD_LIST_USERS;
    let mut data = match info.handler {
        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("userid"))
        .column(
            ColumnConfig::new("enable")
                .renderer(tools::format::render_bool_with_default_true)
        )
        .column(
            ColumnConfig::new("expire")
                .renderer(tools::format::render_epoch)
        )
        .column(ColumnConfig::new("firstname"))
        .column(ColumnConfig::new("lastname"))
        .column(ColumnConfig::new("email"))
        .column(ColumnConfig::new("comment"));

    format_and_print_result_full(&mut data, info.returns, &output_format, &options);

    Ok(Value::Null)
}

pub fn user_commands() -> CommandLineInterface {

    let cmd_def = CliCommandMap::new()
        .insert("list", CliCommand::new(&&API_METHOD_LIST_USERS))
        .insert(
            "create",
            // fixme: how to handle password parameter?
            CliCommand::new(&api2::access::user::API_METHOD_CREATE_USER)
                .arg_param(&["userid"])
        )
        .insert(
            "update",
            CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
                .arg_param(&["userid"])
                .completion_cb("userid", config::user::complete_user_name)
        )
        .insert(
            "remove",
            CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
                .arg_param(&["userid"])
                .completion_cb("userid", config::user::complete_user_name)
        );

    cmd_def.into()
}
@@ -1,6 +1,6 @@
extern crate proxmox_backup;

-use failure::*;
+use anyhow::{format_err, Error};

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
@@ -499,12 +499,12 @@ fn main() {

    let cmd_def = CliCommandMap::new()
        .insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
-            .arg_param(&["archive", "source", "exclude"])
+            .arg_param(&["archive", "source"])
            .completion_cb("archive", tools::complete_file_name)
            .completion_cb("source", tools::complete_file_name)
        )
        .insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
-            .arg_param(&["archive", "pattern"])
+            .arg_param(&["archive", "target"])
            .completion_cb("archive", tools::complete_file_name)
            .completion_cb("target", tools::complete_file_name)
            .completion_cb("files-from", tools::complete_file_name)
@@ -519,5 +519,6 @@ fn main() {
        .completion_cb("archive", tools::complete_file_name)
    );

-    run_cli_command(cmd_def, None);
+    let rpcenv = CliEnvironment::new();
+    run_cli_command(cmd_def, rpcenv, None);
}

@@ -2,7 +2,7 @@ extern crate proxmox_backup;

// also see https://www.johndcook.com/blog/standard_deviation/

-use failure::*;
+use anyhow::{Error};
use std::io::{Read, Write};

use proxmox_backup::backup::*;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};
use futures::*;

extern crate proxmox_backup;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{Error};

use proxmox_backup::client::*;

@@ -29,3 +29,5 @@ pub use pxar_decode_writer::*;

mod backup_repo;
pub use backup_repo::*;

+pub mod pull;

@@ -1,4 +1,4 @@
-use failure::*;
+use anyhow::{format_err, Error};
use std::io::{Read, Write, Seek, SeekFrom};
use std::fs::File;
use std::sync::Arc;

@@ -1,6 +1,6 @@
use std::fmt;

-use failure::*;
+use anyhow::{format_err, Error};

use proxmox::api::schema::*;
use proxmox::const_regex;
Some files were not shown because too many files have changed in this diff.