Compare commits
159 Commits
SHA1 | Author | Date |
---|---|---|---|
cfe01b2e6a | |||
b19b032be3 | |||
5441708634 | |||
3c9b370255 | |||
510544770b | |||
e8293841c2 | |||
46114bf28e | |||
0d7e61f06f | |||
fd6a54dfbc | |||
1ea5722b8f | |||
bc8fadf494 | |||
a76934ad33 | |||
d7a122a026 | |||
6c25588e63 | |||
17a1f579d0 | |||
998db63933 | |||
c0fa14d94a | |||
6fd129844d | |||
baae780c99 | |||
09a1da25ed | |||
298c6aaef6 | |||
a329324139 | |||
a83e2ffeab | |||
5d7449a121 | |||
ebbe4958c6 | |||
73b2cc4977 | |||
7ecfde8150 | |||
796480a38b | |||
4ae6aede60 | |||
e0085e6612 | |||
194da6f867 | |||
3fade35260 | |||
5e39918fe1 | |||
f4dc47a805 | |||
12c65bacf1 | |||
ba37f3562d | |||
fce4659388 | |||
0a15870a82 | |||
9866de5e3d | |||
9d3f183ba9 | |||
fe233f3b3d | |||
be3bd0f90b | |||
3c053adbb5 | |||
c040ec22f7 | |||
43f627ba92 | |||
2b67de2e3f | |||
477859662a | |||
ccd7241e2f | |||
f37ef25bdd | |||
b93bbab454 | |||
9cebc837d5 | |||
1bc1d81a00 | |||
dda72456d7 | |||
8f2f3dd710 | |||
85959a99ea | |||
36700a0a87 | |||
dd4b42bac1 | |||
9626c28619 | |||
463c03462a | |||
a086427a7d | |||
4d431383d3 | |||
d10332a15d | |||
43772efc6e | |||
0af2da0437 | |||
d09db6c2e9 | |||
bc871bd19d | |||
b11a6a029d | |||
6a7be83efe | |||
58169da46a | |||
158f49e246 | |||
3e4a67f350 | |||
e0e5b4426a | |||
7158b304f5 | |||
833eca6d2f | |||
151acf5d96 | |||
4a363fb4a7 | |||
229adeb746 | |||
1eff9a1e89 | |||
ed4f0a0edc | |||
13bed6226e | |||
d937daedb3 | |||
8cce51135c | |||
0cfe1b3f13 | |||
05c16a6e59 | |||
3294b516d3 | |||
139bcedc53 | |||
cf9ea3c4c7 | |||
e84fde3e14 | |||
1de47507ff | |||
1a9948a488 | |||
04c2731349 | |||
5656888cc9 | |||
5fdc5a6f3d | |||
61d7b5013c | |||
871181d984 | |||
02939e178d | |||
3be308b949 | |||
83088644da | |||
14db8b52dc | |||
597427afaf | |||
3cddfb29be | |||
e15b76369a | |||
d7c1251435 | |||
ea3ce82a74 | |||
092378ba92 | |||
068e526862 | |||
a9767cf7de | |||
aadcc2815c | |||
0f3b7efa84 | |||
7c77e2f94a | |||
abd4c4cb8c | |||
09f12d1cf3 | |||
1db4cfb308 | |||
a4c1143664 | |||
0623674f44 | |||
2dd58db792 | |||
e11cfb93c0 | |||
bc0608955e | |||
36be19218e | |||
9fa39a46ba | |||
ff30b912a0 | |||
b0c10a88a3 | |||
ccbe6547a7 | |||
32afd60336 | |||
02e47b8d6e | |||
44055cac4d | |||
1dfc09cb6b | |||
48c56024aa | |||
cf103266b3 | |||
d5cf8f606c | |||
ce7ab28cfa | |||
07ca6f6e66 | |||
15ec790a40 | |||
cb73b2d69c | |||
c931c87173 | |||
28a0a9343c | |||
56b666458c | |||
cd6ddb5a69 | |||
ecd55041a2 | |||
e7e8e6d5f7 | |||
49df8ac115 | |||
7397f4a390 | |||
8317873c06 | |||
deef63699e | |||
c6e07769e9 | |||
423df9b1f4 | |||
c879e5af11 | |||
63d9aca96f | |||
c3b1da9e41 | |||
46388e6aef | |||
484d439a7c | |||
ab6615134c | |||
b1149ebb36 | |||
1bfdae7933 | |||
4f09d31085 | |||
58d73ddb1d | |||
6b809ff59b | |||
afe08d2755 | |||
a7bc5d4eaf |
10
Cargo.toml
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "proxmox-backup"
|
||||
version = "0.8.13"
|
||||
version = "0.8.21"
|
||||
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
|
||||
edition = "2018"
|
||||
license = "AGPL-3"
|
||||
@ -18,7 +18,6 @@ apt-pkg-native = "0.3.1" # custom patched version
|
||||
base64 = "0.12"
|
||||
bitflags = "1.2.1"
|
||||
bytes = "0.5"
|
||||
chrono = "0.4" # Date and time library for Rust
|
||||
crc32fast = "1"
|
||||
endian_trait = { version = "0.6", features = ["arrays"] }
|
||||
anyhow = "1.0"
|
||||
@ -26,7 +25,7 @@ futures = "0.3"
|
||||
h2 = { version = "0.2", features = ["stream"] }
|
||||
handlebars = "3.0"
|
||||
http = "0.2"
|
||||
hyper = "0.13"
|
||||
hyper = "0.13.6"
|
||||
lazy_static = "1.4"
|
||||
libc = "0.2"
|
||||
log = "0.4"
|
||||
@ -39,11 +38,11 @@ pam-sys = "0.5"
|
||||
percent-encoding = "2.1"
|
||||
pin-utils = "0.1.0"
|
||||
pathpatterns = "0.1.2"
|
||||
proxmox = { version = "0.3.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
proxmox = { version = "0.4.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||
proxmox-fuse = "0.1.0"
|
||||
pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
|
||||
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
|
||||
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
|
||||
regex = "1.2"
|
||||
rustyline = "6"
|
||||
@ -62,6 +61,7 @@ walkdir = "2"
|
||||
xdg = "2.2"
|
||||
zstd = { version = "0.4", features = [ "bindgen" ] }
|
||||
nom = "5.1"
|
||||
crossbeam-channel = "0.4"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
2
Makefile
@ -150,4 +150,4 @@ upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
|
||||
# check if working directory is clean
|
||||
git diff --exit-code --stat && git diff --exit-code --stat --staged
|
||||
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
|
||||
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
|
||||
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
|
||||
|
142
debian/changelog
vendored
@ -1,3 +1,144 @@
|
||||
rust-proxmox-backup (0.8.21-1) unstable; urgency=medium
|
||||
|
||||
* depend on crossbeam-channel
|
||||
|
||||
* speedup sync jobs (allow up to 4 worker threads)
|
||||
|
||||
* improve docs
|
||||
|
||||
* use jobstate mechanism for verify/garbage_collection schedules
|
||||
|
||||
* proxy: fix error handling in prune scheduling
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 25 Sep 2020 13:20:19 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.20-1) unstable; urgency=medium
|
||||
|
||||
* improve sync speed
|
||||
|
||||
* benchmark: use compressible data to get more realistic result
|
||||
|
||||
* docs: add onlineHelp to some panels
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 24 Sep 2020 13:15:45 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.19-1) unstable; urgency=medium
|
||||
|
||||
* src/api2/reader.rs: use std::fs::read instead of tokio::fs::read
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 13:30:27 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.18-1) unstable; urgency=medium
|
||||
|
||||
* src/client/pull.rs: allow up to 20 concurrent download streams
|
||||
|
||||
* docs: add version and date to HTML index
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 22 Sep 2020 12:39:26 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.17-1) unstable; urgency=medium
|
||||
|
||||
* src/client/pull.rs: open temporary manifest with truncate(true)
|
||||
|
||||
* depend on proxmox 0.4.1
|
||||
|
||||
* fix #3017: check array boundaries before using
|
||||
|
||||
* datastore/prune schedules: use JobState for tracking of schedules
|
||||
|
||||
* improve docs
|
||||
|
||||
* fix #3015: allow user self-service
|
||||
|
||||
* add verification scheduling to proxmox-backup-proxy
|
||||
|
||||
* fix #3014: allow DataStoreAdmins to list DS config
|
||||
|
||||
* depend on pxar 0.6.1
|
||||
|
||||
* fix #2942: implement lacp bond mode and bond_xmit_hash_policy
|
||||
|
||||
* api2/pull: make pull worker abortable
|
||||
|
||||
* fix #2870: renew tickets in HttpClient
|
||||
|
||||
* always allow retrieving (censored) subscription info
|
||||
|
||||
* fix #2957: allow Sys.Audit access to node RRD
|
||||
|
||||
* backup: check all referenced chunks actually exist
|
||||
|
||||
* backup: check verify state of previous backup before allowing reuse
|
||||
|
||||
* avoid chrono dependency
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 21 Sep 2020 14:08:32 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.16-1) unstable; urgency=medium
|
||||
|
||||
* BackupDir: make constructor fallible
|
||||
|
||||
* handle invalid mtime when formatting entries
|
||||
|
||||
* ui/docs: add onlineHelp button for syncjobs
|
||||
|
||||
* docs: add section for calendar events
|
||||
|
||||
* tools/systemd/parse_time: enable */x syntax for calendar events
|
||||
|
||||
* docs: set html img width limitation through css
|
||||
|
||||
* docs: use alabaster theme
|
||||
|
||||
* server: set http2 max frame size
|
||||
|
||||
* doc: Add section "FAQ"
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Sep 2020 15:54:57 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.15-1) unstable; urgency=medium
|
||||
|
||||
* verify: skip benchmark directory
|
||||
|
||||
* add benchmark flag to backup creation for proper cleanup when running
|
||||
a benchmark
|
||||
|
||||
* mount: fix mount subcommand
|
||||
|
||||
* ui: only mark backup encrypted if there are any files
|
||||
|
||||
* fix #2983: improve tcp performance
|
||||
|
||||
* improve ui and docs
|
||||
|
||||
* verify: rename corrupted chunks with .bad extension
|
||||
|
||||
* gc: remove .bad files on garbage collect
|
||||
|
||||
* ui: add translation support
|
||||
|
||||
* server/worker_task: fix upid_read_status
|
||||
|
||||
* tools/systemd/time: enable dates for calendarevents
|
||||
|
||||
* server/worker_task: fix 'unknown' status for some big task logs
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 10 Sep 2020 09:25:59 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.14-1) unstable; urgency=medium
|
||||
|
||||
* verify speed up: use separate IO thread, use datastore-wide cache (instead
|
||||
of per group)
|
||||
|
||||
* ui: datastore content: improve encrypted column
|
||||
|
||||
* ui: datastore content: show more granular verify state, especially for
|
||||
backup group rows
|
||||
|
||||
* verify: log progress in percent
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Sep 2020 09:36:47 +0200
|
||||
|
||||
rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
|
||||
|
||||
* improve and add to documentation
|
||||
@ -429,4 +570,3 @@ proxmox-backup (0.1-1) unstable; urgency=medium
|
||||
* first try
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 30 Nov 2018 13:03:28 +0100
|
||||
|
||||
|
19
debian/control
vendored
@ -11,8 +11,8 @@ Build-Depends: debhelper (>= 11),
|
||||
librust-base64-0.12+default-dev,
|
||||
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
||||
librust-bytes-0.5+default-dev,
|
||||
librust-chrono-0.4+default-dev,
|
||||
librust-crc32fast-1+default-dev,
|
||||
librust-crossbeam-channel-0.4+default-dev,
|
||||
librust-endian-trait-0.6+arrays-dev,
|
||||
librust-endian-trait-0.6+default-dev,
|
||||
librust-futures-0.3+default-dev,
|
||||
@ -20,7 +20,7 @@ Build-Depends: debhelper (>= 11),
|
||||
librust-h2-0.2+stream-dev,
|
||||
librust-handlebars-3+default-dev,
|
||||
librust-http-0.2+default-dev,
|
||||
librust-hyper-0.13+default-dev,
|
||||
librust-hyper-0.13+default-dev (>= 0.13.6-~~),
|
||||
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
||||
librust-libc-0.2+default-dev,
|
||||
librust-log-0.4+default-dev,
|
||||
@ -34,14 +34,14 @@ Build-Depends: debhelper (>= 11),
|
||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||
librust-pin-utils-0.1+default-dev,
|
||||
librust-proxmox-0.3+api-macro-dev (>= 0.3.3-~~),
|
||||
librust-proxmox-0.3+default-dev (>= 0.3.3-~~),
|
||||
librust-proxmox-0.3+sortable-macro-dev (>= 0.3.3-~~),
|
||||
librust-proxmox-0.3+websocket-dev (>= 0.3.3-~~),
|
||||
librust-proxmox-0.4+api-macro-dev (>= 0.4.1-~~),
|
||||
librust-proxmox-0.4+default-dev (>= 0.4.1-~~),
|
||||
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.1-~~),
|
||||
librust-proxmox-0.4+websocket-dev (>= 0.4.1-~~),
|
||||
librust-proxmox-fuse-0.1+default-dev,
|
||||
librust-pxar-0.6+default-dev,
|
||||
librust-pxar-0.6+futures-io-dev,
|
||||
librust-pxar-0.6+tokio-io-dev,
|
||||
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
|
||||
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
|
||||
librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
|
||||
librust-regex-1+default-dev (>= 1.2-~~),
|
||||
librust-rustyline-6+default-dev,
|
||||
librust-serde-1+default-dev,
|
||||
@ -103,6 +103,7 @@ Depends: fonts-font-awesome,
|
||||
libjs-extjs (>= 6.0.1),
|
||||
libzstd1 (>= 1.3.8),
|
||||
lvm2,
|
||||
pbs-i18n,
|
||||
proxmox-backup-docs,
|
||||
proxmox-mini-journalreader,
|
||||
proxmox-widget-toolkit (>= 2.2-4),
|
||||
|
1
debian/control.in
vendored
@ -4,6 +4,7 @@ Depends: fonts-font-awesome,
|
||||
libjs-extjs (>= 6.0.1),
|
||||
libzstd1 (>= 1.3.8),
|
||||
lvm2,
|
||||
pbs-i18n,
|
||||
proxmox-backup-docs,
|
||||
proxmox-mini-journalreader,
|
||||
proxmox-widget-toolkit (>= 2.2-4),
|
||||
|
7
debian/postinst
vendored
@ -15,11 +15,10 @@ case "$1" in
|
||||
fi
|
||||
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
||||
|
||||
if test -n "$2"; then
|
||||
if dpkg --compare-versions "$2" 'le' '0.8.10-1'; then
|
||||
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
||||
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
||||
echo "Fixing up termproxy user id in task log..."
|
||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::root: /:termproxy::root@pam: /' /var/log/proxmox-backup/tasks/active
|
||||
fi
|
||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
|
||||
fi
|
||||
;;
|
||||
|
||||
|
docs/Makefile
@ -28,7 +28,6 @@ COMPILEDIR := ../target/debug
|
||||
SPHINXOPTS += -t devbuild
|
||||
endif
|
||||
|
||||
|
||||
# Sphinx internal variables.
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
|
||||
|
||||
@ -68,9 +67,17 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
|
||||
proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
|
||||
rst2man $< >$@
|
||||
|
||||
.PHONY: onlinehelpinfo
|
||||
onlinehelpinfo:
|
||||
@echo "Generating OnlineHelpInfo.js..."
|
||||
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
||||
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
||||
|
||||
.PHONY: html
|
||||
html: ${GENERATED_SYNOPSIS}
|
||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
||||
cp custom.css $(BUILDDIR)/html/_static/
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
|
133
docs/_ext/proxmox-scanrefs.py
Normal file
@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# debugging stuff
|
||||
from pprint import pprint
|
||||
|
||||
from typing import cast
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
import os
|
||||
import io
|
||||
from docutils import nodes
|
||||
|
||||
from sphinx.builders import Builder
|
||||
from sphinx.util import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# refs are added in the following manner before the title of a section (note underscore and newline before title):
|
||||
# .. _my-label:
|
||||
#
|
||||
# Section to ref
|
||||
# --------------
|
||||
#
|
||||
#
|
||||
# then referred to like (note missing underscore):
|
||||
# "see :ref:`my-label`"
|
||||
#
|
||||
# the benefit of using this is if a label is explicitly set for a section,
|
||||
# we can refer to it with this anchor #my-label in the html,
|
||||
# even if the section name changes.
|
||||
#
|
||||
# see https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-ref
|
||||
|
||||
def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize later
|
||||
js_files = []
|
||||
used_anchors = []
|
||||
logger.info("scanning extjs files for onlineHelp definitions")
|
||||
for root, dirs, files in os.walk("{}".format(wwwdir)):
|
||||
#print(root, dirs, files)
|
||||
for filename in files:
|
||||
if filename.endswith('.js'):
|
||||
js_files.append(os.path.join(root, filename))
|
||||
for js_file in js_files:
|
||||
fd = open(js_file).read()
|
||||
match = re.search("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd) # match object is tuple
|
||||
if match:
|
||||
anchor = match.groups()[0]
|
||||
anchor = re.sub('_', '-', anchor) # normalize labels
|
||||
logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
|
||||
used_anchors.append(anchor)
|
||||
return used_anchors
|
||||
|
||||
|
||||
def setup(app):
|
||||
logger.info('Mapping reference labels...')
|
||||
app.add_builder(ReflabelMapper)
|
||||
return {
|
||||
'version': '0.1',
|
||||
'parallel_read_safe': True,
|
||||
'parallel_write_safe': True,
|
||||
}
|
||||
|
||||
class ReflabelMapper(Builder):
|
||||
name = 'proxmox-scanrefs'
|
||||
|
||||
def init(self):
|
||||
self.docnames = []
|
||||
self.env.online_help = {}
|
||||
self.env.online_help['pbs_documentation_index'] = {
|
||||
'link': '/docs/index.html',
|
||||
'title': 'Proxmox Backup Server Documentation Index',
|
||||
}
|
||||
self.env.used_anchors = scan_extjs_files()
|
||||
|
||||
if not os.path.isdir(self.outdir):
|
||||
os.mkdir(self.outdir)
|
||||
|
||||
self.output_filename = os.path.join(self.outdir, 'OnlineHelpInfo.js')
|
||||
self.output = io.open(self.output_filename, 'w', encoding='UTF-8')
|
||||
|
||||
def write_doc(self, docname, doctree):
|
||||
for node in doctree.traverse(nodes.section):
|
||||
#pprint(vars(node))
|
||||
|
||||
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
|
||||
filename = self.env.doc2path(docname)
|
||||
filename_html = re.sub('.rst', '.html', filename)
|
||||
labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones
|
||||
title = cast(nodes.title, node[0])
|
||||
logger.info('traversing section {}'.format(title.astext()))
|
||||
ref_name = getattr(title, 'rawsource', title.astext())
|
||||
|
||||
self.env.online_help[labelid] = {'link': '', 'title': ''}
|
||||
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
|
||||
self.env.online_help[labelid]['title'] = ref_name
|
||||
|
||||
return
|
||||
|
||||
|
||||
def get_outdated_docs(self):
|
||||
return 'all documents'
|
||||
|
||||
def prepare_writing(self, docnames):
|
||||
return
|
||||
|
||||
def get_target_uri(self, docname, typ=None):
|
||||
return ''
|
||||
|
||||
def validate_anchors(self):
|
||||
#pprint(self.env.online_help)
|
||||
to_remove = []
|
||||
for anchor in self.env.used_anchors:
|
||||
if anchor not in self.env.online_help:
|
||||
logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
|
||||
for anchor in self.env.online_help:
|
||||
if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
|
||||
logger.info("[*] anchor {} not used! deleting...".format(anchor))
|
||||
to_remove.append(anchor)
|
||||
for anchor in to_remove:
|
||||
self.env.online_help.pop(anchor, None)
|
||||
return
|
||||
|
||||
def finish(self):
|
||||
# generate OnlineHelpInfo.js output
|
||||
self.validate_anchors()
|
||||
|
||||
self.output.write("const proxmoxOnlineHelpInfo = ")
|
||||
self.output.write(json.dumps(self.env.online_help, indent=2))
|
||||
self.output.write(";\n")
|
||||
self.output.close()
|
||||
return
|
11
docs/_templates/index-sidebar.html
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
<h3>Navigation</h3>
|
||||
{{ toctree(includehidden=theme_sidebar_includehidden, collapse=True, titles_only=True) }}
|
||||
{% if theme_extra_nav_links %}
|
||||
<hr />
|
||||
<h3>Links</h3>
|
||||
<ul>
|
||||
{% for text, uri in theme_extra_nav_links.items() %}
|
||||
<li class="toctree-l1"><a href="{{ uri }}">{{ text }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
7
docs/_templates/sidebar-header.html
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
<p class="logo">
|
||||
<a href="index.html">
|
||||
<img class="logo" src="_static/proxmox-logo.svg" alt="Logo">
|
||||
</a>
|
||||
</p>
|
||||
<h1 class="logo logo-name"><a href="index.html">Proxmox Backup</a></h1>
|
||||
<hr style="width:100%;">
|
@ -24,6 +24,13 @@ good deduplication rates for file archives.
|
||||
The Proxmox Backup Server supports both strategies.
|
||||
|
||||
|
||||
Image Archives: ``<name>.img``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This is used for virtual machine images and other large binary
|
||||
data. Content is split into fixed-sized chunks.
|
||||
|
||||
|
||||
File Archives: ``<name>.pxar``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -34,13 +41,6 @@ the :ref:`pxar-format`, split into variable-sized chunks. The format
|
||||
is optimized to achieve good deduplication rates.
|
||||
|
||||
|
||||
Image Archives: ``<name>.img``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This is used for virtual machine images and other large binary
|
||||
data. Content is split into fixed-sized chunks.
|
||||
|
||||
|
||||
Binary Data (BLOBs)
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -127,17 +127,18 @@ Backup Server Management
|
||||
The command line tool to configure and manage the backup server is called
|
||||
:command:`proxmox-backup-manager`.
|
||||
|
||||
|
||||
.. _datastore_intro:
|
||||
|
||||
:term:`DataStore`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
A datastore is a place where backups are stored. The current implementation
|
||||
uses a directory inside a standard unix file system (``ext4``, ``xfs``
|
||||
or ``zfs``) to store the backup data.
|
||||
A datastore refers to a location at which backups are stored. The current
|
||||
implementation uses a directory inside a standard unix file system (``ext4``,
|
||||
``xfs`` or ``zfs``) to store the backup data.
|
||||
|
||||
Datastores are identified by a simple *ID*. You can configure it
|
||||
when setting up the backup server.
|
||||
Datastores are identified by a simple *ID*. You can configure this
|
||||
when setting up the datastore. The configuration information for datastores
|
||||
is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
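The configuration is kept in a simple plain-text format. The sketch below is only an illustration, assuming the property names mirror the command-line options shown later in this section (``--gc-schedule``, ``--prune-schedule``, ``--keep-last``); it is not an excerpt from this changeset:

.. code-block:: console
   :caption: Illustrative sketch of a datastore.cfg entry (property names are assumptions)

   # illustrative example only
   datastore: store1
       path /backup/disk1/store1
       comment This is my default storage.
       gc-schedule Tue 04:27
       prune-schedule daily
       keep-last 7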
|
||||
|
||||
.. note:: The `File Layout`_ requires the file system to support at least *65538*
|
||||
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||
@ -148,11 +149,17 @@ when setting up the backup server.
|
||||
|
||||
Disk Management
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-disks.png
|
||||
:align: right
|
||||
:alt: List of disks
|
||||
|
||||
Proxmox Backup Server comes with a set of disk utilities, which are
|
||||
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
||||
disks, create various filesystems, and get information about the disks.
|
||||
|
||||
To view the disks connected to the system, use the ``list`` subcommand of
|
||||
To view the disks connected to the system, navigate to **Administration ->
|
||||
Disks** in the web interface or use the ``list`` subcommand of
|
||||
``disk``:
|
||||
|
||||
.. code-block:: console
|
||||
@ -174,66 +181,98 @@ To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
||||
|
||||
# proxmox-backup-manager disk initialize sdX
|
||||
|
||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk, using ``fs
|
||||
create``. The following command creates an ``ext4`` filesystem and passes the
|
||||
``--add-datastore`` parameter, in order to automatically create a datastore on
|
||||
the disk (in this case ``sdd``). This will create a datastore at the location
|
||||
``/mnt/datastore/store1``:
|
||||
.. image:: images/screenshots/pbs-gui-disks-dir-create.png
|
||||
:align: right
|
||||
:alt: Create a directory
|
||||
|
||||
You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
|
||||
create``, or by navigating to **Administration -> Disks -> Directory** in the
|
||||
web interface and creating one from there. The following command creates an
|
||||
``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
|
||||
automatically create a datastore on the disk (in this case ``sdd``). This will
|
||||
create a datastore at the location ``/mnt/datastore/store1``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
|
||||
create datastore 'store1' on disk sdd
|
||||
Percentage done: 1
|
||||
...
|
||||
Percentage done: 99
|
||||
TASK OK
|
||||
|
||||
You can also create a ``zpool`` with various raid levels. The command below
|
||||
creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and mounts it
|
||||
on the root directory (default):
|
||||
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
|
||||
:align: right
|
||||
:alt: Create ZFS
|
||||
|
||||
You can also create a ``zpool`` with various raid levels from **Administration
|
||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||
mounts it on the root directory (default):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
|
||||
create Mirror zpool 'zpool1' on devices 'sdb,sdc'
|
||||
# "zpool" "create" "-o" "ashift=12" "zpool1" "mirror" "sdb" "sdc"
|
||||
|
||||
TASK OK
|
||||
|
||||
.. note::
|
||||
You can also pass the ``--add-datastore`` parameter here, to automatically
|
||||
.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
|
||||
create a datastore from the disk.
|
||||
|
||||
You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
|
||||
filesystems and zpools respectively.
|
||||
|
||||
If a disk supports S.M.A.R.T. capability, and you have this enabled, you can
|
||||
display S.M.A.R.T. attributes using the command:
|
||||
Proxmox Backup Server uses the package smartmontools. This is a set of tools
|
||||
used to monitor and control the S.M.A.R.T. system for local hard disks. If a
|
||||
disk supports S.M.A.R.T. capability, and you have this enabled, you can
|
||||
display S.M.A.R.T. attributes from the web interface or by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager disk smart-attributes sdX
|
||||
|
||||
.. note:: This functionality may also be accessed directly through the use of
|
||||
the ``smartctl`` command, which comes as part of the smartmontools package
|
||||
(see ``man smartctl`` for more details).
|
||||
|
||||
|
||||
Datastore Configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-datastore.png
|
||||
:align: right
|
||||
:alt: Datastore Overview
|
||||
|
||||
You can configure multiple datastores. At least one datastore needs to be
|
||||
configured. The datastore is identified by a simple `name` and points to a
|
||||
configured. The datastore is identified by a simple *name* and points to a
|
||||
directory on the filesystem. Each datastore also has associated retention
settings that define how many backup snapshots to keep for each interval
(``hourly``, ``daily``, ``weekly``, ``monthly``, ``yearly``), as well as a
time-independent number of backups to keep in that store. :ref:`Pruning <pruning>` and
|
||||
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
||||
periodically based on a configured :term:`schedule` per datastore.
|
||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
||||
|
||||
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||
Creating a Datastore
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
|
||||
:align: right
|
||||
:alt: Create a datastore
|
||||
|
||||
You can create a new datastore from the web GUI, by navigating to **Datastore** in
|
||||
the menu tree and clicking **Create**. Here:
|
||||
|
||||
* *Name* refers to the name of the datastore
|
||||
* *Backing Path* is the path to the directory upon which you want to create the
|
||||
datastore
|
||||
* *GC Schedule* refers to the time and intervals at which garbage collection
|
||||
runs
|
||||
* *Prune Schedule* refers to the frequency at which pruning takes place
|
||||
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`Pruning <pruning>`).
|
||||
|
||||
Alternatively you can create a new datastore from the command line. The
|
||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
|
||||
|
||||
To list existing datastores run:
|
||||
Managing Datastores
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To list existing datastores from the command line run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -244,13 +283,15 @@ To list existing datastores run:
|
||||
│ store1 │ /backup/disk1/store1 │ This is my default storage. │
|
||||
└────────┴──────────────────────┴─────────────────────────────┘
|
||||
|
||||
You can change settings of a datastore, for example to set a prune and garbage
|
||||
collection schedule or retention settings using ``update`` subcommand and view
|
||||
a datastore with the ``show`` subcommand:
|
||||
You can change the garbage collection and prune settings of a datastore, by
|
||||
editing the datastore from the GUI or by using the ``update`` subcommand. For
|
||||
example, the below command changes the garbage collection schedule using the
|
||||
``update`` subcommand and prints the properties of the datastore with the
|
||||
``show`` subcommand:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore update store1 --keep-last 7 --prune-schedule daily --gc-schedule 'Tue 04:27'
|
||||
# proxmox-backup-manager datastore update store1 --gc-schedule 'Tue 04:27'
|
||||
# proxmox-backup-manager datastore show store1
|
||||
┌────────────────┬─────────────────────────────┐
|
||||
│ Name │ Value │
|
||||
@ -323,11 +364,15 @@ directories will store the chunked data after a backup operation has been execut
|
||||
276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
|
||||
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
||||
|
||||
|
||||
.. _user_mgmt:
|
||||
|
||||
User Management
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-user-management.png
|
||||
:align: right
|
||||
:alt: User management
|
||||
|
||||
Proxmox Backup Server supports several authentication realms, and you need to
|
||||
choose the realm when you add a new user. Possible realms are:
|
||||
|
||||
@ -339,7 +384,8 @@ choose the realm when you add a new user. Possible realms are:
|
||||
``/etc/proxmox-backup/shadow.json``.
|
||||
|
||||
After installation, there is a single user ``root@pam``, which
|
||||
corresponds to the Unix superuser. You can use the
|
||||
corresponds to the Unix superuser. User configuration information is stored in the file
|
||||
``/etc/proxmox-backup/user.cfg``. You can use the
|
||||
``proxmox-backup-manager`` command line tool to list or manipulate
|
||||
users:
|
||||
|
||||
@ -352,19 +398,21 @@ users:
|
||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||
└─────────────┴────────┴────────┴───────────┴──────────┴────────────────┴────────────────────┘
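The ``user.cfg`` file itself is also plain text. The following is a minimal sketch only, assuming the property names mirror the ``user create`` and ``user update`` options shown below (``--email``, ``--firstname``, ``--lastname``, ``--comment``); it is not taken from this changeset:

.. code-block:: console
   :caption: Illustrative sketch of a user.cfg entry (property names are assumptions)

   # illustrative example only
   user: john@pbs
       email john@example.com
       firstname John
       lastname Smith
       comment An example user.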
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
||||
:align: right
|
||||
:alt: Add a new user
|
||||
|
||||
The superuser has full administration rights on everything, so you
|
||||
normally want to add other users with less privileges:
|
||||
normally want to add other users with fewer privileges. You can create a new
|
||||
user with the ``user create`` subcommand or through the web interface, under
|
||||
**Configuration -> User Management**. The ``create`` subcommand lets you specify
|
||||
many options like ``--email`` or ``--password``. You can update or change any
|
||||
user properties using the ``update`` subcommand later (**Edit** in the GUI):
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user create john@pbs --email john@example.com
|
||||
|
||||
The create command lets you specify many options like ``--email`` or
|
||||
``--password``. You can update or change any of them using the
|
||||
update command later:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
|
||||
# proxmox-backup-manager user update john@pbs --comment "An example user."
|
||||
|
||||
@ -400,6 +448,8 @@ Or completely remove the user with:
|
||||
# proxmox-backup-manager user remove john@pbs
|
||||
|
||||
|
||||
.. _user_acl:
|
||||
|
||||
Access Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
@ -442,9 +492,32 @@ following roles exist:
|
||||
**RemoteSyncOperator**
|
||||
Is allowed to read data from a remote.
|
||||
|
||||
You can use the ``acl`` subcommand to manage and monitor user permissions. For
|
||||
example, the command below will add the user ``john@pbs`` as a
|
||||
**DatastoreAdmin** for the data store ``store1``, located at ``/backup/disk1/store1``:
|
||||
.. image:: images/screenshots/pbs-gui-permissions-add.png
|
||||
:align: right
|
||||
:alt: Add permissions for user
|
||||
|
||||
Access permission information is stored in ``/etc/proxmox-backup/acl.cfg``. Each
entry in the file contains 5 fields, separated using a colon (':') as a delimiter.
A typical entry takes the form:
|
||||
|
||||
``acl:1:/datastore:john@pbs:DatastoreBackup``
|
||||
|
||||
The data represented in each field is as follows:
|
||||
|
||||
#. ``acl`` identifier
|
||||
#. A ``1`` or ``0``, representing whether propagation is enabled or disabled,
|
||||
respectively
|
||||
#. The object on which the permission is set. This can be a specific object
|
||||
(single datastore, remote, etc.) or a top-level object, which, with
propagation enabled, also represents all children of the object.
|
||||
#. The user for which the permission is set
|
||||
#. The role being set
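Putting this together, a couple of entries in ``acl.cfg`` could look like the sketch below. The first line is the example entry from above; the second is a hypothetical entry granting ``john@pbs`` the **DatastoreAdmin** role on the datastore ``store1`` without propagation:

.. code-block:: console
   :caption: Illustrative acl.cfg entries (second line is hypothetical)

   acl:1:/datastore:john@pbs:DatastoreBackup
   acl:0:/datastore/store1:john@pbs:DatastoreAdmin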
|
||||
|
||||
You can manage datastore permissions from **Configuration -> Permissions** in the
|
||||
web interface. Likewise, you can use the ``acl`` subcommand to manage and
|
||||
monitor user permissions from the command line. For example, the command below
|
||||
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
|
||||
``store1``, located at ``/backup/disk1/store1``:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -474,87 +547,120 @@ A single user can be assigned multiple permission sets for different data stores
|
||||
|
||||
Network Management
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
Proxmox Backup Server provides an interface for network configuration, through the
|
||||
``network`` subcommand. This allows you to carry out some basic network
|
||||
management tasks such as adding, configuring and removing network interfaces.
|
||||
|
||||
Proxmox Backup Server provides both a web interface and a command line tool for
|
||||
network configuration. You can find the configuration options in the web
|
||||
interface under the **Network Interfaces** section of the **Configuration** menu
|
||||
tree item. The command line tool is accessed via the ``network`` subcommand.
|
||||
These interfaces allow you to carry out some basic network management tasks,
|
||||
such as adding, configuring, and removing network interfaces.
|
||||
|
||||
.. note:: Any changes made to the network configuration are not
|
||||
applied, until you click on **Apply Configuration** or enter the ``network
|
||||
reload`` command. This allows you to make many changes at once. It also allows
|
||||
you to ensure that your changes are correct before applying them, as making a
|
||||
mistake here can render the server inaccessible over the network.
|
||||
|
||||
To get a list of available interfaces, use the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network list
|
||||
┌───────┬────────┬───────────┬────────┬─────────┬───────────────────┬──────────────┬──────────────┐
|
||||
│ name │ type │ autostart │ method │ method6 │ address │ gateway │ ports/slaves │
|
||||
╞═══════╪════════╪═══════════╪════════╪═════════╪═══════════════════╪══════════════╪══════════════╡
|
||||
│ bond0 │ bond │ 1 │ manual │ │ │ │ ens18 ens19 │
|
||||
├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
|
||||
│ ens18 │ eth │ 1 │ manual │ │ │ │ │
|
||||
├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
|
||||
│ ens19 │ eth │ 1 │ manual │ │ │ │ │
|
||||
├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
|
||||
│ vmbr0 │ bridge │ 1 │ static │ │ x.x.x.x/x │ x.x.x.x │ bond0 │
|
||||
└───────┴────────┴───────────┴────────┴─────────┴───────────────────┴──────────────┴──────────────┘
|
||||
┌───────┬────────┬───────────┬────────┬─────────────┬──────────────┬──────────────┐
|
||||
│ name │ type │ autostart │ method │ address │ gateway │ ports/slaves │
|
||||
╞═══════╪════════╪═══════════╪════════╪═════════════╪══════════════╪══════════════╡
|
||||
│ bond0 │ bond │ 1 │ static │ x.x.x.x/x │ x.x.x.x │ ens18 ens19 │
|
||||
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
|
||||
│ ens18 │ eth │ 1 │ manual │ │ │ │
|
||||
├───────┼────────┼───────────┼────────┼─────────────┼──────────────┼──────────────┤
|
||||
│ ens19 │ eth │ 1 │ manual │ │ │ │
|
||||
└───────┴────────┴───────────┴────────┴─────────────┴──────────────┴──────────────┘
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-network-create-bond.png
|
||||
:align: right
|
||||
:alt: Add a network interface
|
||||
|
||||
To add a new network interface, use the ``create`` subcommand with the relevant
|
||||
parameters. The following command shows a template for creating a new bridge:
|
||||
parameters. For example, you may want to set up a bond, for the purpose of
|
||||
network redundancy. The following command shows a template for creating the bond shown
|
||||
in the list above:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network create vmbr1 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x --bridge_ports iface_name --type bridge
|
||||
# proxmox-backup-manager network create bond0 --type bond --bond_mode active-backup --slaves ens18,ens19 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x
|
||||
|
||||
You can make changes to the configuration of a network interface with the
|
||||
``update`` subcommand:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network update vmbr1 --cidr y.y.y.y/y
|
||||
# proxmox-backup-manager network update bond0 --cidr y.y.y.y/y
|
||||
|
||||
You can also remove a network interface:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network remove vmbr1
|
||||
# proxmox-backup-manager network remove bond0
|
||||
|
||||
To view the changes made to the network configuration file, before committing
|
||||
them, use the command:
|
||||
The pending changes for the network configuration file will appear at the bottom of the
|
||||
web interface. You can also view these changes, by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network changes
|
||||
|
||||
If you would like to cancel all changes at this point, you can do this using:
|
||||
If you would like to cancel all changes at this point, you can either click on
|
||||
the **Revert** button or use the following command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network revert
|
||||
|
||||
If you are happy with the changes and would like to write them into the
|
||||
configuration file, the command is:
|
||||
configuration file, select **Apply Configuration**. The corresponding command
|
||||
is:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager network reload
|
||||
|
||||
You can also configure DNS settings using the ``dns`` subcommand of
|
||||
.. note:: This command and corresponding GUI button rely on the ``ifreload``
|
||||
command, from the package ``ifupdown2``. This package is included within the
|
||||
Proxmox Backup Server installation; however, you may have to install it yourself
if you have installed Proxmox Backup Server on top of Debian or Proxmox VE.
|
||||
|
||||
You can also configure DNS settings, from the **DNS** section
|
||||
of **Configuration** or by using the ``dns`` subcommand of
|
||||
``proxmox-backup-manager``.
|
||||
|
||||
.. _backup_remote:
|
||||
|
||||
:term:`Remote`
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
A remote refers to a separate Proxmox Backup Server installation and a user on that
|
||||
installation, from which you can `sync` datastores to a local datastore with a
|
||||
`Sync Job`.
|
||||
`Sync Job`. You can configure remotes in the web interface, under **Configuration
|
||||
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
|
||||
configuration information for remotes is stored in the file
|
||||
``/etc/proxmox-backup/remote.cfg``.
|
||||
|
||||
.. image:: images/screenshots/pbs-gui-remote-add.png
|
||||
:align: right
|
||||
:alt: Add a remote
|
||||
|
||||
To add a remote, you need its hostname or ip, a userid and password on the
|
||||
remote, and its certificate fingerprint. To get the fingerprint, use the
|
||||
``proxmox-backup-manager cert info`` command on the remote.
|
||||
``proxmox-backup-manager cert info`` command on the remote, or navigate to
|
||||
**Dashboard** in the remote's web interface and select **Show Fingerprint**.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager cert info |grep Fingerprint
|
||||
Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe
|
||||
|
||||
Using the information specified above, add the remote with:
|
||||
Using the information specified above, you can add a remote from the **Remotes**
|
||||
configuration panel, or by using the command:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -574,14 +680,23 @@ Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
|
||||
└──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
|
||||
# proxmox-backup-manager remote remove pbs2
|
||||
|
||||
.. _syncjobs:
|
||||
|
||||
Sync Jobs
|
||||
~~~~~~~~~
|
||||
|
||||
Sync jobs are configured to pull the contents of a datastore on a `Remote` to a
|
||||
local datastore. You can either start the sync job manually on the GUI or
|
||||
provide it with a :term:`schedule` to run regularly. The
|
||||
``proxmox-backup-manager sync-job`` command is used to manage sync jobs:
|
||||
.. image:: images/screenshots/pbs-gui-syncjob-add.png
|
||||
:align: right
|
||||
:alt: Add a Sync Job
|
||||
|
||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
|
||||
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
|
||||
The configuration information for sync jobs is stored at
|
||||
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
||||
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
||||
either start it manually on the GUI or provide it with a schedule (see
|
||||
:ref:`calendar-events`) to run regularly.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -595,6 +710,7 @@ provide it with a :term:`schedule` to run regularly. The
|
||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||
# proxmox-backup-manager sync-job remove pbs2-local
|
||||
|
||||
|
||||
Garbage Collection
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
||||
@ -1307,6 +1423,10 @@ as ``user1@pbs``.
|
||||
# pvesm add pbs store2 --server localhost --datastore store2
|
||||
# pvesm set store2 --username user1@pbs --password <secret>
|
||||
|
||||
.. note:: If you would rather not pass your password as plain text, you can pass
|
||||
the ``--password`` parameter, without any arguments. This will cause the
|
||||
program to prompt you for a password upon entering the command.
|
||||
|
||||
If your backup server uses a self signed certificate, you need to add
|
||||
the certificate fingerprint to the configuration. You can get the
|
||||
fingerprint by running the following command on the backup server:
|
||||
@ -1331,6 +1451,10 @@ After that you should be able to see storage status with:
|
||||
Name Type Status Total Used Available %
|
||||
store2 pbs active 3905109820 1336687816 2568422004 34.23%
|
||||
|
||||
Having added the PBS datastore to `Proxmox VE`_, you can backup VMs and
|
||||
containers in the same way you would for any other storage device within the
|
||||
environment (see `PVE Admin Guide: Backup and Restore
|
||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
|
||||
|
||||
|
||||
.. include:: command-line-tools.rst
|
||||
|
100
docs/calendarevents.rst
Normal file
@ -0,0 +1,100 @@
|
||||
|
||||
.. _calendar-events:
|
||||
|
||||
Calendar Events
|
||||
===============
|
||||
|
||||
Introduction and Format
|
||||
-----------------------
|
||||
|
||||
Certain tasks, for example pruning and garbage collection, need to be
|
||||
performed on a regular basis. Proxmox Backup Server uses a format inspired
|
||||
by the systemd Time and Date Specification (see `systemd.time manpage`_)
|
||||
called `calendar events` for its schedules.
|
||||
|
||||
`Calendar events` are expressions to specify one or more points in time.
|
||||
They are mostly compatible with systemd's calendar events.
|
||||
|
||||
The general format is as follows:
|
||||
|
||||
.. code-block:: console
|
||||
:caption: Calendar event
|
||||
|
||||
[WEEKDAY] [[YEARS-]MONTHS-DAYS] [HOURS:MINUTES[:SECONDS]]
|
||||
|
||||
Note that at least one of the weekday, date, or time parts has to be present.
|
||||
If the weekday or date part is omitted, all (week)days are included.
|
||||
If the time part is omitted, the time 00:00:00 is implied.
|
||||
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')
|
||||
|
||||
Weekdays are specified with their abbreviated English names:
|
||||
`mon, tue, wed, thu, fri, sat, sun`.
|
||||
|
||||
Each field can contain multiple values in the following formats:
|
||||
|
||||
* comma-separated: e.g., 01,02,03
|
||||
* as a range: e.g., 01..10
|
||||
* as a repetition: e.g., 05/10 (meaning starting at 5, then repeating every 10)
|
||||
* and a combination of the above: e.g., 01,05..10,12/02
|
||||
* or a `*` for every possible value: e.g., \*:00
|
||||
|
||||
There are some special values that have specific meaning:
|
||||
|
||||
================================= ==============================
|
||||
Value Syntax
|
||||
================================= ==============================
|
||||
`minutely` `*-*-* *:*:00`
|
||||
`hourly` `*-*-* *:00:00`
|
||||
`daily` `*-*-* 00:00:00`
|
||||
`weekly` `mon *-*-* 00:00:00`
|
||||
`monthly` `*-*-01 00:00:00`
|
||||
`yearly` or `annually` `*-01-01 00:00:00`
|
||||
`quarterly` `*-01,04,07,10-01 00:00:00`
|
||||
`semiannually` or `semi-annually` `*-01,07-01 00:00:00`
|
||||
================================= ==============================
|
||||
|
||||
|
||||
Here is a table with some useful examples:
|
||||
|
||||
======================== ============================= ===================================
|
||||
Example Alternative Explanation
|
||||
======================== ============================= ===================================
|
||||
`mon,tue,wed,thu,fri` `mon..fri` Every working day at 00:00
|
||||
`sat,sun` `sat..sun` Only on weekends at 00:00
|
||||
`mon,wed,fri` -- Monday, Wednesday, Friday at 00:00
|
||||
`12:05` -- Every day at 12:05 PM
|
||||
`*:00/5` `0/1:0/5` Every five minutes
|
||||
`mon..wed *:30/10` `mon,tue,wed *:30/10` Monday, Tuesday, Wednesday 30, 40 and 50 minutes after every full hour
|
||||
`mon..fri 8..17,22:0/15` -- Every working day every 15 minutes between 8 AM and 6 PM and between 10 PM and 11 PM
|
||||
`fri 12..13:5/20` `fri 12,13:5/20` Friday at 12:05, 12:25, 12:45, 13:05, 13:25 and 13:45
|
||||
`12,14,16,18,20,22:5` `12/2:5` Every day starting at 12:05 until 22:05, every 2 hours
|
||||
`*:*` `0/1:0/1` Every minute (minimum interval)
|
||||
`*-05` -- On the 5th day of every Month
|
||||
`Sat *-1..7 15:00` -- First Saturday each Month at 15:00
|
||||
`2015-10-21` -- 21st October 2015 at 00:00
|
||||
======================== ============================= ===================================
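Such an expression is what you pass wherever a schedule is expected. As a usage sketch, the commands below reuse the ``proxmox-backup-manager datastore update`` options from the administration guide; the datastore name ``store1`` and the chosen events are just examples:

.. code-block:: console

   # proxmox-backup-manager datastore update store1 --gc-schedule 'sat 02:00'
   # proxmox-backup-manager datastore update store1 --prune-schedule 'mon..fri 03:30'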
|
||||
|
||||
|
||||
Differences to systemd
|
||||
----------------------
|
||||
|
||||
Not all features of systemd calendar events are implemented:
|
||||
|
||||
* no unix timestamps (e.g. `@12345`): instead use date and time to specify
|
||||
a specific point in time
|
||||
* no timezone: all schedules use the set timezone on the server
|
||||
* no sub-second resolution
|
||||
* no reverse day syntax (e.g. 2020-03~01)
|
||||
* no repetition of ranges (e.g. 1..10/2)
|
||||
|
||||
Notes on scheduling
|
||||
-------------------
|
||||
|
||||
In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks every minute whether any
job schedule is due. This means that even though `calendar events` can
specify seconds, schedules are only evaluated once per minute.
|
||||
|
||||
Also, all schedules will be checked against the timezone set
|
||||
in the `Proxmox Backup`_ server.
|
65
docs/conf.py
@ -18,9 +18,12 @@
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
# import sys
|
||||
import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# custom extensions
|
||||
sys.path.append(os.path.abspath("./_ext"))
|
||||
|
||||
# -- Implement custom formatter for code-blocks ---------------------------
|
||||
#
|
||||
# * use smaller font
|
||||
@ -46,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
|
||||
extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]
|
||||
extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
|
||||
|
||||
todo_link_only = True
|
||||
|
||||
@ -71,7 +74,7 @@ rst_epilog = epilog_file.read()
|
||||
|
||||
# General information about the project.
|
||||
project = 'Proxmox Backup'
|
||||
copyright = '2019-2020, Proxmox Support Team'
|
||||
copyright = '2019-2020, Proxmox Server Solutions GmbH'
|
||||
author = 'Proxmox Support Team'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
@ -94,12 +97,10 @@ language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#
|
||||
# today = ''
|
||||
#
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#
|
||||
# today_fmt = '%B %d, %Y'
|
||||
today_fmt = '%A, %d %B %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
@ -145,7 +146,7 @@ pygments_style = 'sphinx'
|
||||
# keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = True
|
||||
todo_include_todos = not tags.has('release')
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
@ -153,13 +154,51 @@ todo_include_todos = True
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'sphinxdoc'
|
||||
html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
html_theme_options = {
|
||||
'fixed_sidebar': True,
|
||||
'sidebar_includehidden': False,
|
||||
'sidebar_collapse': False,
|
||||
'globaltoc_collapse': False,
|
||||
'show_relbar_bottom': True,
|
||||
'show_powered_by': False,
|
||||
|
||||
'extra_nav_links': {
|
||||
'Proxmox Homepage': 'https://proxmox.com',
|
||||
'PDF': 'proxmox-backup.pdf',
|
||||
},
|
||||
|
||||
'sidebar_width': '320px',
|
||||
'page_width': '1320px',
|
||||
# font styles
|
||||
'head_font_family': 'Lato, sans-serif',
|
||||
'caption_font_family': 'Lato, sans-serif',
|
||||
'caption_font_size': '20px',
|
||||
'font_family': 'Open Sans, sans-serif',
|
||||
}
|
||||
|
||||
# Alabaster theme recommends setting this fixed.
|
||||
# If you switch theme this needs to be removed, probably.
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'sidebar-header.html',
|
||||
'searchbox.html',
|
||||
'navigation.html',
|
||||
'relations.html',
|
||||
],
|
||||
|
||||
'index': [
|
||||
'sidebar-header.html',
|
||||
'searchbox.html',
|
||||
'index-sidebar.html',
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
@ -176,7 +215,7 @@ html_theme = 'sphinxdoc'
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#
|
||||
html_logo = 'images/proxmox-logo.svg'
|
||||
#html_logo = 'images/proxmox-logo.svg' # replaced by html_theme_options.logo
|
||||
|
||||
# The name of an image file (relative to this directory) to use as a favicon of
|
||||
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
@ -206,10 +245,6 @@ html_static_path = ['_static']
|
||||
#
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#
|
||||
@ -229,7 +264,7 @@ html_static_path = ['_static']
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#
|
||||
# html_show_sourcelink = True
|
||||
html_show_sourcelink = False
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#
|
||||
|
52
docs/custom.css
Normal file
@ -0,0 +1,52 @@
|
||||
div.sphinxsidebar {
|
||||
height: calc(100% - 20px);
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
h1.logo-name {
|
||||
font-size: 24px;
|
||||
}
|
||||
|
||||
div.body img {
|
||||
width: 250px;
|
||||
}
|
||||
pre {
|
||||
padding: 5px 10px;
|
||||
}
|
||||
|
||||
li a.current {
|
||||
font-weight: bold;
|
||||
border-bottom: 1px solid #000;
|
||||
}
|
||||
ul li.toctree-l1 {
|
||||
margin-top: 0.5em;
|
||||
}
|
||||
ul li.toctree-l1 > a {
|
||||
color: #000;
|
||||
}
|
||||
|
||||
div.sphinxsidebar form.search {
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h3 {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.sphinxsidebar h1.logo-name {
|
||||
display: none;
|
||||
}
|
||||
@media screen and (max-width: 875px) {
|
||||
div.sphinxsidebar p.logo {
|
||||
display: initial;
|
||||
}
|
||||
div.sphinxsidebar h1.logo-name {
|
||||
display: block;
|
||||
}
|
||||
div.sphinxsidebar span {
|
||||
color: #AAA;
|
||||
}
|
||||
ul li.toctree-l1 > a {
|
||||
color: #FFF;
|
||||
}
|
||||
}
|
@ -13,7 +13,7 @@
|
||||
.. _Proxmox: https://www.proxmox.com
|
||||
.. _Proxmox Community Forum: https://forum.proxmox.com
|
||||
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
|
||||
// FIXME
|
||||
.. FIXME
|
||||
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
|
||||
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
|
||||
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
|
||||
@ -38,3 +38,6 @@
|
||||
.. _RFC3399: https://tools.ietf.org/html/rfc3339
|
||||
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
|
||||
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
|
||||
|
||||
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
|
||||
|
||||
docs/faq.rst (new file, 71 lines)
@ -0,0 +1,71 @@
FAQ
===

What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------

Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.


Which platforms are supported as a backup source (client)?
----------------------------------------------------------

The client tool works on most modern Linux systems, meaning you are not limited
to Debian-based backups.


Will Proxmox Backup Server run on a 32-bit processor?
-----------------------------------------------------

Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.


How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------

+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version     | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x     | Debian 10 (Buster) | tba           | tba        | tba                |
+-----------------------+--------------------+---------------+------------+--------------------+


Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------

Proxmox Backup Server allows you to copy or synchronize datastores to other
locations, through the use of *Remotes* and *Sync Jobs*. A *Remote* is the term
given to a separate server, which has a datastore that can be synced to a local store.
A *Sync Job* is the process which is used to pull the contents of a datastore from
a *Remote* to a local datastore.


Can Proxmox Backup Server verify data integrity of a backup archive?
--------------------------------------------------------------------

Proxmox Backup Server uses a built-in SHA-256 checksum algorithm to ensure
data integrity. Within each backup, a manifest file (index.json) is created,
which contains a list of all the backup files, along with their sizes and
checksums. This manifest file is used to verify the integrity of each backup.


When backing up to remote servers, do I have to trust the remote server?
------------------------------------------------------------------------

Proxmox Backup Server supports client-side encryption, meaning your data is
encrypted before it reaches the server. Thus, in the event that an attacker
gains access to the server, they will not be able to read the data.

.. note:: Encryption is not enabled by default. To set up encryption, see the
   `Encryption
   <https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
   of the Proxmox Backup Server Administration Guide.


Is the backup incremental/deduplicated?
---------------------------------------

With Proxmox Backup Server, backups are sent incrementally and data is
deduplicated on the server.
This minimizes both the storage consumed and the network impact.
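
The integrity answer above describes a manifest-driven model: per-file sizes plus SHA-256 checksums recorded in index.json. The following minimal Rust sketch illustrates that idea only; the `sha2` crate, the `ManifestEntry` struct and its field names are assumptions for illustration, not the server's actual types.

    // Illustrative sketch of manifest-based verification (name, size, SHA-256).
    use sha2::{Digest, Sha256};

    struct ManifestEntry {
        filename: String,
        size: u64,
        csum: [u8; 32], // expected SHA-256 digest
    }

    fn verify_entry(entry: &ManifestEntry, data: &[u8]) -> Result<(), String> {
        if data.len() as u64 != entry.size {
            return Err(format!("{}: size mismatch", entry.filename));
        }
        let digest: [u8; 32] = Sha256::digest(data).into();
        if digest != entry.csum {
            return Err(format!("{}: checksum mismatch", entry.filename));
        }
        Ok(())
    }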
@ -51,14 +51,3 @@ Glossary

    A remote Proxmox Backup Server installation and credentials for a user on it.
    You can pull datastores from a remote to a local datastore in order to
    have redundant backups.

Schedule

    Certain tasks, for example pruning and garbage collection, need to be
    performed on a regular basis. Proxmox Backup Server uses a subset of the
    `systemd Time and Date Specification
    <https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
    The subset currently supports time of day specifications and weekdays, in
    addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
    There is no support for specifying timezones, the tasks are run in the
    timezone configured on the server.
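
The glossary text above (trimmed in this diff, with the schedule details moving into the new calendarevents.rst) explains the calendar-event subset used for schedules. A hedged sketch of how such a schedule string is evaluated, reusing the `parse_calendar_event` / `compute_next_event` names seen in the sync job hunk later in this diff; the module path and exact return types are assumptions based on those call sites.

    // Sketch only: evaluate a schedule string the way the sync job code does.
    use anyhow::Error;
    use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

    fn next_run(schedule: &str, last_run: i64) -> Result<Option<i64>, Error> {
        let event = parse_calendar_event(schedule)?; // e.g. "daily" or "mon..fri 02:30"
        // `false` mirrors the utc flag used at the call sites in this diff
        compute_next_event(&event, last_run, false)
    }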
BIN  docs/images/screenshots/pbs-gui-datastore-create-general.png   (new file, 18 KiB)
BIN  docs/images/screenshots/pbs-gui-datastore.png                  (new file, 60 KiB)
BIN  docs/images/screenshots/pbs-gui-disks-dir-create.png           (new file, 12 KiB)
BIN  docs/images/screenshots/pbs-gui-disks-zfs-create.png           (new file, 43 KiB)
BIN  docs/images/screenshots/pbs-gui-disks.png                      (new file, 79 KiB)
BIN  docs/images/screenshots/pbs-gui-network-create-bond.png        (new file, 26 KiB)
BIN  docs/images/screenshots/pbs-gui-permissions-add.png            (new file, 14 KiB)
BIN  docs/images/screenshots/pbs-gui-remote-add.png                 (new file, 20 KiB)
BIN  docs/images/screenshots/pbs-gui-syncjob-add.png                (new file, 21 KiB)
BIN  docs/images/screenshots/pbs-gui-user-management-add-user.png   (new file, 18 KiB)
BIN  docs/images/screenshots/pbs-gui-user-management.png            (new file, 54 KiB)
@ -2,8 +2,8 @@

Welcome to the Proxmox Backup documentation!
============================================

Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
| Version |version| -- |today|

Permission is granted to copy, distribute and/or modify this document under the
terms of the GNU Free Documentation License, Version 1.3 or any later version

@ -24,6 +24,7 @@ in the section entitled "GNU Free Documentation License".

   installation.rst
   administration-guide.rst
   sysadmin.rst
   faq.rst

.. raw:: latex

@ -36,6 +37,7 @@ in the section entitled "GNU Free Documentation License".

   command-syntax.rst
   file-formats.rst
   backup-protocol.rst
   calendarevents.rst
   glossary.rst
   GFDL.rst

@ -43,10 +45,10 @@ in the section entitled "GNU Free Documentation License".

.. toctree::
   :maxdepth: 2
   :hidden:
   :caption: Developer Appendix

   todos.rst


* :ref:`genindex`

.. # * :ref:`genindex`
@ -1,3 +1,6 @@

.. _chapter-zfs:

ZFS on Linux
------------
@ -2,8 +2,6 @@ use std::io::Write;

use anyhow::{Error};

use chrono::{DateTime, Utc};

use proxmox_backup::api2::types::Userid;
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};

@ -36,7 +34,7 @@ async fn run() -> Result<(), Error> {

    let client = HttpClient::new(host, username, options)?;

    let backup_time = "2019-06-28T10:49:48Z".parse::<DateTime<Utc>>()?;
    let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;

    let client = BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true)
        .await?;
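
This hunk (and the matching one in the upload-speed example that follows) replaces chrono-based timestamps with the proxmox time helpers. A small hedged sketch of the new style, assuming the helper signatures implied by these diffs: `parse_rfc3339` yields an epoch `i64` and `epoch_i64` returns the current epoch.

    // Sketch of the time handling these examples switch to.
    use anyhow::Error;

    fn main() -> Result<(), Error> {
        // RFC 3339 string -> Unix epoch seconds, replacing str::parse::<DateTime<Utc>>()
        let backup_time: i64 = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
        // current time as epoch seconds, replacing chrono::Utc::now()
        let now: i64 = proxmox::tools::time::epoch_i64();
        println!("snapshot at {}, now {}", backup_time, now);
        Ok(())
    }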
@ -16,9 +16,9 @@ async fn upload_speed() -> Result<f64, Error> {

    let client = HttpClient::new(host, username, options)?;

    let backup_time = chrono::Utc::now();
    let backup_time = proxmox::tools::time::epoch_i64();

    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;

    println!("start upload speed test");
    let res = client.upload_speedtest(true).await?;
@ -14,7 +14,7 @@ use crate::config::acl::{Role, ROLE_NAMES, PRIVILEGES};

        type: Array,
        items: {
            type: Object,
            description: "User name with description.",
            description: "Role with description and privileges.",
            properties: {
                roleid: {
                    type: Role,
@ -8,6 +8,7 @@ use proxmox::tools::fs::open_file_locked;

use crate::api2::types::*;
use crate::config::user;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)

@ -25,10 +26,11 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")

        items: { type: user::User },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
        permission: &Permission::Anybody,
        description: "Returns all or just the logged-in user, depending on privileges.",
    },
)]
/// List all users
/// List users
pub fn list_users(
    _param: Value,
    _info: &ApiMethod,

@ -37,11 +39,21 @@ pub fn list_users(

    let (config, digest) = user::config()?;

    let list = config.convert_to_typed_array("user")?;
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
    let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;

    let filter_by_privs = |user: &user::User| {
        top_level_allowed || user.userid == userid
    };

    let list:Vec<user::User> = config.convert_to_typed_array("user")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
    Ok(list.into_iter().filter(filter_by_privs).collect())
}

#[api(

@ -124,7 +136,10 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>

        type: user::User,
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
        permission: &Permission::Or(&[
            &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
            &Permission::UserParam("userid"),
        ]),
    },
)]
/// Read user configuration data.

@ -177,7 +192,10 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<

        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
        permission: &Permission::Or(&[
            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
            &Permission::UserParam("userid"),
        ]),
    },
)]
/// Update user configuration.

@ -258,7 +276,10 @@ pub fn update_user(

        },
    },
    access: {
        permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
        permission: &Permission::Or(&[
            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
            &Permission::UserParam("userid"),
        ]),
    },
)]
/// Remove a user from the configuration file.
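
The list_users change above keeps the endpoint callable by anybody but filters the result down to the caller's own entry unless the caller holds PRIV_SYS_AUDIT on /access/users. A self-contained sketch of that filtering pattern; the `User` struct and the privilege bit value here are stand-ins, not the real config types.

    // Stand-in types to illustrate the privilege-based filtering used above.
    #[derive(Debug, Clone, PartialEq)]
    struct User { userid: String }

    const PRIV_SYS_AUDIT: u64 = 1 << 0; // placeholder bit value

    fn visible_users(all: Vec<User>, caller: &str, caller_privs: u64) -> Vec<User> {
        let top_level_allowed = (caller_privs & PRIV_SYS_AUDIT) != 0;
        all.into_iter()
            .filter(|u| top_level_allowed || u.userid == caller)
            .collect()
    }

    fn main() {
        let users = vec![
            User { userid: "root@pam".into() },
            User { userid: "alice@pbs".into() },
        ];
        // without the audit privilege, a user only sees their own entry
        assert_eq!(visible_users(users, "alice@pbs", 0),
                   vec![User { userid: "alice@pbs".into() }]);
    }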
@ -1,6 +1,7 @@
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
@ -171,7 +172,7 @@ fn list_groups(
|
||||
let result_item = GroupListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
last_backup: info.backup_dir.backup_time().timestamp(),
|
||||
last_backup: info.backup_dir.backup_time(),
|
||||
backup_count: list.len() as u64,
|
||||
files: info.files.clone(),
|
||||
owner: Some(owner),
|
||||
@ -229,7 +230,7 @@ pub fn list_snapshot_files(
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
|
||||
@ -279,7 +280,7 @@ fn delete_snapshot(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
@ -402,7 +403,7 @@ pub fn list_snapshots (
|
||||
let result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
backup_time: info.backup_dir.backup_time().timestamp(),
|
||||
backup_time: info.backup_dir.backup_time(),
|
||||
comment,
|
||||
verification,
|
||||
files,
|
||||
@ -489,7 +490,7 @@ pub fn verify(
|
||||
match (backup_type, backup_id, backup_time) {
|
||||
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
|
||||
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
|
||||
let dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
backup_dir = Some(dir);
|
||||
}
|
||||
(Some(backup_type), Some(backup_id), None) => {
|
||||
@ -512,18 +513,27 @@ pub fn verify(
|
||||
userid,
|
||||
to_stdout,
|
||||
move |worker| {
|
||||
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
|
||||
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
|
||||
|
||||
let failed_dirs = if let Some(backup_dir) = backup_dir {
|
||||
let mut verified_chunks = HashSet::with_capacity(1024*16);
|
||||
let mut corrupt_chunks = HashSet::with_capacity(64);
|
||||
let mut res = Vec::new();
|
||||
if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
|
||||
if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
|
||||
res.push(backup_dir.to_string());
|
||||
}
|
||||
res
|
||||
} else if let Some(backup_group) = backup_group {
|
||||
verify_backup_group(&datastore, &backup_group, &worker)?
|
||||
let (_count, failed_dirs) = verify_backup_group(
|
||||
datastore,
|
||||
&backup_group,
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
None,
|
||||
worker.clone(),
|
||||
)?;
|
||||
failed_dirs
|
||||
} else {
|
||||
verify_all_backups(&datastore, &worker)?
|
||||
verify_all_backups(datastore, worker.clone())?
|
||||
};
|
||||
if failed_dirs.len() > 0 {
|
||||
worker.log("Failed to verify following snapshots:");
|
||||
@ -663,7 +673,7 @@ fn prune(
|
||||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
}
|
||||
@ -687,7 +697,7 @@ fn prune(
|
||||
if keep_all { keep = true; }
|
||||
|
||||
let backup_time = info.backup_dir.backup_time();
|
||||
let timestamp = BackupDir::backup_time_to_string(backup_time);
|
||||
let timestamp = info.backup_dir.backup_time_string();
|
||||
let group = info.backup_dir.group();
|
||||
|
||||
|
||||
@ -704,7 +714,7 @@ fn prune(
|
||||
prune_result.push(json!({
|
||||
"backup-type": group.backup_type(),
|
||||
"backup-id": group.backup_id(),
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"keep": keep,
|
||||
}));
|
||||
|
||||
@ -887,7 +897,7 @@ fn download_file(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -960,7 +970,7 @@ fn download_file_decoded(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1073,7 +1083,7 @@ fn upload_backup_log(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
|
||||
@ -1087,7 +1097,7 @@ fn upload_backup_log(
|
||||
}
|
||||
|
||||
println!("Upload backup log to {}/{}/{}/{}/{}", store,
|
||||
backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
|
||||
backup_type, backup_id, backup_dir.backup_time_string(), file_name);
|
||||
|
||||
let data = req_body
|
||||
.map_err(Error::from)
|
||||
@ -1149,7 +1159,7 @@ fn catalog(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1266,7 +1276,7 @@ fn pxar_file_download(
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1407,7 +1417,7 @@ fn get_notes(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
@ -1460,7 +1470,7 @@ fn set_notes(
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
|
||||
|
@ -57,7 +57,8 @@ pub fn list_sync_jobs(

            job.next_run = (|| -> Option<i64> {
                let schedule = job.schedule.as_ref()?;
                let event = parse_calendar_event(&schedule).ok()?;
                compute_next_event(&event, last, false).ok()
                // ignore errors
                compute_next_event(&event, last, false).unwrap_or_else(|_| None)
            })();
        }
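
The change above swaps `.ok()` for `unwrap_or_else(|_| None)`, presumably because `compute_next_event` now returns `Result<Option<i64>, _>`: an error is flattened into "no next run" instead of wrapping the option a second time. A self-contained illustration of the same pattern, independent of the PBS types:

    // Error-handling pattern used above: on error, report "no next event".
    fn compute(fails: bool) -> Result<Option<i64>, String> {
        if fails { Err("bad schedule".into()) } else { Ok(Some(1_600_000_000)) }
    }

    fn main() {
        // ignore errors, like `compute_next_event(...).unwrap_or_else(|_| None)`
        let next: Option<i64> = compute(true).unwrap_or_else(|_| None);
        assert_eq!(next, None);
        let next = compute(false).unwrap_or_else(|_| None);
        assert_eq!(next, Some(1_600_000_000));
    }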
@ -38,6 +38,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
|
||||
("backup-id", false, &BACKUP_ID_SCHEMA),
|
||||
("backup-time", false, &BACKUP_TIME_SCHEMA),
|
||||
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
|
||||
("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
|
||||
]),
|
||||
)
|
||||
).access(
|
||||
@ -56,6 +57,7 @@ fn upgrade_to_backup_protocol(
|
||||
|
||||
async move {
|
||||
let debug = param["debug"].as_bool().unwrap_or(false);
|
||||
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
|
||||
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
|
||||
@ -90,16 +92,50 @@ async move {
|
||||
|
||||
let backup_group = BackupGroup::new(backup_type, backup_id);
|
||||
|
||||
let worker_type = if backup_type == "host" && backup_id == "benchmark" {
|
||||
if !benchmark {
|
||||
bail!("unable to run benchmark without --benchmark flags");
|
||||
}
|
||||
"benchmark"
|
||||
} else {
|
||||
if benchmark {
|
||||
bail!("benchmark flags is only allowed on 'host/benchmark'");
|
||||
}
|
||||
"backup"
|
||||
};
|
||||
|
||||
// lock backup group to only allow one backup per group at a time
|
||||
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
|
||||
|
||||
// permission check
|
||||
if owner != userid { // only the owner is allowed to create additional snapshots
|
||||
if owner != userid && worker_type != "benchmark" {
|
||||
// only the owner is allowed to create additional snapshots
|
||||
bail!("backup owner check failed ({} != {})", userid, owner);
|
||||
}
|
||||
|
||||
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
|
||||
let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
|
||||
let last_backup = {
|
||||
let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
|
||||
if let Some(info) = info {
|
||||
let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
|
||||
let verify = manifest.unprotected["verify_state"].clone();
|
||||
match serde_json::from_value::<SnapshotVerifyState>(verify) {
|
||||
Ok(verify) => {
|
||||
match verify.state {
|
||||
VerifyState::Ok => Some(info),
|
||||
VerifyState::Failed => None,
|
||||
}
|
||||
},
|
||||
Err(_) => {
|
||||
// no verify state found, treat as valid
|
||||
Some(info)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
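
The block above only treats the previous snapshot as a usable base if its manifest carries no failed verify state; a missing verify state is treated as valid. A hedged sketch of that decision with simplified stand-in types (the real manifest handling and `VerifyState` enum come from this crate):

    // Simplified stand-in for the "is the previous backup a valid base?" decision.
    #[derive(Debug, Clone, Copy)]
    enum VerifyState { Ok, Failed }

    struct PrevBackup { verify_state: Option<VerifyState> }

    fn usable_as_base(prev: Option<PrevBackup>) -> Option<PrevBackup> {
        match prev {
            Some(info) => match info.verify_state {
                Some(VerifyState::Failed) => None, // never reuse chunks of a bad snapshot
                // no verify state recorded (or Ok) -> treat as valid, like the diff does
                _ => Some(info),
            },
            None => None,
        }
    }

    fn main() {
        let prev = Some(PrevBackup { verify_state: Some(VerifyState::Failed) });
        assert!(usable_as_base(prev).is_none());
    }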
|
||||
|
||||
let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
|
||||
|
||||
let _last_guard = if let Some(last) = &last_backup {
|
||||
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
|
||||
@ -116,14 +152,15 @@ async move {
|
||||
let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
|
||||
if !is_new { bail!("backup directory already exists."); }
|
||||
|
||||
WorkerTask::spawn("backup", Some(worker_id), userid.clone(), true, move |worker| {
|
||||
|
||||
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
|
||||
let mut env = BackupEnvironment::new(
|
||||
env_type, userid, worker.clone(), datastore, backup_dir);
|
||||
|
||||
env.debug = debug;
|
||||
env.last_backup = last_backup;
|
||||
|
||||
env.log(format!("starting new backup on datastore '{}': {:?}", store, path));
|
||||
env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));
|
||||
|
||||
let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
|
||||
|
||||
@ -143,6 +180,7 @@ async move {
|
||||
let window_size = 32*1024*1024; // max = (1 << 31) - 2
|
||||
http.http2_initial_stream_window_size(window_size);
|
||||
http.http2_initial_connection_window_size(window_size);
|
||||
http.http2_max_frame_size(4*1024*1024);
|
||||
|
||||
http.serve_connection(conn, service)
|
||||
.map_err(Error::from)
|
||||
@ -160,7 +198,11 @@ async move {
|
||||
req = req_fut => req,
|
||||
abrt = abort_future => abrt,
|
||||
};
|
||||
|
||||
if benchmark {
|
||||
env.log("benchmark finished successfully");
|
||||
env.remove_backup()?;
|
||||
return Ok(());
|
||||
}
|
||||
match (res, env.ensure_finished()) {
|
||||
(Ok(_), Ok(())) => {
|
||||
env.log("backup finished successfully");
|
||||
@ -334,7 +376,7 @@ fn create_fixed_index(
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
bail!("cannot reuse index - no previous backup exists");
|
||||
bail!("cannot reuse index - no valid previous backup exists");
|
||||
}
|
||||
};
|
||||
|
||||
@ -649,7 +691,7 @@ fn download_previous(
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => bail!("no previous backup"),
|
||||
None => bail!("no valid previous backup"),
|
||||
};
|
||||
|
||||
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
|
||||
|
@ -9,7 +9,7 @@ use proxmox::tools::digest_to_hex;
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::api2::types::{Userid, SnapshotVerifyState, VerifyState};
|
||||
use crate::backup::*;
|
||||
use crate::server::WorkerTask;
|
||||
use crate::server::formatter::*;
|
||||
@ -66,13 +66,16 @@ struct FixedWriterState {
|
||||
incremental: bool,
|
||||
}
|
||||
|
||||
// key=digest, value=(length, existence checked)
|
||||
type KnownChunksMap = HashMap<[u8;32], (u32, bool)>;
|
||||
|
||||
struct SharedBackupState {
|
||||
finished: bool,
|
||||
uid_counter: usize,
|
||||
file_counter: usize, // successfully uploaded files
|
||||
dynamic_writers: HashMap<usize, DynamicWriterState>,
|
||||
fixed_writers: HashMap<usize, FixedWriterState>,
|
||||
known_chunks: HashMap<[u8;32], u32>,
|
||||
known_chunks: KnownChunksMap,
|
||||
backup_size: u64, // sums up size of all files
|
||||
backup_stat: UploadStatistic,
|
||||
}
|
||||
@ -153,7 +156,7 @@ impl BackupEnvironment {
|
||||
|
||||
state.ensure_unfinished()?;
|
||||
|
||||
state.known_chunks.insert(digest, length);
|
||||
state.known_chunks.insert(digest, (length, false));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -195,7 +198,7 @@ impl BackupEnvironment {
|
||||
if is_duplicate { data.upload_stat.duplicates += 1; }
|
||||
|
||||
// register chunk
|
||||
state.known_chunks.insert(digest, size);
|
||||
state.known_chunks.insert(digest, (size, true));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -228,7 +231,7 @@ impl BackupEnvironment {
|
||||
if is_duplicate { data.upload_stat.duplicates += 1; }
|
||||
|
||||
// register chunk
|
||||
state.known_chunks.insert(digest, size);
|
||||
state.known_chunks.insert(digest, (size, true));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -237,7 +240,7 @@ impl BackupEnvironment {
|
||||
let state = self.state.lock().unwrap();
|
||||
|
||||
match state.known_chunks.get(digest) {
|
||||
Some(len) => Some(*len),
|
||||
Some((len, _)) => Some(*len),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
@ -454,14 +457,55 @@ impl BackupEnvironment {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ensure all chunks referenced in this backup actually exist.
|
||||
/// Only call *after* all writers have been closed, to avoid race with GC.
|
||||
/// In case of error, mark the previous backup as 'verify failed'.
|
||||
fn verify_chunk_existance(&self, known_chunks: &KnownChunksMap) -> Result<(), Error> {
|
||||
for (digest, (_, checked)) in known_chunks.iter() {
|
||||
if !checked && !self.datastore.chunk_path(digest).0.exists() {
|
||||
let mark_msg = if let Some(ref last_backup) = self.last_backup {
|
||||
let last_dir = &last_backup.backup_dir;
|
||||
let verify_state = SnapshotVerifyState {
|
||||
state: VerifyState::Failed,
|
||||
upid: self.worker.upid().clone(),
|
||||
};
|
||||
|
||||
let res = proxmox::try_block!{
|
||||
let (mut manifest, _) = self.datastore.load_manifest(last_dir)?;
|
||||
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
||||
self.datastore.store_manifest(last_dir, serde_json::to_value(manifest)?)
|
||||
};
|
||||
|
||||
if let Err(err) = res {
|
||||
format!("tried marking previous snapshot as bad, \
|
||||
but got error accessing manifest: {}", err)
|
||||
} else {
|
||||
"marked previous snapshot as bad, please use \
|
||||
'verify' for a detailed check".to_owned()
|
||||
}
|
||||
} else {
|
||||
"internal error: no base backup registered to mark invalid".to_owned()
|
||||
};
|
||||
|
||||
bail!(
|
||||
"chunk '{}' was attempted to be reused but doesn't exist - {}",
|
||||
digest_to_hex(digest),
|
||||
mark_msg
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
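
The function above walks the known-chunks map and only checks chunks on disk that were registered from the previous backup but never uploaded or touched in this session (checked == false). A small sketch of that bookkeeping, with the datastore lookup reduced to a closure; only the `KnownChunksMap` alias mirrors the type introduced above.

    // Sketch of the (length, checked) bookkeeping behind the chunk existence check.
    use std::collections::HashMap;

    type KnownChunksMap = HashMap<[u8; 32], (u32, bool)>;

    fn find_missing(known: &KnownChunksMap, chunk_exists: impl Fn(&[u8; 32]) -> bool) -> Vec<[u8; 32]> {
        known.iter()
            // chunks uploaded in this session (checked == true) need no extra lookup
            .filter(|(digest, &(_len, checked))| !checked && !chunk_exists(digest))
            .map(|(digest, _)| *digest)
            .collect()
    }

    fn main() {
        let mut known = KnownChunksMap::new();
        known.insert([0u8; 32], (4096, false)); // registered from the previous backup
        known.insert([1u8; 32], (4096, true));  // uploaded, therefore known to exist
        let missing = find_missing(&known, |_| false); // pretend nothing is on disk
        assert_eq!(missing, vec![[0u8; 32]]);
    }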
|
||||
|
||||
/// Mark backup as finished
|
||||
pub fn finish_backup(&self) -> Result<(), Error> {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
// test if all writers are correctly closed
|
||||
|
||||
state.ensure_unfinished()?;
|
||||
|
||||
if state.dynamic_writers.len() != 0 {
|
||||
// test if all writers are correctly closed
|
||||
if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
|
||||
bail!("found open index writer - unable to finish backup");
|
||||
}
|
||||
|
||||
@ -490,6 +534,8 @@ impl BackupEnvironment {
|
||||
}
|
||||
}
|
||||
|
||||
self.verify_chunk_existance(&state.known_chunks)?;
|
||||
|
||||
// marks the backup as successful
|
||||
state.finished = true;
|
||||
|
||||
|
@ -9,6 +9,7 @@ use proxmox::tools::fs::open_file_locked;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::backup::*;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
|
||||
use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||
|
||||
@ -22,7 +23,7 @@ use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||
items: { type: datastore::DataStoreConfig },
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List all datastores
|
||||
@ -33,11 +34,18 @@ pub fn list_datastores(
|
||||
|
||||
let (config, digest) = datastore::config()?;
|
||||
|
||||
let list = config.convert_to_typed_array("datastore")?;
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||
|
||||
Ok(list)
|
||||
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
||||
let filter_by_privs = |store: &DataStoreConfig| {
|
||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
|
||||
(user_privs & PRIV_DATASTORE_AUDIT) != 0
|
||||
};
|
||||
|
||||
Ok(list.into_iter().filter(filter_by_privs).collect())
|
||||
}
|
||||
|
||||
|
||||
@ -67,6 +75,10 @@ pub fn list_datastores(
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -119,6 +131,10 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
crate::config::jobstate::create_state_file("prune", &datastore.name)?;
|
||||
crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
|
||||
crate::config::jobstate::create_state_file("verify", &datastore.name)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -163,6 +179,8 @@ pub enum DeletableProperty {
|
||||
gc_schedule,
|
||||
/// Delete the prune job schedule.
|
||||
prune_schedule,
|
||||
/// Delete the verify schedule property
|
||||
verify_schedule,
|
||||
/// Delete the keep-last property
|
||||
keep_last,
|
||||
/// Delete the keep-hourly property
|
||||
@ -196,6 +214,10 @@ pub enum DeletableProperty {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -244,6 +266,7 @@ pub fn update_datastore(
|
||||
comment: Option<String>,
|
||||
gc_schedule: Option<String>,
|
||||
prune_schedule: Option<String>,
|
||||
verify_schedule: Option<String>,
|
||||
keep_last: Option<u64>,
|
||||
keep_hourly: Option<u64>,
|
||||
keep_daily: Option<u64>,
|
||||
@ -272,6 +295,7 @@ pub fn update_datastore(
|
||||
DeletableProperty::comment => { data.comment = None; },
|
||||
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
|
||||
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
|
||||
DeletableProperty::verify_schedule => { data.verify_schedule = None; },
|
||||
DeletableProperty::keep_last => { data.keep_last = None; },
|
||||
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
|
||||
DeletableProperty::keep_daily => { data.keep_daily = None; },
|
||||
@ -291,8 +315,23 @@ pub fn update_datastore(
|
||||
}
|
||||
}
|
||||
|
||||
if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
|
||||
if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
|
||||
let mut gc_schedule_changed = false;
|
||||
if gc_schedule.is_some() {
|
||||
gc_schedule_changed = data.gc_schedule != gc_schedule;
|
||||
data.gc_schedule = gc_schedule;
|
||||
}
|
||||
|
||||
let mut prune_schedule_changed = false;
|
||||
if prune_schedule.is_some() {
|
||||
prune_schedule_changed = data.prune_schedule != prune_schedule;
|
||||
data.prune_schedule = prune_schedule;
|
||||
}
|
||||
|
||||
let mut verify_schedule_changed = false;
|
||||
if verify_schedule.is_some() {
|
||||
verify_schedule_changed = data.verify_schedule != verify_schedule;
|
||||
data.verify_schedule = verify_schedule;
|
||||
}
|
||||
|
||||
if keep_last.is_some() { data.keep_last = keep_last; }
|
||||
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
|
||||
@ -305,6 +344,20 @@ pub fn update_datastore(
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
// we want to reset the statefiles, to avoid an immediate action in some cases
|
||||
// (e.g. going from monthly to weekly in the second week of the month)
|
||||
if gc_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("garbage_collection", &name)?;
|
||||
}
|
||||
|
||||
if prune_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("prune", &name)?;
|
||||
}
|
||||
|
||||
if verify_schedule_changed {
|
||||
crate::config::jobstate::create_state_file("verify", &name)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -344,6 +397,11 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
|
||||
|
||||
datastore::save_config(&config)?;
|
||||
|
||||
// ignore errors
|
||||
let _ = crate::config::jobstate::remove_state_file("prune", &name);
|
||||
let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
|
||||
let _ = crate::config::jobstate::remove_state_file("verify", &name);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -198,6 +198,14 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
slaves: {
|
||||
schema: NETWORK_INTERFACE_LIST_SCHEMA,
|
||||
optional: true,
|
||||
@ -224,6 +232,8 @@ pub fn create_interface(
|
||||
bridge_ports: Option<String>,
|
||||
bridge_vlan_aware: Option<bool>,
|
||||
bond_mode: Option<LinuxBondMode>,
|
||||
bond_primary: Option<String>,
|
||||
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
slaves: Option<String>,
|
||||
param: Value,
|
||||
) -> Result<(), Error> {
|
||||
@ -284,7 +294,23 @@ pub fn create_interface(
|
||||
if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
|
||||
}
|
||||
NetworkInterfaceType::Bond => {
|
||||
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
|
||||
if let Some(mode) = bond_mode {
|
||||
interface.bond_mode = bond_mode;
|
||||
if bond_primary.is_some() {
|
||||
if mode != LinuxBondMode::active_backup {
|
||||
bail!("bond-primary is only valid with Active/Backup mode");
|
||||
}
|
||||
interface.bond_primary = bond_primary;
|
||||
}
|
||||
if bond_xmit_hash_policy.is_some() {
|
||||
if mode != LinuxBondMode::ieee802_3ad &&
|
||||
mode != LinuxBondMode::balance_xor
|
||||
{
|
||||
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
|
||||
}
|
||||
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
|
||||
}
|
||||
}
|
||||
if let Some(slaves) = slaves {
|
||||
let slaves = split_interface_list(&slaves)?;
|
||||
interface.set_bond_slaves(slaves)?;
|
||||
@ -343,6 +369,11 @@ pub enum DeletableProperty {
|
||||
bridge_vlan_aware,
|
||||
/// Delete bond-slaves (set to 'none')
|
||||
slaves,
|
||||
/// Delete bond-primary
|
||||
#[serde(rename = "bond-primary")]
|
||||
bond_primary,
|
||||
/// Delete bond transmit hash policy
|
||||
bond_xmit_hash_policy,
|
||||
}
|
||||
|
||||
|
||||
@ -420,6 +451,14 @@ pub enum DeletableProperty {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
slaves: {
|
||||
schema: NETWORK_INTERFACE_LIST_SCHEMA,
|
||||
optional: true,
|
||||
@ -458,6 +497,8 @@ pub fn update_interface(
|
||||
bridge_ports: Option<String>,
|
||||
bridge_vlan_aware: Option<bool>,
|
||||
bond_mode: Option<LinuxBondMode>,
|
||||
bond_primary: Option<String>,
|
||||
bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
slaves: Option<String>,
|
||||
delete: Option<Vec<DeletableProperty>>,
|
||||
digest: Option<String>,
|
||||
@ -501,6 +542,8 @@ pub fn update_interface(
|
||||
DeletableProperty::bridge_ports => { interface.set_bridge_ports(Vec::new())?; }
|
||||
DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
|
||||
DeletableProperty::slaves => { interface.set_bond_slaves(Vec::new())?; }
|
||||
DeletableProperty::bond_primary => { interface.bond_primary = None; }
|
||||
DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -518,7 +561,23 @@ pub fn update_interface(
|
||||
let slaves = split_interface_list(&slaves)?;
|
||||
interface.set_bond_slaves(slaves)?;
|
||||
}
|
||||
if bond_mode.is_some() { interface.bond_mode = bond_mode; }
|
||||
if let Some(mode) = bond_mode {
|
||||
interface.bond_mode = bond_mode;
|
||||
if bond_primary.is_some() {
|
||||
if mode != LinuxBondMode::active_backup {
|
||||
bail!("bond-primary is only valid with Active/Backup mode");
|
||||
}
|
||||
interface.bond_primary = bond_primary;
|
||||
}
|
||||
if bond_xmit_hash_policy.is_some() {
|
||||
if mode != LinuxBondMode::ieee802_3ad &&
|
||||
mode != LinuxBondMode::balance_xor
|
||||
{
|
||||
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
|
||||
}
|
||||
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
|
||||
}
|
||||
}
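
Both create_interface and update_interface now enforce the same rule: bond-primary is only accepted for active-backup bonds, and a transmit hash policy only for LACP (802.3ad) or balance-xor bonds. A compact stand-alone sketch of that validation; the enum below is a trimmed stand-in listing only the variants referenced in this diff.

    // Stand-alone version of the bond option validation applied above.
    use anyhow::{bail, Error};

    #[derive(Debug, Clone, Copy, PartialEq)]
    #[allow(non_camel_case_types)]
    enum LinuxBondMode { active_backup, balance_xor, broadcast, ieee802_3ad, balance_tlb, balance_alb }

    fn check_bond_options(
        mode: LinuxBondMode,
        has_primary: bool,
        has_xmit_hash_policy: bool,
    ) -> Result<(), Error> {
        if has_primary && mode != LinuxBondMode::active_backup {
            bail!("bond-primary is only valid with Active/Backup mode");
        }
        if has_xmit_hash_policy
            && mode != LinuxBondMode::ieee802_3ad
            && mode != LinuxBondMode::balance_xor
        {
            bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
        }
        Ok(())
    }

    fn main() {
        assert!(check_bond_options(LinuxBondMode::ieee802_3ad, true, false).is_err());
        assert!(check_bond_options(LinuxBondMode::active_backup, true, false).is_ok());
    }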
|
||||
|
||||
if let Some(cidr) = cidr {
|
||||
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
|
||||
|
@ -1,10 +1,10 @@
|
||||
use anyhow::Error;
|
||||
use serde_json::{Value, json};
|
||||
|
||||
use proxmox::api::{api, Router};
|
||||
use proxmox::api::{api, Permission, Router};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::config::acl::PRIV_SYS_AUDIT;
|
||||
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
|
||||
|
||||
pub fn create_value_from_rrd(
|
||||
@ -15,7 +15,7 @@ pub fn create_value_from_rrd(
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let mut result = Vec::new();
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
|
||||
for name in list {
|
||||
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
|
||||
@ -57,6 +57,9 @@ pub fn create_value_from_rrd(
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Read node stats
|
||||
fn get_node_stats(
|
||||
|
@ -1,11 +1,12 @@
|
||||
use anyhow::{Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, Router, Permission};
|
||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||
|
||||
use crate::tools;
|
||||
use crate::config::acl::PRIV_SYS_AUDIT;
|
||||
use crate::api2::types::NODE_SCHEMA;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::types::{NODE_SCHEMA, Userid};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -28,7 +29,7 @@ use crate::api2::types::NODE_SCHEMA;
|
||||
},
|
||||
serverid: {
|
||||
type: String,
|
||||
description: "The unique server ID.",
|
||||
description: "The unique server ID, if permitted to access.",
|
||||
},
|
||||
url: {
|
||||
type: String,
|
||||
@ -37,17 +38,28 @@ use crate::api2::types::NODE_SCHEMA;
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// Read subscription info.
|
||||
fn get_subscription(_param: Value) -> Result<Value, Error> {
|
||||
fn get_subscription(
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&userid, &[]);
|
||||
let server_id = if (user_privs & PRIV_SYS_AUDIT) != 0 {
|
||||
tools::get_hardware_address()?
|
||||
} else {
|
||||
"hidden".to_string()
|
||||
};
|
||||
|
||||
let url = "https://www.proxmox.com/en/proxmox-backup-server/pricing";
|
||||
Ok(json!({
|
||||
"status": "NotFound",
|
||||
"message": "There is no subscription key",
|
||||
"serverid": tools::get_hardware_address()?,
|
||||
"serverid": server_id,
|
||||
"url": url,
|
||||
}))
|
||||
}
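
get_subscription is now callable by anybody, but the hardware-derived server ID is only revealed to callers holding PRIV_SYS_AUDIT at the ACL root (`lookup_privs(&userid, &[])`); everyone else gets the literal string "hidden". The privilege test is a plain bit-mask check, sketched here with a placeholder bit value (the real PRIV_* constants live in crate::config::acl).

    // Minimal illustration of the bit-mask privilege check used above.
    const PRIV_SYS_AUDIT: u64 = 1 << 3; // placeholder value

    fn server_id_for(user_privs: u64, real_id: &str) -> String {
        if (user_privs & PRIV_SYS_AUDIT) != 0 {
            real_id.to_string()
        } else {
            "hidden".to_string()
        }
    }

    fn main() {
        assert_eq!(server_id_for(0, "ab:cd:ef"), "hidden");
        assert_eq!(server_id_for(PRIV_SYS_AUDIT, "ab:cd:ef"), "ab:cd:ef");
    }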
|
||||
|
@ -1,4 +1,3 @@
|
||||
use chrono::prelude::*;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
@ -57,10 +56,11 @@ fn read_etc_localtime() -> Result<String, Error> {
|
||||
)]
|
||||
/// Read server time and time zone settings.
|
||||
fn get_time(_param: Value) -> Result<Value, Error> {
|
||||
let datetime = Local::now();
|
||||
let offset = datetime.offset();
|
||||
let time = datetime.timestamp();
|
||||
let localtime = time + (offset.fix().local_minus_utc() as i64);
|
||||
let time = proxmox::tools::time::epoch_i64();
|
||||
let tm = proxmox::tools::time::localtime(time)?;
|
||||
let offset = tm.tm_gmtoff;
|
||||
|
||||
let localtime = time + offset;
|
||||
|
||||
Ok(json!({
|
||||
"timezone": read_etc_localtime()?,
|
||||
|
@ -176,7 +176,13 @@ async fn pull (
|
||||
|
||||
worker.log(format!("sync datastore '{}' start", store));
|
||||
|
||||
pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid).await?;
|
||||
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
|
||||
let future = select!{
|
||||
success = pull_future.fuse() => success,
|
||||
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
|
||||
};
|
||||
|
||||
let _ = future?;
|
||||
|
||||
worker.log(format!("sync datastore '{}' end", store));
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
//use chrono::{Local, TimeZone};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::header::{self, HeaderValue, UPGRADE};
|
||||
@ -83,12 +82,12 @@ fn upgrade_to_backup_reader_protocol(
|
||||
|
||||
let env_type = rpcenv.env_type();
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
let path = datastore.base_path();
|
||||
|
||||
//let files = BackupInfo::list_files(&path, &backup_dir)?;
|
||||
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());
|
||||
let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
|
||||
|
||||
WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
|
||||
let mut env = ReaderEnvironment::new(
|
||||
@ -121,6 +120,7 @@ fn upgrade_to_backup_reader_protocol(
|
||||
let window_size = 32*1024*1024; // max = (1 << 31) - 2
|
||||
http.http2_initial_stream_window_size(window_size);
|
||||
http.http2_initial_connection_window_size(window_size);
|
||||
http.http2_max_frame_size(4*1024*1024);
|
||||
|
||||
http.serve_connection(conn, service)
|
||||
.map_err(Error::from)
|
||||
@ -229,8 +229,7 @@ fn download_chunk(
|
||||
|
||||
env.debug(format!("download chunk {:?}", path));
|
||||
|
||||
let data = tokio::fs::read(path)
|
||||
.await
|
||||
let data = tools::runtime::block_in_place(|| std::fs::read(path))
|
||||
.map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
|
||||
|
||||
let body = Body::from(data);
|
||||
|
@ -23,7 +23,6 @@ use crate::api2::types::{
|
||||
use crate::server;
|
||||
use crate::backup::{DataStore};
|
||||
use crate::config::datastore;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::tools::statistics::{linear_regression};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::acl::{
|
||||
@ -74,6 +73,9 @@ use crate::config::acl::{
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List Datastore usages and estimates
|
||||
fn datastore_status(
|
||||
@ -107,7 +109,7 @@ fn datastore_status(
|
||||
});
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
let rrd_resolution = RRDTimeFrameResolution::Month;
|
||||
let rrd_mode = RRDMode::Average;
|
||||
|
||||
|
@ -302,6 +302,11 @@ pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.schema();
|
||||
|
||||
pub const VERIFY_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||
"Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
|
||||
.schema();
|
||||
|
||||
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
@ -380,13 +385,24 @@ pub struct GroupListItem {
|
||||
pub owner: Option<Userid>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// Result of a verify operation.
|
||||
pub enum VerifyState {
|
||||
/// Verification was successful
|
||||
Ok,
|
||||
/// Verification reported one or more errors
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
upid: {
|
||||
schema: UPID_SCHEMA
|
||||
},
|
||||
state: {
|
||||
type: String
|
||||
type: VerifyState
|
||||
},
|
||||
},
|
||||
)]
|
||||
@ -395,8 +411,8 @@ pub struct GroupListItem {
|
||||
pub struct SnapshotVerifyState {
|
||||
/// UPID of the verify task
|
||||
pub upid: UPID,
|
||||
/// State of the verification. "failed" or "ok"
|
||||
pub state: String,
|
||||
/// State of the verification. Enum.
|
||||
pub state: VerifyState,
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -559,6 +575,8 @@ pub struct GarbageCollectionStatus {
|
||||
pub pending_bytes: u64,
|
||||
/// Number of pending chunks (pending removal - kept for safety).
|
||||
pub pending_chunks: usize,
|
||||
/// Number of chunks marked as .bad by verify that have been removed by GC.
|
||||
pub removed_bad: usize,
|
||||
}
|
||||
|
||||
impl Default for GarbageCollectionStatus {
|
||||
@ -573,6 +591,7 @@ impl Default for GarbageCollectionStatus {
|
||||
removed_chunks: 0,
|
||||
pending_bytes: 0,
|
||||
pending_chunks: 0,
|
||||
removed_bad: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -685,7 +704,7 @@ pub enum LinuxBondMode {
|
||||
/// Broadcast policy
|
||||
broadcast = 3,
|
||||
/// IEEE 802.3ad Dynamic link aggregation
|
||||
//#[serde(rename = "802.3ad")]
|
||||
#[serde(rename = "802.3ad")]
|
||||
ieee802_3ad = 4,
|
||||
/// Adaptive transmit load balancing
|
||||
balance_tlb = 5,
|
||||
@ -693,6 +712,23 @@ pub enum LinuxBondMode {
|
||||
balance_alb = 6,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[allow(non_camel_case_types)]
|
||||
#[repr(u8)]
|
||||
/// Bond Transmit Hash Policy for LACP (802.3ad)
|
||||
pub enum BondXmitHashPolicy {
|
||||
/// Layer 2
|
||||
layer2 = 0,
|
||||
/// Layer 2+3
|
||||
#[serde(rename = "layer2+3")]
|
||||
layer2_3 = 1,
|
||||
/// Layer 3+4
|
||||
#[serde(rename = "layer3+4")]
|
||||
layer3_4 = 2,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
@ -798,7 +834,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
bond_mode: {
|
||||
type: LinuxBondMode,
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
"bond-primary": {
|
||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
bond_xmit_hash_policy: {
|
||||
type: BondXmitHashPolicy,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@ -855,6 +899,10 @@ pub struct Interface {
|
||||
pub slaves: Option<Vec<String>>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub bond_mode: Option<LinuxBondMode>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
#[serde(rename = "bond-primary")]
|
||||
pub bond_primary: Option<String>,
|
||||
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||
}
|
||||
|
||||
// Regression tests
|
||||
|
@ -11,7 +11,6 @@ use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn compute_csrf_secret_digest(
|
||||
timestamp: i64,
|
||||
@ -32,7 +31,7 @@ pub fn assemble_csrf_prevention_token(
|
||||
userid: &Userid,
|
||||
) -> String {
|
||||
|
||||
let epoch = epoch_now_u64().unwrap() as i64;
|
||||
let epoch = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let digest = compute_csrf_secret_digest(epoch, secret, userid);
|
||||
|
||||
@ -69,7 +68,7 @@ pub fn verify_csrf_prevention_token(
|
||||
bail!("invalid signature.");
|
||||
}
|
||||
|
||||
let now = epoch_now_u64()? as i64;
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let age = now - ttime;
|
||||
if age < min_age {
|
||||
|
@ -4,8 +4,6 @@ use anyhow::{bail, format_err, Error};
|
||||
use regex::Regex;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
|
||||
|
||||
use std::path::{PathBuf, Path};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
@ -105,8 +103,7 @@ impl BackupGroup {
|
||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
|
||||
let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
|
||||
list.push(BackupInfo { backup_dir, files });
|
||||
@ -116,7 +113,7 @@ impl BackupGroup {
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
||||
|
||||
let mut last = None;
|
||||
|
||||
@ -142,11 +139,11 @@ impl BackupGroup {
|
||||
}
|
||||
}
|
||||
|
||||
let dt = backup_time.parse::<DateTime<Utc>>()?;
|
||||
if let Some(last_dt) = last {
|
||||
if dt > last_dt { last = Some(dt); }
|
||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||
if let Some(last_timestamp) = last {
|
||||
if timestamp > last_timestamp { last = Some(timestamp); }
|
||||
} else {
|
||||
last = Some(dt);
|
||||
last = Some(timestamp);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -203,45 +200,63 @@ pub struct BackupDir {
|
||||
/// Backup group
|
||||
group: BackupGroup,
|
||||
/// Backup timestamp
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
// backup_time as rfc3339
|
||||
backup_time_string: String
|
||||
}
|
||||
|
||||
impl BackupDir {
|
||||
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
|
||||
pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
|
||||
where
|
||||
T: Into<String>,
|
||||
U: Into<String>,
|
||||
{
|
||||
// Note: makes sure that nanoseconds is 0
|
||||
Self {
|
||||
group: BackupGroup::new(backup_type.into(), backup_id.into()),
|
||||
backup_time: Utc.timestamp(timestamp, 0),
|
||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||
BackupDir::with_group(group, backup_time)
|
||||
}
|
||||
|
||||
pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
|
||||
where
|
||||
T: Into<String>,
|
||||
U: Into<String>,
|
||||
V: Into<String>,
|
||||
{
|
||||
let backup_time_string = backup_time_string.into();
|
||||
let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
|
||||
let group = BackupGroup::new(backup_type.into(), backup_id.into());
|
||||
Ok(Self { group, backup_time, backup_time_string })
|
||||
}
|
||||
pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
|
||||
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
|
||||
|
||||
pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
|
||||
let backup_time_string = Self::backup_time_to_string(backup_time)?;
|
||||
Ok(Self { group, backup_time, backup_time_string })
|
||||
}
|
||||
|
||||
pub fn group(&self) -> &BackupGroup {
|
||||
&self.group
|
||||
}
|
||||
|
||||
pub fn backup_time(&self) -> DateTime<Utc> {
|
||||
pub fn backup_time(&self) -> i64 {
|
||||
self.backup_time
|
||||
}
|
||||
|
||||
pub fn backup_time_string(&self) -> &str {
|
||||
&self.backup_time_string
|
||||
}
|
||||
|
||||
pub fn relative_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = self.group.group_path();
|
||||
|
||||
relative_path.push(Self::backup_time_to_string(self.backup_time));
|
||||
relative_path.push(self.backup_time_string.clone());
|
||||
|
||||
relative_path
|
||||
}
|
||||
|
||||
pub fn backup_time_to_string(backup_time: DateTime<Utc>) -> String {
|
||||
backup_time.to_rfc3339_opts(SecondsFormat::Secs, true)
|
||||
pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
|
||||
// fixme: can this fail? (avoid unwrap)
|
||||
proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
|
||||
}
|
||||
}
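
BackupDir now stores the snapshot time as a plain Unix epoch plus its cached RFC 3339 string, and the constructors return Result so malformed timestamps are rejected up front. A short usage sketch assuming the constructors exactly as they appear in this hunk; the import path (proxmox_backup::backup) is an assumption based on the `use crate::backup::*` lines elsewhere in this diff.

    // Usage sketch for the reworked BackupDir constructors shown above.
    use anyhow::Error;
    use proxmox_backup::backup::BackupDir;

    fn demo() -> Result<(), Error> {
        // from epoch seconds: the RFC 3339 string is derived and cached internally
        let dir = BackupDir::new("host", "elsa", 1_561_718_988)?;
        println!("{}", dir.backup_time_string()); // "2019-06-28T10:49:48Z"

        // from an RFC 3339 string: the epoch value is parsed and cached
        let dir = BackupDir::with_rfc3339("host", "elsa", "2019-06-28T10:49:48Z")?;
        assert_eq!(dir.backup_time(), 1_561_718_988);
        Ok(())
    }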
|
||||
|
||||
@ -255,9 +270,11 @@ impl std::str::FromStr for BackupDir {
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
BackupDir::with_rfc3339(
|
||||
cap.get(1).unwrap().as_str(),
|
||||
cap.get(2).unwrap().as_str(),
|
||||
cap.get(3).unwrap().as_str(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -265,14 +282,7 @@ impl std::fmt::Display for BackupDir {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let backup_type = self.group.backup_type();
         let id = self.group.backup_id();
-        let time = Self::backup_time_to_string(self.backup_time);
-        write!(f, "{}/{}/{}", backup_type, id, time)
-    }
-}
-
-impl From<(BackupGroup, i64)> for BackupDir {
-    fn from((group, timestamp): (BackupGroup, i64)) -> Self {
-        Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+        write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
     }
 }
@@ -330,13 +340,12 @@ impl BackupInfo {
             if file_type != nix::dir::Type::Directory { return Ok(()); }
             tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
                 if file_type != nix::dir::Type::Directory { return Ok(()); }
-                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
+                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |l2_fd, backup_time_string, file_type| {
                     if file_type != nix::dir::Type::Directory { return Ok(()); }

-                    let dt = backup_time.parse::<DateTime<Utc>>()?;
-                    let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
+                    let backup_dir = BackupDir::with_rfc3339(backup_type, backup_id, backup_time_string)?;

-                    let files = list_backup_files(l2_fd, backup_time)?;
+                    let files = list_backup_files(l2_fd, backup_time_string)?;

                     list.push(BackupInfo { backup_dir, files });
@ -5,7 +5,6 @@ use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::offset::{TimeZone, Local};
|
||||
|
||||
use pathpatterns::{MatchList, MatchType};
|
||||
use proxmox::tools::io::ReadExt;
|
||||
@@ -533,17 +532,17 @@ impl <R: Read + Seek> CatalogReader<R> {
                 self.dump_dir(&path, pos)?;
             }
             CatalogEntryType::File => {
-                let dt = Local
-                    .timestamp_opt(mtime as i64, 0)
-                    .single() // chrono docs say timestamp_opt can only be None or Single!
-                    .unwrap_or_else(|| Local.timestamp(0, 0));
+                let mut mtime_string = mtime.to_string();
+                if let Ok(s) = proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
+                    mtime_string = s;
+                }

                 println!(
                     "{} {:?} {} {}",
                     etype,
                     path,
                     size,
-                    dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
+                    mtime_string,
                 );
             }
             _ => {
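The catalog dump now renders the mtime with `strftime_local` and keeps the raw epoch as a fallback. The same pattern as a small helper, assuming only the `proxmox::tools::time` call used above (the helper name is made up):

// Sketch of the fallback pattern: format the epoch, or print it verbatim
// if formatting fails.
fn render_mtime(mtime: u64) -> String {
    match proxmox::tools::time::strftime_local("%FT%TZ", mtime as i64) {
        Ok(s) => s,
        Err(_) => mtime.to_string(),
    }
}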
@ -104,12 +104,11 @@ impl ChunkStore {
|
||||
}
|
||||
let percentage = (i*100)/(64*1024);
|
||||
if percentage != last_percentage {
|
||||
eprintln!("{}%", percentage);
|
||||
// eprintln!("ChunkStore::create {}%", percentage);
|
||||
last_percentage = percentage;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Self::open(name, base)
|
||||
}
|
||||
|
||||
@ -187,7 +186,7 @@ impl ChunkStore {
|
||||
pub fn get_chunk_iterator(
|
||||
&self,
|
||||
) -> Result<
|
||||
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator,
|
||||
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
|
||||
Error
|
||||
> {
|
||||
use nix::dir::Dir;
|
||||
@@ -219,19 +218,21 @@ impl ChunkStore {
                     Some(Ok(entry)) => {
                         // skip files if they're not a hash
                         let bytes = entry.file_name().to_bytes();
-                        if bytes.len() != 64 {
+                        if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
                             continue;
                         }
-                        if !bytes.iter().all(u8::is_ascii_hexdigit) {
+                        if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
                             continue;
                         }
-                        return Some((Ok(entry), percentage));
+
+                        let bad = bytes.ends_with(".bad".as_bytes());
+                        return Some((Ok(entry), percentage, bad));
                     }
                     Some(Err(err)) => {
                         // stop after first error
                         done = true;
                         // and pass the error through:
-                        return Some((Err(err), percentage));
+                        return Some((Err(err), percentage, false));
                     }
                     None => (), // open next directory
                 }
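The iterator now reports a `bad` flag so the garbage collector can treat renamed corrupt chunks separately. A sketch of the filename filter it applies (same logic as above, the helper name is invented):

// A chunk entry is either a plain 64-character hex digest, or a digest with
// an ".<n>.bad" suffix (single-digit counter).
fn classify_chunk_name(name: &[u8]) -> Option<bool> {
    if name.len() != 64 && name.len() != 64 + ".0.bad".len() {
        return None; // not a chunk file at all
    }
    if !name.iter().take(64).all(u8::is_ascii_hexdigit) {
        return None;
    }
    Some(name.ends_with(b".bad")) // true => renamed corrupt chunk
}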
@ -261,7 +262,7 @@ impl ChunkStore {
|
||||
// other errors are fatal, so end our iteration
|
||||
done = true;
|
||||
// and pass the error through:
|
||||
return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage));
|
||||
return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -280,6 +281,7 @@ impl ChunkStore {
|
||||
worker: &WorkerTask,
|
||||
) -> Result<(), Error> {
|
||||
use nix::sys::stat::fstatat;
|
||||
use nix::unistd::{unlinkat, UnlinkatFlags};
|
||||
|
||||
let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)
|
||||
|
||||
@ -292,10 +294,10 @@ impl ChunkStore {
|
||||
let mut last_percentage = 0;
|
||||
let mut chunk_count = 0;
|
||||
|
||||
for (entry, percentage) in self.get_chunk_iterator()? {
|
||||
for (entry, percentage, bad) in self.get_chunk_iterator()? {
|
||||
if last_percentage != percentage {
|
||||
last_percentage = percentage;
|
||||
worker.log(format!("{}%, processed {} chunks", percentage, chunk_count));
|
||||
worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
|
||||
}
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
@ -321,14 +323,47 @@ impl ChunkStore {
|
||||
let lock = self.mutex.lock();
|
||||
|
||||
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
|
||||
if stat.st_atime < min_atime {
|
||||
if bad {
|
||||
// filename validity checked in iterator
|
||||
let orig_filename = std::ffi::CString::new(&filename.to_bytes()[..64])?;
|
||||
match fstatat(
|
||||
dirfd,
|
||||
orig_filename.as_c_str(),
|
||||
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
|
||||
{
|
||||
Ok(_) => {
|
||||
match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
|
||||
Err(err) =>
|
||||
worker.warn(format!(
|
||||
"unlinking corrupt chunk {:?} failed on store '{}' - {}",
|
||||
filename,
|
||||
self.name,
|
||||
err,
|
||||
)),
|
||||
Ok(_) => {
|
||||
status.removed_bad += 1;
|
||||
status.removed_bytes += stat.st_size as u64;
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
|
||||
// chunk hasn't been rewritten yet, keep .bad file
|
||||
},
|
||||
Err(err) => {
|
||||
// some other error, warn user and keep .bad file around too
|
||||
worker.warn(format!(
|
||||
"error during stat on '{:?}' - {}",
|
||||
orig_filename,
|
||||
err,
|
||||
));
|
||||
}
|
||||
}
|
||||
} else if stat.st_atime < min_atime {
|
||||
//let age = now - stat.st_atime;
|
||||
//println!("UNLINK {} {:?}", age/(3600*24), filename);
|
||||
let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
|
||||
if res != 0 {
|
||||
let err = nix::Error::last();
|
||||
if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
|
||||
bail!(
|
||||
"unlink chunk {:?} failed on store '{}' - {}",
|
||||
"unlinking chunk {:?} failed on store '{}' - {}",
|
||||
filename,
|
||||
self.name,
|
||||
err,
|
||||
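During phase 2 the sweep only unlinks a `.bad` file once a good copy of the chunk exists again (i.e. it has been rewritten by a later backup); otherwise the file is kept so the data is not lost for good. Stripped of the `fstatat`/`unlinkat` plumbing, the decision could be sketched with plain `std::fs` like this (helper name and the use of `std::fs` are illustrative, not the actual implementation):

use std::path::Path;

// Remove "<digest>.<n>.bad" only if "<digest>" exists again in the store.
fn sweep_bad_file(chunk_dir: &Path, bad_name: &str) -> std::io::Result<()> {
    if bad_name.len() < 64 {
        return Ok(()); // not a valid bad-chunk name, leave it alone
    }
    let digest = &bad_name[..64];
    if chunk_dir.join(digest).exists() {
        // a good copy exists again -> the .bad file is no longer needed
        std::fs::remove_file(chunk_dir.join(bad_name))?;
    }
    // otherwise keep the .bad file around
    Ok(())
}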
@ -366,6 +401,7 @@ impl ChunkStore {
|
||||
|
||||
if let Ok(metadata) = std::fs::metadata(&chunk_path) {
|
||||
if metadata.is_file() {
|
||||
self.touch_chunk(digest)?;
|
||||
return Ok((true, metadata.len()));
|
||||
} else {
|
||||
bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
|
||||
|
@ -10,7 +10,6 @@
|
||||
use std::io::Write;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::pkcs5::pbkdf2_hmac;
|
||||
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
|
||||
@ -216,10 +215,10 @@ impl CryptConfig {
|
||||
pub fn generate_rsa_encoded_key(
|
||||
&self,
|
||||
rsa: openssl::rsa::Rsa<openssl::pkey::Public>,
|
||||
created: DateTime<Local>,
|
||||
created: i64,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
|
||||
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
|
||||
|
||||
|
@ -72,7 +72,7 @@ impl DataBlob {
|
||||
}
|
||||
|
||||
// verify the CRC32 checksum
|
||||
fn verify_crc(&self) -> Result<(), Error> {
|
||||
pub fn verify_crc(&self) -> Result<(), Error> {
|
||||
let expected_crc = self.compute_crc();
|
||||
if expected_crc != self.crc() {
|
||||
bail!("Data blob has wrong CRC checksum.");
|
||||
@ -198,7 +198,10 @@ impl DataBlob {
|
||||
Ok(data)
|
||||
} else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
let data_start = std::mem::size_of::<DataBlobHeader>();
|
||||
let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
|
||||
let mut reader = &self.raw_data[data_start..];
|
||||
let data = zstd::stream::decode_all(&mut reader)?;
|
||||
// zstd::block::decompress is abou 10% slower
|
||||
// let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
|
||||
if let Some(digest) = digest {
|
||||
Self::verify_digest(&data, None, digest)?;
|
||||
}
|
||||
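Decompression switches from `zstd::block::decompress`, which needs an explicit size limit, to the streaming decoder. A minimal sketch of the new path (the helper name is made up; `MAX_BLOB_SIZE` refers to the constant used by the old call):

use anyhow::Error;

// Decode a zstd frame with the streaming API; no upper bound required.
fn decompress_payload(raw: &[u8]) -> Result<Vec<u8>, Error> {
    let mut reader = raw;
    let data = zstd::stream::decode_all(&mut reader)?;
    // The block API would need a size limit instead:
    // let data = zstd::block::decompress(raw, MAX_BLOB_SIZE)?;
    Ok(data)
}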
@ -268,6 +271,12 @@ impl DataBlob {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns if chunk is encrypted
|
||||
pub fn is_encrypted(&self) -> bool {
|
||||
let magic = self.magic();
|
||||
magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0
|
||||
}
|
||||
|
||||
/// Verify digest and data length for unencrypted chunks.
|
||||
///
|
||||
/// To do that, we need to decompress data first. Please note that
|
||||
@ -304,7 +313,7 @@ impl DataBlob {
|
||||
|
||||
let digest = match config {
|
||||
Some(config) => config.compute_digest(data),
|
||||
None => openssl::sha::sha256(&data),
|
||||
None => openssl::sha::sha256(data),
|
||||
};
|
||||
if &digest != expected_digest {
|
||||
bail!("detected chunk with wrong digest.");
|
||||
|
@ -6,7 +6,6 @@ use std::convert::TryFrom;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
@ -71,6 +70,10 @@ impl DataStore {
|
||||
|
||||
let path = store_config["path"].as_str().unwrap();
|
||||
|
||||
Self::open_with_path(store_name, Path::new(path))
|
||||
}
|
||||
|
||||
pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {
|
||||
let chunk_store = ChunkStore::open(store_name, path)?;
|
||||
|
||||
let gc_status = GarbageCollectionStatus::default();
|
||||
@ -85,7 +88,7 @@ impl DataStore {
|
||||
pub fn get_chunk_iterator(
|
||||
&self,
|
||||
) -> Result<
|
||||
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>,
|
||||
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
|
||||
Error
|
||||
> {
|
||||
self.chunk_store.get_chunk_iterator()
|
||||
@ -242,7 +245,7 @@ impl DataStore {
|
||||
/// Returns the time of the last successful backup
|
||||
///
|
||||
/// Or None if there is no backup in the group (or the group dir does not exist).
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> {
|
||||
pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
|
||||
let base_path = self.base_path();
|
||||
let mut group_path = base_path.clone();
|
||||
group_path.push(backup_group.group_path());
|
||||
@ -430,6 +433,12 @@ impl DataStore {
|
||||
|
||||
let image_list = self.list_images()?;
|
||||
|
||||
let image_count = image_list.len();
|
||||
|
||||
let mut done = 0;
|
||||
|
||||
let mut last_percentage: usize = 0;
|
||||
|
||||
for path in image_list {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
@ -444,6 +453,14 @@ impl DataStore {
|
||||
self.index_mark_used_chunks(index, &path, status, worker)?;
|
||||
}
|
||||
}
|
||||
done += 1;
|
||||
|
||||
let percentage = done*100/image_count;
|
||||
if percentage > last_percentage {
|
||||
worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
|
||||
percentage, done, image_count));
|
||||
last_percentage = percentage;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -481,6 +498,9 @@ impl DataStore {
|
||||
if gc_status.pending_bytes > 0 {
|
||||
worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
|
||||
}
|
||||
if gc_status.removed_bad > 0 {
|
||||
worker.log(&format!("Removed bad files: {}", gc_status.removed_bad));
|
||||
}
|
||||
|
||||
worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));
|
||||
|
||||
|
@ -21,14 +21,14 @@ use super::read_chunk::ReadChunk;
|
||||
use super::Chunker;
|
||||
use super::IndexFile;
|
||||
use super::{DataBlob, DataChunkBuilder};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
/// Header format definition for dynamic index files (`.dixd`)
|
||||
#[repr(C)]
|
||||
pub struct DynamicIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(offset1||digest1||offset2||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
reserved: [u8; 4032], // overall size is one page (4096 bytes)
|
||||
@ -77,7 +77,7 @@ pub struct DynamicIndexReader {
|
||||
pub size: usize,
|
||||
index: Mmap<DynamicEntry>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
@ -107,7 +107,7 @@ impl DynamicIndexReader {
|
||||
bail!("got unknown magic number");
|
||||
}
|
||||
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let rawfd = file.as_raw_fd();
|
||||
|
||||
@ -480,7 +480,7 @@ pub struct DynamicIndexWriter {
|
||||
tmp_filename: PathBuf,
|
||||
csum: Option<openssl::sha::Sha256>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
impl Drop for DynamicIndexWriter {
|
||||
@ -506,13 +506,13 @@ impl DynamicIndexWriter {
|
||||
|
||||
let mut writer = BufWriter::with_capacity(1024 * 1024, file);
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
let mut header = DynamicIndexHeader::zeroed();
|
||||
header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
// header.index_csum = [0u8; 32];
|
||||
writer.write_all(header.as_bytes())?;
|
||||
|
@ -4,9 +4,8 @@ use std::io::{Seek, SeekFrom};
|
||||
use super::chunk_stat::*;
|
||||
use super::chunk_store::*;
|
||||
use super::{IndexFile, ChunkReadInfo};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
use crate::tools;
|
||||
|
||||
use chrono::{Local, TimeZone};
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
@ -23,7 +22,7 @@ use proxmox::tools::Uuid;
|
||||
pub struct FixedIndexHeader {
|
||||
pub magic: [u8; 8],
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
/// Sha256 over the index ``SHA256(digest1||digest2||...)``
|
||||
pub index_csum: [u8; 32],
|
||||
pub size: u64,
|
||||
@ -41,7 +40,7 @@ pub struct FixedIndexReader {
|
||||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
@ -82,7 +81,7 @@ impl FixedIndexReader {
|
||||
}
|
||||
|
||||
let size = u64::from_le(header.size);
|
||||
let ctime = u64::from_le(header.ctime);
|
||||
let ctime = i64::from_le(header.ctime);
|
||||
let chunk_size = u64::from_le(header.chunk_size);
|
||||
|
||||
let index_length = ((size + chunk_size - 1) / chunk_size) as usize;
|
||||
@ -148,10 +147,13 @@ impl FixedIndexReader {
|
||||
pub fn print_info(&self) {
|
||||
println!("Size: {}", self.size);
|
||||
println!("ChunkSize: {}", self.chunk_size);
|
||||
println!(
|
||||
"CTime: {}",
|
||||
Local.timestamp(self.ctime as i64, 0).format("%c")
|
||||
);
|
||||
|
||||
let mut ctime_str = self.ctime.to_string();
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%c",self.ctime) {
|
||||
ctime_str = s;
|
||||
}
|
||||
|
||||
println!("CTime: {}", ctime_str);
|
||||
println!("UUID: {:?}", self.uuid);
|
||||
}
|
||||
}
|
||||
@ -228,7 +230,7 @@ pub struct FixedIndexWriter {
|
||||
index_length: usize,
|
||||
index: *mut u8,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub ctime: i64,
|
||||
}
|
||||
|
||||
// `index` is mmap()ed which cannot be thread-local so should be sendable
|
||||
@ -271,7 +273,7 @@ impl FixedIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = epoch_now_u64()?;
|
||||
let ctime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
@ -279,7 +281,7 @@ impl FixedIndexWriter {
|
||||
let header = unsafe { &mut *(buffer.as_ptr() as *mut FixedIndexHeader) };
|
||||
|
||||
header.magic = super::FIXED_SIZED_CHUNK_INDEX_1_0;
|
||||
header.ctime = u64::to_le(ctime);
|
||||
header.ctime = i64::to_le(ctime);
|
||||
header.size = u64::to_le(size as u64);
|
||||
header.chunk_size = u64::to_le(chunk_size as u64);
|
||||
header.uuid = *uuid.as_bytes();
|
||||
|
@ -1,7 +1,6 @@
|
||||
use anyhow::{bail, format_err, Context, Error};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
@ -61,10 +60,10 @@ impl KeyDerivationConfig {
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct KeyConfig {
|
||||
pub kdf: Option<KeyDerivationConfig>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub created: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::date_time_as_rfc3339")]
|
||||
pub modified: DateTime<Local>,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub created: i64,
|
||||
#[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
|
||||
pub modified: i64,
|
||||
#[serde(with = "proxmox::tools::serde::bytes_as_base64")]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
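The key file keeps RFC 3339 timestamps on disk while the in-memory representation becomes a plain `i64`, handled by the proxmox serde helpers named above. A reduced sketch of such a struct (illustrative only, not the full `KeyConfig`):

use serde::{Deserialize, Serialize};

// Timestamps serialize as rfc3339 strings, the key material as base64,
// using the helper modules referenced in the diff.
#[derive(Deserialize, Serialize)]
struct KeyConfigExample {
    #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
    created: i64,
    #[serde(with = "proxmox::tools::serde::epoch_as_rfc3339")]
    modified: i64,
    #[serde(with = "proxmox::tools::serde::bytes_as_base64")]
    data: Vec<u8>,
}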
@ -136,7 +135,7 @@ pub fn encrypt_key_with_passphrase(
|
||||
enc_data.extend_from_slice(&tag);
|
||||
enc_data.extend_from_slice(&encrypted_key);
|
||||
|
||||
let created = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
Ok(KeyConfig {
|
||||
kdf: Some(kdf),
|
||||
@ -149,7 +148,7 @@ pub fn encrypt_key_with_passphrase(
|
||||
pub fn load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
do_load_and_decrypt_key(path, passphrase)
|
||||
.with_context(|| format!("failed to load decryption key from {:?}", path))
|
||||
}
|
||||
@ -157,14 +156,14 @@ pub fn load_and_decrypt_key(
|
||||
fn do_load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
decrypt_key(&file_get_contents(&path)?, passphrase)
|
||||
}
|
||||
|
||||
pub fn decrypt_key(
|
||||
mut keydata: &[u8],
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
) -> Result<([u8;32], i64), Error> {
|
||||
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
|
||||
|
||||
let raw_data = key_config.data;
|
||||
|
@ -103,7 +103,7 @@ impl BackupManifest {
|
||||
Self {
|
||||
backup_type: snapshot.group().backup_type().into(),
|
||||
backup_id: snapshot.group().backup_id().into(),
|
||||
backup_time: snapshot.backup_time().timestamp(),
|
||||
backup_time: snapshot.backup_time(),
|
||||
files: Vec::new(),
|
||||
unprotected: json!({}),
|
||||
signature: None,
|
||||
|
@ -2,18 +2,16 @@ use anyhow::{Error};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use chrono::{DateTime, Timelike, Datelike, Local};
|
||||
|
||||
use super::{BackupDir, BackupInfo};
|
||||
use super::BackupInfo;
|
||||
|
||||
enum PruneMark { Keep, KeepPartial, Remove }
|
||||
|
||||
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>> (
|
||||
mark: &mut HashMap<PathBuf, PruneMark>,
|
||||
list: &Vec<BackupInfo>,
|
||||
keep: usize,
|
||||
select_id: F,
|
||||
) {
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut include_hash = HashSet::new();
|
||||
|
||||
@ -21,8 +19,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(PruneMark::Keep) = mark.get(&backup_id) {
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
already_included.insert(sel_id);
|
||||
}
|
||||
}
|
||||
@ -30,8 +27,7 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
for info in list {
|
||||
let backup_id = info.backup_dir.relative_path();
|
||||
if let Some(_) = mark.get(&backup_id) { continue; }
|
||||
let local_time = info.backup_dir.backup_time().with_timezone(&Local);
|
||||
let sel_id: String = select_id(local_time, &info);
|
||||
let sel_id: String = select_id(&info)?;
|
||||
|
||||
if already_included.contains(&sel_id) { continue; }
|
||||
|
||||
@ -43,6 +39,8 @@ fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
|
||||
mark.insert(backup_id, PruneMark::Remove);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_incomplete_snapshots(
|
||||
@ -182,44 +180,43 @@ pub fn compute_prune_info(
|
||||
remove_incomplete_snapshots(&mut mark, &list);
|
||||
|
||||
if let Some(keep_last) = options.keep_last {
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |_local_time, info| {
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_last as usize, |info| {
|
||||
Ok(info.backup_dir.backup_time_string().to_owned())
|
||||
})?;
|
||||
}
|
||||
|
||||
use proxmox::tools::time::strftime_local;
|
||||
|
||||
if let Some(keep_hourly) = options.keep_hourly {
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}/{}", local_time.year(), local_time.month(),
|
||||
local_time.day(), local_time.hour())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_daily) = options.keep_daily {
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
|
||||
format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_weekly) = options.keep_weekly {
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
|
||||
let iso_week = local_time.iso_week();
|
||||
let week = iso_week.week();
|
||||
// Note: This year number might not match the calendar year number.
|
||||
let iso_week_year = iso_week.year();
|
||||
format!("{}/{}", iso_week_year, week)
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
||||
// Note: Use iso-week year/week here. This year number
|
||||
// might not match the calendar year number.
|
||||
strftime_local("%G/%V", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_monthly) = options.keep_monthly {
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.month())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
||||
strftime_local("%Y/%m", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_yearly) = options.keep_yearly {
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
|
||||
format!("{}/{}", local_time.year(), local_time.year())
|
||||
});
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
||||
strftime_local("%Y", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
let prune_info: Vec<(BackupInfo, bool)> = list.into_iter()
|
||||
|
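Each retention option now maps a snapshot to a bucket string derived from its backup time via `strftime_local`, and at most `keep` distinct buckets survive. Two of the selectors as standalone sketches (function names are made up):

use anyhow::Error;
use proxmox::tools::time::strftime_local;

// keep-daily: one bucket per calendar day.
fn daily_bucket(backup_time: i64) -> Result<String, Error> {
    strftime_local("%Y/%m/%d", backup_time)
}

// keep-weekly: %G/%V is the ISO week-based year and week number, which may
// differ from the calendar year around New Year.
fn weekly_bucket(backup_time: i64) -> Result<String, Error> {
    strftime_local("%G/%V", backup_time)
}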
@ -1,4 +1,7 @@
|
||||
use std::collections::HashSet;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::atomic::{Ordering, AtomicUsize};
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
@ -6,12 +9,12 @@ use crate::server::WorkerTask;
|
||||
use crate::api2::types::*;
|
||||
|
||||
use super::{
|
||||
DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
|
||||
DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
|
||||
CryptMode,
|
||||
FileInfo, ArchiveType, archive_type,
|
||||
};
|
||||
|
||||
fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
||||
fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
||||
|
||||
let blob = datastore.load_blob(backup_dir, &info.filename)?;
|
||||
|
||||
@ -36,48 +39,125 @@ fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_index_chunks(
|
||||
datastore: &DataStore,
|
||||
index: Box<dyn IndexFile>,
|
||||
verified_chunks: &mut HashSet<[u8;32]>,
|
||||
corrupt_chunks: &mut HashSet<[u8; 32]>,
|
||||
crypt_mode: CryptMode,
|
||||
worker: &WorkerTask,
|
||||
) -> Result<(), Error> {
|
||||
fn rename_corrupted_chunk(
|
||||
datastore: Arc<DataStore>,
|
||||
digest: &[u8;32],
|
||||
worker: Arc<WorkerTask>,
|
||||
) {
|
||||
let (path, digest_str) = datastore.chunk_path(digest);
|
||||
|
||||
let mut errors = 0;
|
||||
let mut counter = 0;
|
||||
let mut new_path = path.clone();
|
||||
loop {
|
||||
new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
|
||||
if new_path.exists() && counter < 9 { counter += 1; } else { break; }
|
||||
}
|
||||
|
||||
match std::fs::rename(&path, &new_path) {
|
||||
Ok(_) => {
|
||||
worker.log(format!("corrupted chunk renamed to {:?}", &new_path));
|
||||
},
|
||||
Err(err) => {
|
||||
match err.kind() {
|
||||
std::io::ErrorKind::NotFound => { /* ignored */ },
|
||||
_ => worker.log(format!("could not rename corrupted chunk {:?} - {}", &path, err))
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// We use a separate thread to read/load chunks, so that we can do
|
||||
// load and verify in parallel to increase performance.
|
||||
fn chunk_reader_thread(
|
||||
datastore: Arc<DataStore>,
|
||||
index: Box<dyn IndexFile + Send>,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
errors: Arc<AtomicUsize>,
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
|
||||
|
||||
let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
|
||||
|
||||
std::thread::spawn(move|| {
|
||||
for pos in 0..index.index_count() {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.range.end - info.range.start;
|
||||
|
||||
if verified_chunks.contains(&info.digest) {
|
||||
if verified_chunks.lock().unwrap().contains(&info.digest) {
|
||||
continue; // already verified
|
||||
}
|
||||
|
||||
if corrupt_chunks.contains(&info.digest) {
|
||||
if corrupt_chunks.lock().unwrap().contains(&info.digest) {
|
||||
let digest_str = proxmox::tools::digest_to_hex(&info.digest);
|
||||
worker.log(format!("chunk {} was marked as corrupt", digest_str));
|
||||
errors += 1;
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
continue;
|
||||
}
|
||||
|
||||
let chunk = match datastore.load_chunk(&info.digest) {
|
||||
match datastore.load_chunk(&info.digest) {
|
||||
Err(err) => {
|
||||
corrupt_chunks.insert(info.digest);
|
||||
corrupt_chunks.lock().unwrap().insert(info.digest);
|
||||
worker.log(format!("can't verify chunk, load failed - {}", err));
|
||||
errors += 1;
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore.clone(), &info.digest, worker.clone());
|
||||
continue;
|
||||
},
|
||||
Ok(chunk) => chunk,
|
||||
}
|
||||
Ok(chunk) => {
|
||||
if sender.send((chunk, info.digest, size)).is_err() {
|
||||
break; // receiver gone - simply stop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
receiver
|
||||
}
|
||||
|
||||
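The reader thread and the verification loop are decoupled through a bounded `sync_channel`, so chunk loading overlaps with the CPU-bound decompression and digest checks. A stripped-down sketch of that producer/consumer pattern (the `load` closure stands in for the real datastore call):

use std::sync::mpsc::sync_channel;
use std::thread;

// Bounded pipeline: the reader stays at most a few chunks ahead of the
// verifier; dropping the receiver stops the reader.
fn pipeline(count: usize, load: impl Fn(usize) -> Vec<u8> + Send + 'static) {
    let (sender, receiver) = sync_channel(3); // buffer up to 3 chunks

    thread::spawn(move || {
        for pos in 0..count {
            if sender.send(load(pos)).is_err() {
                break; // receiver gone - simply stop
            }
        }
    });

    for chunk in receiver {
        // decompression + digest check would happen here, in parallel
        // with the reads going on in the spawned thread
        let _ = chunk.len();
    }
}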
fn verify_index_chunks(
|
||||
datastore: Arc<DataStore>,
|
||||
index: Box<dyn IndexFile + Send>,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||
crypt_mode: CryptMode,
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let errors = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let start_time = Instant::now();
|
||||
|
||||
let chunk_channel = chunk_reader_thread(
|
||||
datastore.clone(),
|
||||
index,
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
errors.clone(),
|
||||
worker.clone(),
|
||||
);
|
||||
|
||||
let mut read_bytes = 0;
|
||||
let mut decoded_bytes = 0;
|
||||
|
||||
loop {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
crate::tools::fail_on_shutdown()?;
|
||||
|
||||
let (chunk, digest, size) = match chunk_channel.recv() {
|
||||
Ok(tuple) => tuple,
|
||||
Err(std::sync::mpsc::RecvError) => break,
|
||||
};
|
||||
|
||||
read_bytes += chunk.raw_size();
|
||||
decoded_bytes += size;
|
||||
|
||||
let chunk_crypt_mode = match chunk.crypt_mode() {
|
||||
Err(err) => {
|
||||
corrupt_chunks.insert(info.digest);
|
||||
corrupt_chunks.lock().unwrap().insert(digest);
|
||||
worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
|
||||
errors += 1;
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
continue;
|
||||
},
|
||||
Ok(mode) => mode,
|
||||
@ -89,21 +169,33 @@ fn verify_index_chunks(
|
||||
chunk_crypt_mode,
|
||||
crypt_mode
|
||||
));
|
||||
errors += 1;
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
let size = info.range.end - info.range.start;
|
||||
|
||||
if let Err(err) = chunk.verify_unencrypted(size as usize, &info.digest) {
|
||||
corrupt_chunks.insert(info.digest);
|
||||
if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
|
||||
corrupt_chunks.lock().unwrap().insert(digest);
|
||||
worker.log(format!("{}", err));
|
||||
errors += 1;
|
||||
errors.fetch_add(1, Ordering::SeqCst);
|
||||
rename_corrupted_chunk(datastore.clone(), &digest, worker.clone());
|
||||
} else {
|
||||
verified_chunks.insert(info.digest);
|
||||
verified_chunks.lock().unwrap().insert(digest);
|
||||
}
|
||||
}
|
||||
|
||||
if errors > 0 {
|
||||
let elapsed = start_time.elapsed().as_secs_f64();
|
||||
|
||||
let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
|
||||
let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
|
||||
|
||||
let read_speed = read_bytes_mib/elapsed;
|
||||
let decode_speed = decoded_bytes_mib/elapsed;
|
||||
|
||||
let error_count = errors.load(Ordering::SeqCst);
|
||||
|
||||
worker.log(format!(" verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
|
||||
read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
|
||||
|
||||
if errors.load(Ordering::SeqCst) > 0 {
|
||||
bail!("chunks could not be verified");
|
||||
}
|
||||
|
||||
@ -111,12 +203,12 @@ fn verify_index_chunks(
|
||||
}
|
||||
|
||||
fn verify_fixed_index(
|
||||
datastore: &DataStore,
|
||||
datastore: Arc<DataStore>,
|
||||
backup_dir: &BackupDir,
|
||||
info: &FileInfo,
|
||||
verified_chunks: &mut HashSet<[u8;32]>,
|
||||
corrupt_chunks: &mut HashSet<[u8;32]>,
|
||||
worker: &WorkerTask,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut path = backup_dir.relative_path();
|
||||
@ -137,12 +229,12 @@ fn verify_fixed_index(
|
||||
}
|
||||
|
||||
fn verify_dynamic_index(
|
||||
datastore: &DataStore,
|
||||
datastore: Arc<DataStore>,
|
||||
backup_dir: &BackupDir,
|
||||
info: &FileInfo,
|
||||
verified_chunks: &mut HashSet<[u8;32]>,
|
||||
corrupt_chunks: &mut HashSet<[u8;32]>,
|
||||
worker: &WorkerTask,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut path = backup_dir.relative_path();
|
||||
@ -172,11 +264,11 @@ fn verify_dynamic_index(
|
||||
/// - Ok(false) if there were verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_backup_dir(
|
||||
datastore: &DataStore,
|
||||
datastore: Arc<DataStore>,
|
||||
backup_dir: &BackupDir,
|
||||
verified_chunks: &mut HashSet<[u8;32]>,
|
||||
corrupt_chunks: &mut HashSet<[u8;32]>,
|
||||
worker: &WorkerTask
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
worker: Arc<WorkerTask>
|
||||
) -> Result<bool, Error> {
|
||||
|
||||
let mut manifest = match datastore.load_manifest(&backup_dir) {
|
||||
@ -191,52 +283,52 @@ pub fn verify_backup_dir(
|
||||
|
||||
let mut error_count = 0;
|
||||
|
||||
let mut verify_result = "ok";
|
||||
let mut verify_result = VerifyState::Ok;
|
||||
for info in manifest.files() {
|
||||
let result = proxmox::try_block!({
|
||||
worker.log(format!(" check {}", info.filename));
|
||||
match archive_type(&info.filename)? {
|
||||
ArchiveType::FixedIndex =>
|
||||
verify_fixed_index(
|
||||
&datastore,
|
||||
datastore.clone(),
|
||||
&backup_dir,
|
||||
info,
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
worker
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
worker.clone(),
|
||||
),
|
||||
ArchiveType::DynamicIndex =>
|
||||
verify_dynamic_index(
|
||||
&datastore,
|
||||
datastore.clone(),
|
||||
&backup_dir,
|
||||
info,
|
||||
verified_chunks,
|
||||
corrupt_chunks,
|
||||
worker
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
worker.clone(),
|
||||
),
|
||||
ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
|
||||
ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
|
||||
}
|
||||
});
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
crate::tools::fail_on_shutdown()?;
|
||||
|
||||
if let Err(err) = result {
|
||||
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
|
||||
error_count += 1;
|
||||
verify_result = "failed";
|
||||
verify_result = VerifyState::Failed;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
let verify_state = SnapshotVerifyState {
|
||||
state: verify_result.to_string(),
|
||||
state: verify_result,
|
||||
upid: worker.upid().clone(),
|
||||
};
|
||||
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
||||
datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
|
||||
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
||||
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
|
||||
|
||||
@ -245,32 +337,45 @@ pub fn verify_backup_dir(
|
||||
/// Errors are logged to the worker log.
|
||||
///
|
||||
/// Returns
|
||||
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||
/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<Vec<String>, Error> {
|
||||
pub fn verify_backup_group(
|
||||
datastore: Arc<DataStore>,
|
||||
group: &BackupGroup,
|
||||
verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
progress: Option<(usize, usize)>, // (done, snapshot_count)
|
||||
worker: Arc<WorkerTask>,
|
||||
) -> Result<(usize, Vec<String>), Error> {
|
||||
|
||||
let mut errors = Vec::new();
|
||||
let mut list = match group.list_backups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
|
||||
return Ok(errors);
|
||||
return Ok((0, errors));
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify group {}:{}", datastore.name(), group));
|
||||
|
||||
let mut verified_chunks = HashSet::with_capacity(1024*16); // start with 16384 chunks (up to 65GB)
|
||||
let mut corrupt_chunks = HashSet::with_capacity(64); // start with 64 chunks since we assume there are few corrupt ones
|
||||
let (done, snapshot_count) = progress.unwrap_or((0, list.len()));
|
||||
|
||||
let mut count = 0;
|
||||
BackupInfo::sort_list(&mut list, false); // newest first
|
||||
for info in list {
|
||||
if !verify_backup_dir(datastore, &info.backup_dir, &mut verified_chunks, &mut corrupt_chunks, worker)?{
|
||||
count += 1;
|
||||
if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
|
||||
errors.push(info.backup_dir.to_string());
|
||||
}
|
||||
if snapshot_count != 0 {
|
||||
let pos = done + count;
|
||||
let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
|
||||
worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(errors)
|
||||
Ok((count, errors))
|
||||
}
|
||||
|
||||
/// Verify all backups inside a datastore
|
||||
@ -280,12 +385,15 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
|
||||
/// Returns
|
||||
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<Vec<String>, Error> {
|
||||
pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
|
||||
|
||||
let mut errors = Vec::new();
|
||||
|
||||
let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Ok(list) => list
|
||||
.into_iter()
|
||||
.filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
|
||||
.collect::<Vec<BackupGroup>>(),
|
||||
Err(err) => {
|
||||
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
||||
return Ok(errors);
|
||||
@ -294,11 +402,32 @@ pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<
|
||||
|
||||
list.sort_unstable();
|
||||
|
||||
worker.log(format!("verify datastore {}", datastore.name()));
|
||||
let mut snapshot_count = 0;
|
||||
for group in list.iter() {
|
||||
snapshot_count += group.list_backups(&datastore.base_path())?.len();
|
||||
}
|
||||
|
||||
// start with 16384 chunks (up to 65GB)
|
||||
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
|
||||
|
||||
// start with 64 chunks since we assume there are few corrupt ones
|
||||
let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
|
||||
|
||||
worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
|
||||
|
||||
let mut done = 0;
|
||||
for group in list {
|
||||
let mut group_errors = verify_backup_group(datastore, &group, worker)?;
|
||||
let (count, mut group_errors) = verify_backup_group(
|
||||
datastore.clone(),
|
||||
&group,
|
||||
verified_chunks.clone(),
|
||||
corrupt_chunks.clone(),
|
||||
Some((done, snapshot_count)),
|
||||
worker.clone(),
|
||||
)?;
|
||||
errors.append(&mut group_errors);
|
||||
|
||||
done += count;
|
||||
}
|
||||
|
||||
Ok(errors)
|
||||
|
@ -8,7 +8,6 @@ use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, DateTime, Utc, TimeZone};
|
||||
use futures::future::FutureExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
use serde_json::{json, Value};
|
||||
@ -16,11 +15,20 @@ use tokio::sync::mpsc;
|
||||
use xdg::BaseDirectories;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
use proxmox::{
|
||||
tools::{
|
||||
time::{strftime_local, epoch_i64},
|
||||
fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
|
||||
},
|
||||
api::{
|
||||
api,
|
||||
ApiHandler,
|
||||
ApiMethod,
|
||||
RpcEnvironment,
|
||||
schema::*,
|
||||
cli::*,
|
||||
},
|
||||
};
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
@ -246,7 +254,7 @@ pub async fn api_datastore_latest_snapshot(
|
||||
client: &HttpClient,
|
||||
store: &str,
|
||||
group: BackupGroup,
|
||||
) -> Result<(String, String, DateTime<Utc>), Error> {
|
||||
) -> Result<(String, String, i64), Error> {
|
||||
|
||||
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
|
||||
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
|
||||
@ -257,7 +265,7 @@ pub async fn api_datastore_latest_snapshot(
|
||||
|
||||
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
|
||||
|
||||
let backup_time = Utc.timestamp(list[0].backup_time, 0);
|
||||
let backup_time = list[0].backup_time;
|
||||
|
||||
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
|
||||
}
|
||||
@ -373,7 +381,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -444,7 +452,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -502,7 +510,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
|
||||
let result = client.delete(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
@ -639,7 +647,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
|
||||
let mut result = client.get(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
@ -986,18 +994,18 @@ async fn create_backup(
|
||||
}
|
||||
}
|
||||
|
||||
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
|
||||
let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
|
||||
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
|
||||
|
||||
println!("Client name: {}", proxmox::tools::nodename());
|
||||
|
||||
let start_time = Local::now();
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
let (crypt_config, rsa_encrypted_key) = match keydata {
|
||||
None => (None, None),
|
||||
@ -1026,6 +1034,7 @@ async fn create_backup(
|
||||
&backup_id,
|
||||
backup_time,
|
||||
verbose,
|
||||
false
|
||||
).await?;
|
||||
|
||||
let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
|
||||
@ -1034,7 +1043,7 @@ async fn create_backup(
|
||||
None
|
||||
};
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
|
||||
let mut catalog = None;
|
||||
@ -1149,11 +1158,11 @@ async fn create_backup(
|
||||
|
||||
client.finish().await?;
|
||||
|
||||
let end_time = Local::now();
|
||||
let elapsed = end_time.signed_duration_since(start_time);
|
||||
println!("Duration: {}", elapsed);
|
||||
let end_time = std::time::Instant::now();
|
||||
let elapsed = end_time.duration_since(start_time);
|
||||
println!("Duration: {:.2}s", elapsed.as_secs_f64());
|
||||
|
||||
println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
|
||||
println!("End Time: {}", strftime_local("%c", epoch_i64())?);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
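Timing switches from chrono arithmetic to a monotonic `std::time::Instant` for the duration, with `strftime_local` for the human-readable start and end times. A compact sketch of that pattern (the wrapper function is made up):

use proxmox::tools::time::{epoch_i64, strftime_local};

// Wall-clock strings via strftime_local(), elapsed time via Instant.
fn timed<T>(f: impl FnOnce() -> T) -> Result<T, anyhow::Error> {
    println!("Start Time: {}", strftime_local("%c", epoch_i64())?);
    let start = std::time::Instant::now();
    let result = f();
    println!("Duration: {:.2}s", start.elapsed().as_secs_f64());
    println!("End Time: {}", strftime_local("%c", epoch_i64())?);
    Ok(result)
}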
@ -1491,7 +1500,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
|
||||
let args = json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
});
|
||||
|
||||
let body = hyper::Body::from(raw_data);
|
||||
@ -1559,7 +1568,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
@ -1751,11 +1760,12 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
|
||||
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
|
||||
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
|
||||
{
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
|
||||
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
@ -1786,7 +1796,7 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
|
||||
let query = tools::json_object_to_query(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time().timestamp(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
})).unwrap();
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
|
||||
|
@ -1,4 +1,4 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
@ -13,7 +13,7 @@ use proxmox_backup::api2::types::Userid;
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::buildcfg;
|
||||
use proxmox_backup::server;
|
||||
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
|
||||
use proxmox_backup::tools::daemon;
|
||||
use proxmox_backup::server::{ApiConfig, rest::*};
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
|
||||
@ -53,6 +53,7 @@ async fn run() -> Result<(), Error> {
|
||||
config.add_alias("extjs", "/usr/share/javascript/extjs");
|
||||
config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
|
||||
config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
|
||||
config.add_alias("locale", "/usr/share/pbs-i18n");
|
||||
config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
|
||||
config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
|
||||
config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
|
||||
@ -86,8 +87,6 @@ async fn run() -> Result<(), Error> {
|
||||
let acceptor = Arc::clone(&acceptor);
|
||||
async move {
|
||||
sock.set_nodelay(true).unwrap();
|
||||
sock.set_send_buffer_size(1024*1024).unwrap();
|
||||
sock.set_recv_buffer_size(1024*1024).unwrap();
|
||||
Ok(tokio_openssl::accept(&acceptor, sock)
|
||||
.await
|
||||
.ok() // handshake errors aren't be fatal, so return None to filter
|
||||
@ -145,10 +144,11 @@ fn start_task_scheduler() {
|
||||
tokio::spawn(task.map(|_| ()));
|
||||
}
|
||||
|
||||
use std::time:: {Instant, Duration};
|
||||
use std::time::{SystemTime, Instant, Duration, UNIX_EPOCH};
|
||||
|
||||
fn next_minute() -> Result<Instant, Error> {
|
||||
let epoch_now = epoch_now()?;
|
||||
let now = SystemTime::now();
|
||||
let epoch_now = now.duration_since(UNIX_EPOCH)?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
Ok(Instant::now() + epoch_next - epoch_now)
|
||||
}
|
||||
@ -196,45 +196,20 @@ async fn schedule_tasks() -> Result<(), Error> {
|
||||
|
||||
schedule_datastore_garbage_collection().await;
|
||||
schedule_datastore_prune().await;
|
||||
schedule_datastore_verification().await;
|
||||
schedule_datastore_sync_jobs().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
|
||||
|
||||
let list = proxmox_backup::server::read_task_list()?;
|
||||
|
||||
let mut last: Option<&server::UPID> = None;
|
||||
|
||||
for entry in list.iter() {
|
||||
if entry.upid.worker_type == worker_type {
|
||||
if let Some(ref id) = entry.upid.worker_id {
|
||||
if id == worker_id {
|
||||
match last {
|
||||
Some(ref upid) => {
|
||||
if upid.starttime < entry.upid.starttime {
|
||||
last = Some(&entry.upid)
|
||||
}
|
||||
}
|
||||
None => {
|
||||
last = Some(&entry.upid)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(last.cloned())
|
||||
}
|
||||
|
||||
|
||||
async fn schedule_datastore_garbage_collection() {
|
||||
|
||||
use proxmox_backup::backup::DataStore;
|
||||
use proxmox_backup::server::{UPID, WorkerTask};
|
||||
use proxmox_backup::config::datastore::{self, DataStoreConfig};
|
||||
use proxmox_backup::config::{
|
||||
jobstate::{self, Job},
|
||||
datastore::{self, DataStoreConfig}
|
||||
};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
@ -290,33 +265,33 @@ async fn schedule_datastore_garbage_collection() {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match lookup_last_worker(worker_type, &store) {
|
||||
Ok(Some(upid)) => upid.starttime,
|
||||
Ok(None) => 0,
|
||||
match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("lookup_last_job_start failed: {}", err);
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(next) => next,
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => continue,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let store2 = store.clone();
|
||||
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
@ -325,9 +300,20 @@ async fn schedule_datastore_garbage_collection() {
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
job.start(&worker.upid().to_string())?;
|
||||
|
||||
worker.log(format!("starting garbage collection on store {}", store));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
datastore.garbage_collection(&worker)
|
||||
|
||||
let result = datastore.garbage_collection(&worker);
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
) {
|
||||
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
|
||||
@ -338,9 +324,12 @@ async fn schedule_datastore_garbage_collection() {
|
||||
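All three schedulers now share the same due-check: read the last run time from the job state, compute the next calendar event after it, and compare that to the current epoch. A condensed sketch, assuming the signatures used above (module paths taken from the imports in the diff, the function name is made up):

use anyhow::Error;
use proxmox_backup::config::jobstate;
use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

// Returns true if the job identified by (worker_type, id) is due according
// to its calendar-event schedule string.
fn is_due(worker_type: &str, id: &str, event_str: &str) -> Result<bool, Error> {
    let event = parse_calendar_event(event_str)?;
    let last = jobstate::last_run_time(worker_type, id)?;
    let next = match compute_next_event(&event, last, false)? {
        Some(next) => next,
        None => return Ok(false), // no further event scheduled
    };
    Ok(next <= proxmox::tools::time::epoch_i64())
}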
async fn schedule_datastore_prune() {
|
||||
|
||||
use proxmox_backup::backup::{
|
||||
PruneOptions, DataStore, BackupGroup, BackupDir, compute_prune_info};
|
||||
PruneOptions, DataStore, BackupGroup, compute_prune_info};
|
||||
use proxmox_backup::server::{WorkerTask};
|
||||
use proxmox_backup::config::datastore::{self, DataStoreConfig};
|
||||
use proxmox_backup::config::{
|
||||
jobstate::{self, Job},
|
||||
datastore::{self, DataStoreConfig}
|
||||
};
|
||||
use proxmox_backup::tools::systemd::time::{
|
||||
parse_calendar_event, compute_next_event};
|
||||
|
||||
@ -397,37 +386,32 @@ async fn schedule_datastore_prune() {
|
||||
|
||||
let worker_type = "prune";
|
||||
|
||||
let last = match lookup_last_worker(worker_type, &store) {
|
||||
Ok(Some(upid)) => {
|
||||
if proxmox_backup::server::worker_is_active_local(&upid) {
|
||||
continue;
|
||||
}
|
||||
upid.starttime
|
||||
}
|
||||
Ok(None) => 0,
|
||||
let last = match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("lookup_last_job_start failed: {}", err);
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(next) => next,
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => continue,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let store2 = store.clone();
|
||||
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
@ -436,6 +420,11 @@ async fn schedule_datastore_prune() {
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
|
||||
job.start(&worker.upid().to_string())?;
|
||||
|
||||
let result = try_block!({
|
||||
|
||||
worker.log(format!("Starting datastore prune on store \"{}\"", store));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
|
||||
@ -456,15 +445,22 @@ async fn schedule_datastore_prune() {
|
||||
"{} {}/{}/{}",
|
||||
if keep { "keep" } else { "remove" },
|
||||
group.backup_type(), group.backup_id(),
|
||||
BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
|
||||
|
||||
info.backup_dir.backup_time_string()));
|
||||
if !keep {
|
||||
datastore.remove_backup_dir(&info.backup_dir, true)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
) {
|
||||
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
|
||||
@ -472,6 +468,120 @@ async fn schedule_datastore_prune() {
|
||||
}
|
||||
}
|
||||
|
||||
async fn schedule_datastore_verification() {
    use proxmox_backup::backup::{DataStore, verify_all_backups};
    use proxmox_backup::server::{WorkerTask};
    use proxmox_backup::config::{
        jobstate::{self, Job},
        datastore::{self, DataStoreConfig}
    };
    use proxmox_backup::tools::systemd::time::{
        parse_calendar_event, compute_next_event};

    let config = match datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;
        }
        Ok((config, _digest)) => config,
    };

    for (store, (_, store_config)) in config.sections {
        let datastore = match DataStore::lookup_datastore(&store) {
            Ok(datastore) => datastore,
            Err(err) => {
                eprintln!("lookup_datastore failed - {}", err);
                continue;
            }
        };

        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
            Ok(c) => c,
            Err(err) => {
                eprintln!("datastore config from_value failed - {}", err);
                continue;
            }
        };

        let event_str = match store_config.verify_schedule {
            Some(event_str) => event_str,
            None => continue,
        };
|
||||
|
||||
let event = match parse_calendar_event(&event_str) {
|
||||
Ok(event) => event,
|
||||
Err(err) => {
|
||||
eprintln!("unable to parse schedule '{}' - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let worker_type = "verify";
|
||||
|
||||
let last = match jobstate::last_run_time(worker_type, &store) {
|
||||
Ok(time) => time,
|
||||
Err(err) => {
|
||||
eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => continue,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let mut job = match Job::new(worker_type, &store) {
|
||||
Ok(job) => job,
|
||||
Err(_) => continue, // could not get lock
|
||||
};
|
||||
|
||||
let worker_id = store.clone();
|
||||
let store2 = store.clone();
|
||||
if let Err(err) = WorkerTask::new_thread(
|
||||
worker_type,
|
||||
Some(worker_id),
|
||||
Userid::backup_userid().clone(),
|
||||
false,
|
||||
move |worker| {
|
||||
job.start(&worker.upid().to_string())?;
|
||||
worker.log(format!("starting verification on store {}", store2));
|
||||
worker.log(format!("task triggered by schedule '{}'", event_str));
|
||||
let result = try_block!({
|
||||
let failed_dirs = verify_all_backups(datastore, worker.clone())?;
|
||||
if failed_dirs.len() > 0 {
|
||||
worker.log("Failed to verify following snapshots:");
|
||||
for dir in failed_dirs {
|
||||
worker.log(format!("\t{}", dir));
|
||||
}
|
||||
Err(format_err!("verification failed - please check the log for details"))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
});
|
||||
|
||||
let status = worker.create_state(&result);
|
||||
|
||||
if let Err(err) = job.finish(status) {
|
||||
eprintln!("could not finish job state for {}: {}", worker_type, err);
|
||||
}
|
||||
|
||||
result
|
||||
},
|
||||
) {
|
||||
eprintln!("unable to start verification on store {} - {}", store, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
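Each of these schedulers shares the same due-check: read the last run time, compute the next event from the calendar schedule, and compare it against the current epoch. A condensed sketch of that check, using only the helper signatures that appear in this diff:

```rust
// Condensed form of the shared "is this schedule due?" check (illustrative only).
use anyhow::Error;
use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

fn schedule_is_due(event_str: &str, last_run: i64) -> Result<bool, Error> {
    let event = parse_calendar_event(event_str)?;
    // compute_next_event now returns Ok(None) when no further run is scheduled
    let next = match compute_next_event(&event, last_run, false)? {
        Some(next) => next,
        None => return Ok(false),
    };
    let now = proxmox::tools::time::epoch_i64();
    Ok(next <= now)
}
```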
|
||||
async fn schedule_datastore_sync_jobs() {
|
||||
|
||||
use proxmox_backup::{
|
||||
@ -520,20 +630,16 @@ async fn schedule_datastore_sync_jobs() {
|
||||
};
|
||||
|
||||
let next = match compute_next_event(&event, last, false) {
|
||||
Ok(next) => next,
|
||||
Ok(Some(next)) => next,
|
||||
Ok(None) => continue,
|
||||
Err(err) => {
|
||||
eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if next > now { continue; }
|
||||
|
||||
let job = match Job::new(worker_type, &job_id) {
|
||||
|
@ -3,7 +3,6 @@ use std::sync::Arc;
|
||||
|
||||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use serde::Serialize;
|
||||
|
||||
use proxmox::api::{ApiMethod, RpcEnvironment};
|
||||
@ -22,6 +21,8 @@ use proxmox_backup::backup::{
|
||||
load_and_decrypt_key,
|
||||
CryptConfig,
|
||||
KeyDerivationConfig,
|
||||
DataBlob,
|
||||
DataChunkBuilder,
|
||||
};
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
@ -61,6 +62,9 @@ struct Speed {
|
||||
"aes256_gcm": {
|
||||
type: Speed,
|
||||
},
|
||||
"verify": {
|
||||
type: Speed,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Copy, Clone, Serialize)]
|
||||
@ -76,29 +80,34 @@ struct BenchmarkResult {
|
||||
decompress: Speed,
|
||||
/// AES256 GCM encryption speed
|
||||
aes256_gcm: Speed,
|
||||
/// Verify speed
|
||||
verify: Speed,
|
||||
}
|
||||
|
||||
|
||||
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
|
||||
tls: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 690.0, // TLS to localhost, AMD Ryzen 7 2700X
|
||||
},
|
||||
sha256: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 2022.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
compress: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 752.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
decompress: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 1198.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
aes256_gcm: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
|
||||
top: 1_000_000.0 * 3645.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
verify: Speed {
|
||||
speed: None,
|
||||
top: 1_000_000.0 * 758.0, // AMD Ryzen 7 2700X
|
||||
},
|
||||
};
|
||||
|
||||
@ -195,6 +204,9 @@ fn render_result(
|
||||
.column(ColumnConfig::new("decompress")
|
||||
.header("ZStd level 1 decompression speed")
|
||||
.right_align(false).renderer(render_speed))
|
||||
.column(ColumnConfig::new("verify")
|
||||
.header("Chunk verification speed")
|
||||
.right_align(false).renderer(render_speed))
|
||||
.column(ColumnConfig::new("aes256_gcm")
|
||||
.header("AES256 GCM encryption speed")
|
||||
.right_align(false).renderer(render_speed));
|
||||
@ -212,7 +224,7 @@ async fn test_upload_speed(
|
||||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
|
||||
let backup_time = proxmox::tools::time::epoch_i64();
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
record_repository(&repo);
|
||||
@ -226,6 +238,7 @@ async fn test_upload_speed(
|
||||
"benchmark",
|
||||
backup_time,
|
||||
false,
|
||||
true
|
||||
).await?;
|
||||
|
||||
if verbose { eprintln!("Start TLS speed test"); }
|
||||
@ -257,7 +270,17 @@ fn test_crypt_speed(

    let crypt_config = CryptConfig::new(testkey)?;

    let random_data = proxmox::sys::linux::random_data(1024*1024)?;
    //let random_data = proxmox::sys::linux::random_data(1024*1024)?;
    let mut random_data = vec![];
    // generate pseudo random byte sequence
    for i in 0..256*1024 {
        for j in 0..4 {
            let byte = ((i >> (j<<3))&0xff) as u8;
            random_data.push(byte);
        }
    }

    assert_eq!(random_data.len(), 1024*1024);

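For reference, the deterministic buffer built by the nested loop above is simply the counter encoded as little-endian u32 values; an equivalent construction (illustrative only, not part of the patch) would be:

```rust
// Equivalent construction of the 1 MiB pseudo-random buffer (illustration only).
let random_data: Vec<u8> = (0u32..256 * 1024)
    .flat_map(|i| i.to_le_bytes().to_vec())
    .collect();
assert_eq!(random_data.len(), 1024 * 1024);
```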
    let start_time = std::time::Instant::now();

@ -322,5 +345,23 @@ fn test_crypt_speed(

    eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);


    let start_time = std::time::Instant::now();

    let (chunk, digest) = DataChunkBuilder::new(&random_data)
        .compress(true)
        .build()?;

    let mut bytes = 0;
    loop {
        chunk.verify_unencrypted(random_data.len(), &digest)?;
        bytes += random_data.len();
        if start_time.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.verify.speed = Some(speed);

    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);

    Ok(())
}
|
@ -1,7 +1,6 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, TimeZone};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
@ -112,7 +111,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let created = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let created = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
@ -180,7 +179,7 @@ fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let modified = proxmox::tools::time::epoch_i64();
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
|
@ -141,7 +141,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
|
||||
let (manifest, _) = client.download_manifest().await?;
|
||||
|
||||
let file_info = manifest.lookup_file_info(&archive_name)?;
|
||||
let file_info = manifest.lookup_file_info(&server_archive_name)?;
|
||||
|
||||
if server_archive_name.ends_with(".didx") {
|
||||
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
|
||||
|
@ -1,16 +1,18 @@
|
||||
use anyhow::{format_err, Error};
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::io::{Write, Seek, SeekFrom};
|
||||
use std::fs::File;
|
||||
use std::sync::Arc;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::future::AbortHandle;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::tools::digest_to_hex;
|
||||
|
||||
use crate::backup::*;
|
||||
use crate::{
|
||||
tools::compute_file_csum,
|
||||
backup::*,
|
||||
};
|
||||
|
||||
use super::{HttpClient, H2Client};
|
||||
|
||||
@ -41,14 +43,14 @@ impl BackupReader {
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
) -> Result<Arc<BackupReader>, Error> {
|
||||
|
||||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
});
|
||||
@ -220,29 +222,3 @@ impl BackupReader {
|
||||
Ok(index)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compute_file_csum(file: &mut File) -> Result<([u8; 32], u64), Error> {
|
||||
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let mut hasher = openssl::sha::Sha256::new();
|
||||
let mut buffer = proxmox::tools::vec::undefined(256*1024);
|
||||
let mut size: u64 = 0;
|
||||
|
||||
loop {
|
||||
let count = match file.read(&mut buffer) {
|
||||
Ok(count) => count,
|
||||
Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => { continue; }
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
if count == 0 {
|
||||
break;
|
||||
}
|
||||
size += count as u64;
|
||||
hasher.update(&buffer[..count]);
|
||||
}
|
||||
|
||||
let csum = hasher.finish();
|
||||
|
||||
Ok((csum, size))
|
||||
}
|
||||
|
@ -4,7 +4,6 @@ use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::*;
|
||||
use futures::stream::Stream;
|
||||
use futures::future::AbortHandle;
|
||||
@ -51,16 +50,18 @@ impl BackupWriter {
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
backup_time: DateTime<Utc>,
|
||||
backup_time: i64,
|
||||
debug: bool,
|
||||
benchmark: bool
|
||||
) -> Result<Arc<BackupWriter>, Error> {
|
||||
|
||||
let param = json!({
|
||||
"backup-type": backup_type,
|
||||
"backup-id": backup_id,
|
||||
"backup-time": backup_time.timestamp(),
|
||||
"backup-time": backup_time,
|
||||
"store": datastore,
|
||||
"debug": debug
|
||||
"debug": debug,
|
||||
"benchmark": benchmark
|
||||
});
|
||||
|
||||
let req = HttpClient::request_builder(
|
||||
|
@ -1,8 +1,8 @@
|
||||
use std::io::Write;
|
||||
use std::task::{Context, Poll};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use chrono::Utc;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use http::Uri;
|
||||
@ -30,7 +30,7 @@ use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AuthInfo {
|
||||
pub username: String,
|
||||
pub userid: Userid,
|
||||
pub ticket: String,
|
||||
pub token: String,
|
||||
}
|
||||
@ -100,7 +100,9 @@ pub struct HttpClient {
|
||||
client: Client<HttpsConnector>,
|
||||
server: String,
|
||||
fingerprint: Arc<Mutex<Option<String>>>,
|
||||
auth: BroadcastFuture<AuthInfo>,
|
||||
first_auth: BroadcastFuture<()>,
|
||||
auth: Arc<RwLock<AuthInfo>>,
|
||||
ticket_abort: futures::future::AbortHandle,
|
||||
_options: HttpClientOptions,
|
||||
}
|
||||
|
||||
@ -199,7 +201,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
|
||||
|
||||
let mut data = file_get_json(&path, Some(json!({})))?;
|
||||
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
|
||||
data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});
|
||||
|
||||
@ -230,7 +232,7 @@ fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(Stri
|
||||
// usually /run/user/<uid>/...
|
||||
let path = base.place_runtime_file("tickets").ok()?;
|
||||
let data = file_get_json(&path, None).ok()?;
|
||||
let now = Utc::now().timestamp();
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
|
||||
let uinfo = data[server][userid.as_str()].as_object()?;
|
||||
let timestamp = uinfo["timestamp"].as_i64()?;
|
||||
@ -292,7 +294,6 @@ impl HttpClient {
|
||||
|
||||
let mut httpc = hyper::client::HttpConnector::new();
|
||||
httpc.set_nodelay(true); // important for h2 download performance!
|
||||
httpc.set_recv_buffer_size(Some(1024*1024)); //important for h2 download performance!
|
||||
httpc.enforce_http(false); // we want https...
|
||||
|
||||
let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());
|
||||
@ -319,6 +320,41 @@ impl HttpClient {
|
||||
}
|
||||
};
|
||||
|
||||
let auth = Arc::new(RwLock::new(AuthInfo {
|
||||
userid: userid.clone(),
|
||||
ticket: password.clone(),
|
||||
token: "".to_string(),
|
||||
}));
|
||||
|
||||
let server2 = server.to_string();
|
||||
let client2 = client.clone();
|
||||
let auth2 = auth.clone();
|
||||
let prefix2 = options.prefix.clone();
|
||||
|
||||
        let renewal_future = async move {
            loop {
                tokio::time::delay_for(Duration::new(60*15, 0)).await; // 15 minutes
                let (userid, ticket) = {
                    let authinfo = auth2.read().unwrap().clone();
                    (authinfo.userid, authinfo.ticket)
                };
                match Self::credentials(client2.clone(), server2.clone(), userid, ticket).await {
                    Ok(auth) => {
                        if use_ticket_cache && prefix2.is_some() {
                            let _ = store_ticket_info(prefix2.as_ref().unwrap(), &server2, &auth.userid.to_string(), &auth.ticket, &auth.token);
                        }
                        *auth2.write().unwrap() = auth;
                    },
                    Err(err) => {
                        eprintln!("re-authentication failed: {}", err);
                        return;
                    }
                }
            }
        };

        let (renewal_future, ticket_abort) = futures::future::abortable(renewal_future);
|
||||
|
||||
let login_future = Self::credentials(
|
||||
client.clone(),
|
||||
server.to_owned(),
|
||||
@ -327,13 +363,14 @@ impl HttpClient {
        ).map_ok({
            let server = server.to_string();
            let prefix = options.prefix.clone();
            let authinfo = auth.clone();

            move |auth| {
                if use_ticket_cache && prefix.is_some() {
                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.username, &auth.ticket, &auth.token);
                    let _ = store_ticket_info(prefix.as_ref().unwrap(), &server, &auth.userid.to_string(), &auth.ticket, &auth.token);
                }

                auth
                *authinfo.write().unwrap() = auth;
                tokio::spawn(renewal_future);
            }
        });
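The ticket-renewal task above follows a common pattern: an endless delay loop wrapped with `futures::future::abortable`, keeping the `AbortHandle` so `Drop` can stop it later. A stripped-down sketch of that pattern (hypothetical helper names; tokio 0.2 / futures 0.3 APIs as used by this crate):

```rust
// Stripped-down version of the background ticket-renewal pattern (sketch only).
use std::time::Duration;
use futures::future::{abortable, AbortHandle};

async fn renew_forever() {
    loop {
        tokio::time::delay_for(Duration::from_secs(60 * 15)).await; // every 15 minutes
        // ... re-authenticate and store the fresh ticket in the shared AuthInfo ...
    }
}

fn spawn_renewal() -> AbortHandle {
    let (task, abort_handle) = abortable(renew_forever());
    tokio::spawn(task); // runs until aborted
    abort_handle        // HttpClient::drop() calls .abort() on this handle
}
```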
|
||||
@ -341,7 +378,9 @@ impl HttpClient {
|
||||
client,
|
||||
server: String::from(server),
|
||||
fingerprint: verified_fingerprint,
|
||||
auth: BroadcastFuture::new(Box::new(login_future)),
|
||||
auth,
|
||||
ticket_abort,
|
||||
first_auth: BroadcastFuture::new(Box::new(login_future)),
|
||||
_options: options,
|
||||
})
|
||||
}
|
||||
@ -351,7 +390,9 @@ impl HttpClient {
|
||||
/// Login is done on demand, so this is only required if you need
|
||||
/// access to authentication data in 'AuthInfo'.
|
||||
pub async fn login(&self) -> Result<AuthInfo, Error> {
|
||||
self.auth.listen().await
|
||||
self.first_auth.listen().await?;
|
||||
let authinfo = self.auth.read().unwrap();
|
||||
Ok(authinfo.clone())
|
||||
}
|
||||
|
||||
/// Returns the optional fingerprint passed to the new() constructor.
|
||||
@ -590,7 +631,7 @@ impl HttpClient {
|
||||
let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
|
||||
let cred = Self::api_request(client, req).await?;
|
||||
let auth = AuthInfo {
|
||||
username: cred["data"]["username"].as_str().unwrap().to_owned(),
|
||||
userid: cred["data"]["username"].as_str().unwrap().parse()?,
|
||||
ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
|
||||
token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
|
||||
};
|
||||
@ -668,6 +709,12 @@ impl HttpClient {
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for HttpClient {
|
||||
fn drop(&mut self) {
|
||||
self.ticket_abort.abort();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct H2Client {
|
||||
|
@ -3,15 +3,20 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::json;
|
||||
use std::convert::TryFrom;
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::time::SystemTime;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use proxmox::api::error::{StatusCode, HttpError};
|
||||
use crate::server::{WorkerTask};
|
||||
use crate::backup::*;
|
||||
use crate::api2::types::*;
|
||||
use super::*;
|
||||
use crate::{
|
||||
tools::{ParallelHandler, compute_file_csum},
|
||||
server::WorkerTask,
|
||||
backup::*,
|
||||
api2::types::*,
|
||||
client::*,
|
||||
};
|
||||
|
||||
|
||||
// fixme: implement filters
|
||||
@ -19,27 +24,86 @@ use super::*;
|
||||
// Todo: correctly lock backup groups
|
||||
|
||||
async fn pull_index_chunks<I: IndexFile>(
|
||||
_worker: &WorkerTask,
|
||||
chunk_reader: &mut RemoteChunkReader,
|
||||
worker: &WorkerTask,
|
||||
chunk_reader: RemoteChunkReader,
|
||||
target: Arc<DataStore>,
|
||||
index: I,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
use futures::stream::{self, StreamExt, TryStreamExt};
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let chunk_exists = target.cond_touch_chunk(&info.digest, false)?;
|
||||
let start_time = SystemTime::now();
|
||||
|
||||
let stream = stream::iter(
|
||||
(0..index.index_count())
|
||||
.map(|pos| index.chunk_info(pos).unwrap())
|
||||
.filter(|info| {
|
||||
let mut guard = downloaded_chunks.lock().unwrap();
|
||||
let done = guard.contains(&info.digest);
|
||||
if !done {
|
||||
// Note: We mark a chunk as downloaded before its actually downloaded
|
||||
// to avoid duplicate downloads.
|
||||
guard.insert(info.digest);
|
||||
}
|
||||
!done
|
||||
})
|
||||
);
|
||||
|
||||
let target2 = target.clone();
|
||||
let verify_pool = ParallelHandler::new(
|
||||
"sync chunk writer", 4,
|
||||
move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
|
||||
// println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
|
||||
chunk.verify_unencrypted(size as usize, &digest)?;
|
||||
target2.insert_chunk(&chunk, &digest)?;
|
||||
Ok(())
|
||||
}
|
||||
);
|
||||
|
||||
let verify_and_write_channel = verify_pool.channel();
|
||||
|
||||
let bytes = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
stream
|
||||
.map(|info| {
|
||||
|
||||
let target = Arc::clone(&target);
|
||||
let chunk_reader = chunk_reader.clone();
|
||||
let bytes = Arc::clone(&bytes);
|
||||
let verify_and_write_channel = verify_and_write_channel.clone();
|
||||
|
||||
Ok::<_, Error>(async move {
|
||||
let chunk_exists = crate::tools::runtime::block_in_place(|| target.cond_touch_chunk(&info.digest, false))?;
|
||||
if chunk_exists {
|
||||
//worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
continue;
|
||||
return Ok::<_, Error>(());
|
||||
}
|
||||
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
|
||||
let raw_size = chunk.raw_size() as usize;
|
||||
|
||||
chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
|
||||
// decode, verify and write in a separate threads to maximize throughput
|
||||
crate::tools::runtime::block_in_place(|| verify_and_write_channel.send((chunk, info.digest, info.size())))?;
|
||||
|
||||
target.insert_chunk(&chunk, &info.digest)?;
|
||||
}
|
||||
bytes.fetch_add(raw_size, Ordering::SeqCst);
|
||||
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.try_buffer_unordered(20)
|
||||
.try_for_each(|_res| futures::future::ok(()))
|
||||
.await?;
|
||||
|
||||
drop(verify_and_write_channel);
|
||||
|
||||
verify_pool.complete()?;
|
||||
|
||||
let elapsed = start_time.elapsed()?.as_secs_f64();
|
||||
|
||||
let bytes = bytes.load(Ordering::SeqCst);
|
||||
|
||||
worker.log(format!("downloaded {} bytes ({} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
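The chunk-download stream above avoids fetching the same chunk twice by marking digests in a shared `HashSet` before the download starts. Isolated as a tiny helper for illustration (hypothetical, not part of the patch):

```rust
// "Mark before download" dedup guard, as used by the stream filter above
// (hypothetical stand-alone helper, not part of the patch).
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

fn claim_chunk(downloaded: &Arc<Mutex<HashSet<[u8; 32]>>>, digest: &[u8; 32]) -> bool {
    let mut guard = downloaded.lock().unwrap();
    if guard.contains(digest) {
        return false; // another task already downloads (or downloaded) this chunk
    }
    // mark it *before* the actual download to avoid duplicate work
    guard.insert(*digest);
    true
}
```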
@ -52,6 +116,7 @@ async fn download_manifest(
|
||||
let mut tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.read(true)
|
||||
.open(&filename)?;
|
||||
|
||||
@ -85,6 +150,7 @@ async fn pull_single_archive(
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
archive_info: &FileInfo,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let archive_name = &archive_info.filename;
|
||||
@ -111,7 +177,7 @@ async fn pull_single_archive(
|
||||
let (csum, size) = index.compute_csum();
|
||||
verify_archive(archive_info, &csum, size)?;
|
||||
|
||||
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
|
||||
pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
|
||||
}
|
||||
ArchiveType::FixedIndex => {
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
@ -119,7 +185,7 @@ async fn pull_single_archive(
|
||||
let (csum, size) = index.compute_csum();
|
||||
verify_archive(archive_info, &csum, size)?;
|
||||
|
||||
pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
|
||||
pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
|
||||
}
|
||||
ArchiveType::Blob => {
|
||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
||||
@ -165,6 +231,7 @@ async fn pull_snapshot(
|
||||
reader: Arc<BackupReader>,
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let mut manifest_name = tgt_store.base_path();
|
||||
@ -218,6 +285,7 @@ async fn pull_snapshot(
|
||||
try_client_log_download(worker, reader, &client_log_name).await?;
|
||||
}
|
||||
worker.log("no data changes");
|
||||
let _ = std::fs::remove_file(&tmp_manifest_name);
|
||||
return Ok(()); // nothing changed
|
||||
}
|
||||
}
|
||||
@ -273,6 +341,7 @@ async fn pull_snapshot(
|
||||
tgt_store.clone(),
|
||||
snapshot,
|
||||
&item,
|
||||
downloaded_chunks.clone(),
|
||||
).await?;
|
||||
}
|
||||
|
||||
@ -295,6 +364,7 @@ pub async fn pull_snapshot_from(
|
||||
reader: Arc<BackupReader>,
|
||||
tgt_store: Arc<DataStore>,
|
||||
snapshot: &BackupDir,
|
||||
downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
|
||||
@ -302,7 +372,7 @@ pub async fn pull_snapshot_from(
|
||||
if is_new {
|
||||
worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
|
||||
|
||||
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
|
||||
if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await {
|
||||
if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
|
||||
worker.log(format!("cleanup error - {}", cleanup_err));
|
||||
}
|
||||
@ -311,7 +381,7 @@ pub async fn pull_snapshot_from(
|
||||
worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
|
||||
} else {
|
||||
worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
|
||||
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
|
||||
pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await?;
|
||||
worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
|
||||
}
|
||||
|
||||
@ -346,8 +416,11 @@ pub async fn pull_group(
|
||||
|
||||
let mut remote_snapshots = std::collections::HashSet::new();
|
||||
|
||||
// start with 16384 chunks (up to 65GB)
|
||||
let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));
|
||||
|
||||
for item in list {
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
|
||||
// in-progress backups can't be synced
|
||||
if let None = item.size {
|
||||
@ -379,7 +452,7 @@ pub async fn pull_group(
|
||||
true,
|
||||
).await?;
|
||||
|
||||
pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
|
||||
pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await?;
|
||||
}
|
||||
|
||||
if delete {
|
||||
|
@ -15,7 +15,7 @@ pub struct RemoteChunkReader {
|
||||
client: Arc<BackupReader>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
crypt_mode: CryptMode,
|
||||
cache_hint: HashMap<[u8; 32], usize>,
|
||||
cache_hint: Arc<HashMap<[u8; 32], usize>>,
|
||||
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
|
||||
}
|
||||
|
||||
@ -33,7 +33,7 @@ impl RemoteChunkReader {
|
||||
client,
|
||||
crypt_config,
|
||||
crypt_mode,
|
||||
cache_hint,
|
||||
cache_hint: Arc::new(cache_hint),
|
||||
cache: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
@ -44,6 +44,10 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"verify-schedule": {
|
||||
optional: true,
|
||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"keep-last": {
|
||||
optional: true,
|
||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||
@ -83,6 +87,8 @@ pub struct DataStoreConfig {
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub prune_schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub verify_schedule: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub keep_last: Option<u64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub keep_hourly: Option<u64>,
|
||||
|
@ -48,7 +48,6 @@ use proxmox::tools::fs::{
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@ -178,7 +177,7 @@ impl JobState {
|
||||
}
|
||||
} else {
|
||||
Ok(JobState::Created {
|
||||
time: epoch_now_u64()? as i64 - 30,
|
||||
time: proxmox::tools::time::epoch_i64() - 30,
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -199,7 +198,7 @@ impl Job {
|
||||
jobtype: jobtype.to_string(),
|
||||
jobname: jobname.to_string(),
|
||||
state: JobState::Created {
|
||||
time: epoch_now_u64()? as i64,
|
||||
time: proxmox::tools::time::epoch_i64(),
|
||||
},
|
||||
_lock,
|
||||
})
|
||||
|
@ -17,7 +17,7 @@ pub use lexer::*;
|
||||
mod parser;
|
||||
pub use parser::*;
|
||||
|
||||
use crate::api2::types::{Interface, NetworkConfigMethod, NetworkInterfaceType, LinuxBondMode};
|
||||
use crate::api2::types::{Interface, NetworkConfigMethod, NetworkInterfaceType, LinuxBondMode, BondXmitHashPolicy};
|
||||
|
||||
lazy_static!{
|
||||
static ref PHYSICAL_NIC_REGEX: Regex = Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap();
|
||||
@ -44,6 +44,19 @@ pub fn bond_mode_to_str(mode: LinuxBondMode) -> &'static str {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bond_xmit_hash_policy_from_str(s: &str) -> Result<BondXmitHashPolicy, Error> {
|
||||
BondXmitHashPolicy::deserialize(s.into_deserializer())
|
||||
.map_err(|_: value::Error| format_err!("invalid bond_xmit_hash_policy '{}'", s))
|
||||
}
|
||||
|
||||
pub fn bond_xmit_hash_policy_to_str(policy: &BondXmitHashPolicy) -> &'static str {
|
||||
match policy {
|
||||
BondXmitHashPolicy::layer2 => "layer2",
|
||||
BondXmitHashPolicy::layer2_3 => "layer2+3",
|
||||
BondXmitHashPolicy::layer3_4 => "layer3+4",
|
||||
}
|
||||
}
|
||||
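A small round-trip sketch for the new xmit-hash-policy helpers; the module path and the serde names of `BondXmitHashPolicy` are assumptions based on the surrounding code, not confirmed by this diff:

```rust
// Round-trip sketch for the new helpers (paths and serde names are assumed).
use proxmox_backup::api2::types::BondXmitHashPolicy;
use proxmox_backup::config::network::{bond_xmit_hash_policy_from_str, bond_xmit_hash_policy_to_str};

fn roundtrip() -> Result<(), anyhow::Error> {
    let policy: BondXmitHashPolicy = bond_xmit_hash_policy_from_str("layer2+3")?;
    assert_eq!(bond_xmit_hash_policy_to_str(&policy), "layer2+3");
    Ok(())
}
```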
|
||||
impl Interface {
|
||||
|
||||
pub fn new(name: String) -> Self {
|
||||
@ -67,6 +80,8 @@ impl Interface {
|
||||
bridge_vlan_aware: None,
|
||||
slaves: None,
|
||||
bond_mode: None,
|
||||
bond_primary: None,
|
||||
bond_xmit_hash_policy: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,6 +184,19 @@ impl Interface {
|
||||
NetworkInterfaceType::Bond => {
|
||||
let mode = self.bond_mode.unwrap_or(LinuxBondMode::balance_rr);
|
||||
writeln!(w, "\tbond-mode {}", bond_mode_to_str(mode))?;
|
||||
if let Some(primary) = &self.bond_primary {
|
||||
if mode == LinuxBondMode::active_backup {
|
||||
writeln!(w, "\tbond-primary {}", primary)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(xmit_policy) = &self.bond_xmit_hash_policy {
|
||||
if mode == LinuxBondMode::ieee802_3ad ||
|
||||
mode == LinuxBondMode::balance_xor
|
||||
{
|
||||
writeln!(w, "\tbond_xmit_hash_policy {}", bond_xmit_hash_policy_to_str(xmit_policy))?;
|
||||
}
|
||||
}
|
||||
|
||||
let slaves = self.slaves.as_ref().unwrap_or(&EMPTY_LIST);
|
||||
if slaves.is_empty() {
|
||||
|
@ -26,6 +26,8 @@ pub enum Token {
|
||||
BridgeVlanAware,
|
||||
BondSlaves,
|
||||
BondMode,
|
||||
BondPrimary,
|
||||
BondXmitHashPolicy,
|
||||
EOF,
|
||||
}
|
||||
|
||||
@ -51,7 +53,10 @@ lazy_static! {
|
||||
map.insert("bond-slaves", Token::BondSlaves);
|
||||
map.insert("bond_slaves", Token::BondSlaves);
|
||||
map.insert("bond-mode", Token::BondMode);
|
||||
map.insert("bond_mode", Token::BondMode);
|
||||
map.insert("bond-primary", Token::BondPrimary);
|
||||
map.insert("bond_primary", Token::BondPrimary);
|
||||
map.insert("bond_xmit_hash_policy", Token::BondXmitHashPolicy);
|
||||
map.insert("bond-xmit-hash-policy", Token::BondXmitHashPolicy);
|
||||
map
|
||||
};
|
||||
}
|
||||
|
@ -9,7 +9,7 @@ use regex::Regex;
|
||||
use super::helper::*;
|
||||
use super::lexer::*;
|
||||
|
||||
use super::{NetworkConfig, NetworkOrderEntry, Interface, NetworkConfigMethod, NetworkInterfaceType, bond_mode_from_str};
|
||||
use super::{NetworkConfig, NetworkOrderEntry, Interface, NetworkConfigMethod, NetworkInterfaceType, bond_mode_from_str, bond_xmit_hash_policy_from_str};
|
||||
|
||||
pub struct NetworkParser<R: BufRead> {
|
||||
input: Peekable<Lexer<R>>,
|
||||
@ -243,6 +243,18 @@ impl <R: BufRead> NetworkParser<R> {
|
||||
interface.bond_mode = Some(bond_mode_from_str(&mode)?);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::BondPrimary => {
|
||||
self.eat(Token::BondPrimary)?;
|
||||
let primary = self.next_text()?;
|
||||
interface.bond_primary = Some(primary);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::BondXmitHashPolicy => {
|
||||
self.eat(Token::BondXmitHashPolicy)?;
|
||||
let policy = bond_xmit_hash_policy_from_str(&self.next_text()?)?;
|
||||
interface.bond_xmit_hash_policy = Some(policy);
|
||||
self.eat(Token::Newline)?;
|
||||
}
|
||||
Token::Netmask => bail!("netmask is deprecated and no longer supported"),
|
||||
|
||||
_ => { // parse addon attributes
|
||||
|
@ -17,17 +17,3 @@ pub trait BackupCatalogWriter {
|
||||
fn add_fifo(&mut self, name: &CStr) -> Result<(), Error>;
|
||||
fn add_socket(&mut self, name: &CStr) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
pub struct DummyCatalogWriter();
|
||||
|
||||
impl BackupCatalogWriter for DummyCatalogWriter {
|
||||
fn start_directory(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn end_directory(&mut self) -> Result<(), Error> { Ok(()) }
|
||||
fn add_file(&mut self, _name: &CStr, _size: u64, _mtime: u64) -> Result<(), Error> { Ok(()) }
|
||||
fn add_symlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn add_hardlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn add_block_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn add_char_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn add_fifo(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
fn add_socket(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
|
||||
}
|
||||
|
@ -8,7 +8,7 @@ use std::path::Path;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pxar::{mode, Entry, EntryKind, Metadata};
|
||||
use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
|
||||
|
||||
/// Get the file permissions as `nix::Mode`
|
||||
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
|
||||
@ -114,13 +114,17 @@ fn mode_string(entry: &Entry) -> String {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
use chrono::offset::TimeZone;
|
||||
fn format_mtime(mtime: &StatxTimestamp) -> String {
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
|
||||
return s;
|
||||
}
|
||||
format!("{}.{}", mtime.secs, mtime.nanos)
|
||||
}
|
||||
|
||||
pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);
|
||||
|
||||
let (size, link) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
|
||||
@ -134,7 +138,7 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
"{} {:<13} {} {:>8} {:?}{}",
|
||||
mode_string,
|
||||
format!("{}/{}", meta.stat.uid, meta.stat.gid),
|
||||
mtime.format("%Y-%m-%d %H:%M:%S"),
|
||||
format_mtime(&meta.stat.mtime),
|
||||
size,
|
||||
entry.path(),
|
||||
link,
|
||||
@ -142,12 +146,9 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
}
|
||||
|
||||
pub fn format_multi_line_entry(entry: &Entry) -> String {
|
||||
use chrono::offset::TimeZone;
|
||||
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mtime = chrono::Local.timestamp(meta.stat.mtime.secs, meta.stat.mtime.nanos);
|
||||
|
||||
let (size, link, type_name) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
|
||||
@ -196,6 +197,6 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
|
||||
mode_string,
|
||||
meta.stat.uid,
|
||||
meta.stat.gid,
|
||||
mtime.format("%Y-%m-%d %H:%M:%S"),
|
||||
format_mtime(&meta.stat.mtime),
|
||||
)
|
||||
}
|
||||
|
@ -8,7 +8,6 @@ use lazy_static::lazy_static;
|
||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
|
||||
use crate::api2::types::{RRDMode, RRDTimeFrameResolution};
|
||||
use crate::tools::epoch_now_f64;
|
||||
|
||||
use super::*;
|
||||
|
||||
@ -42,7 +41,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
|
||||
std::fs::create_dir_all(path.parent().unwrap())?;
|
||||
|
||||
let mut map = RRD_CACHE.write().unwrap();
|
||||
let now = epoch_now_f64()?;
|
||||
let now = proxmox::tools::time::epoch_f64();
|
||||
|
||||
if let Some(rrd) = map.get_mut(rel_path) {
|
||||
rrd.update(now, value);
|
||||
|
@ -313,7 +313,13 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
fn get_index(userid: Option<Userid>, token: Option<String>, api: &Arc<ApiConfig>, parts: Parts) -> Response<Body> {
|
||||
fn get_index(
|
||||
userid: Option<Userid>,
|
||||
token: Option<String>,
|
||||
language: Option<String>,
|
||||
api: &Arc<ApiConfig>,
|
||||
parts: Parts,
|
||||
) -> Response<Body> {
|
||||
|
||||
let nodename = proxmox::tools::nodename();
|
||||
let userid = userid.as_ref().map(|u| u.as_str()).unwrap_or("");
|
||||
@ -333,10 +339,18 @@ fn get_index(userid: Option<Userid>, token: Option<String>, api: &Arc<ApiConfig>
|
||||
}
|
||||
}
|
||||
|
||||
let mut lang = String::from("");
|
||||
if let Some(language) = language {
|
||||
if Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{}.js", language)).exists() {
|
||||
lang = language;
|
||||
}
|
||||
}
|
||||
|
||||
let data = json!({
|
||||
"NodeName": nodename,
|
||||
"UserName": userid,
|
||||
"CSRFPreventionToken": token,
|
||||
"language": lang,
|
||||
"debug": debug,
|
||||
});
|
||||
|
||||
@ -441,12 +455,14 @@ async fn handle_static_file_download(filename: PathBuf) -> Result<Response<Body
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_auth_data(headers: &http::HeaderMap) -> (Option<String>, Option<String>) {
|
||||
fn extract_auth_data(headers: &http::HeaderMap) -> (Option<String>, Option<String>, Option<String>) {
|
||||
|
||||
let mut ticket = None;
|
||||
let mut language = None;
|
||||
if let Some(raw_cookie) = headers.get("COOKIE") {
|
||||
if let Ok(cookie) = raw_cookie.to_str() {
|
||||
ticket = tools::extract_auth_cookie(cookie, "PBSAuthCookie");
|
||||
ticket = tools::extract_cookie(cookie, "PBSAuthCookie");
|
||||
language = tools::extract_cookie(cookie, "PBSLangCookie");
|
||||
}
|
||||
}
|
||||
|
||||
@ -455,7 +471,7 @@ fn extract_auth_data(headers: &http::HeaderMap) -> (Option<String>, Option<Strin
|
||||
_ => None,
|
||||
};
|
||||
|
||||
(ticket, token)
|
||||
(ticket, token, language)
|
||||
}
|
||||
|
||||
fn check_auth(
|
||||
@ -526,7 +542,7 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
|
||||
) {
|
||||
// explicitly allow those calls without auth
|
||||
} else {
|
||||
let (ticket, token) = extract_auth_data(&parts.headers);
|
||||
let (ticket, token, _) = extract_auth_data(&parts.headers);
|
||||
match check_auth(&method, &ticket, &token, &user_info) {
|
||||
Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
|
||||
Err(err) => {
|
||||
@ -573,20 +589,20 @@ pub async fn handle_request(api: Arc<ApiConfig>, req: Request<Body>) -> Result<R
|
||||
}
|
||||
|
||||
if comp_len == 0 {
|
||||
let (ticket, token) = extract_auth_data(&parts.headers);
|
||||
let (ticket, token, language) = extract_auth_data(&parts.headers);
|
||||
if ticket != None {
|
||||
match check_auth(&method, &ticket, &token, &user_info) {
|
||||
Ok(userid) => {
|
||||
let new_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
|
||||
return Ok(get_index(Some(userid), Some(new_token), &api, parts));
|
||||
return Ok(get_index(Some(userid), Some(new_token), language, &api, parts));
|
||||
}
|
||||
_ => {
|
||||
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
|
||||
return Ok(get_index(None, None, &api, parts));
|
||||
return Ok(get_index(None, None, language, &api, parts));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Ok(get_index(None, None, &api, parts));
|
||||
return Ok(get_index(None, None, language, &api, parts));
|
||||
}
|
||||
} else {
|
||||
let filename = api.find_alias(&components);
|
||||
|
@ -1,7 +1,6 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::Local;
|
||||
|
||||
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
|
||||
use proxmox::const_regex;
|
||||
@ -89,7 +88,7 @@ impl UPID {
|
||||
Ok(UPID {
|
||||
pid,
|
||||
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
||||
starttime: Local::now().timestamp(),
|
||||
starttime: proxmox::tools::time::epoch_i64(),
|
||||
task_id,
|
||||
worker_type: worker_type.to_owned(),
|
||||
worker_id,
|
||||
|
@ -1,11 +1,10 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
use std::io::{Read, BufRead, BufReader};
|
||||
use std::panic::UnwindSafe;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use chrono::Local;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use lazy_static::lazy_static;
|
||||
@ -195,8 +194,8 @@ pub fn create_task_log_dirs() -> Result<(), Error> {
|
||||
/// If there is not a single line with at valid datetime, we assume the
|
||||
/// starttime to be the endtime
|
||||
pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
|
||||
let mut endtime = upid.starttime;
|
||||
let mut status = TaskState::Unknown { endtime };
|
||||
|
||||
let mut status = TaskState::Unknown { endtime: upid.starttime };
|
||||
|
||||
let path = upid.log_path();
|
||||
|
||||
@ -207,22 +206,32 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
        use std::io::SeekFrom;
        let _ = file.seek(SeekFrom::End(-8192)); // ignore errors

        let reader = BufReader::new(file);
        let mut data = Vec::with_capacity(8192);
        file.read_to_end(&mut data)?;

        for line in reader.lines() {
            let line = line?;

            let mut iter = line.splitn(2, ": ");
            if let Some(time_str) = iter.next() {
                endtime = chrono::DateTime::parse_from_rfc3339(time_str)
                    .map_err(|err| format_err!("cannot parse '{}': {}", time_str, err))?
                    .timestamp();
            } else {
                continue;
        // task logs should end with newline, we do not want it here
        if data.len() > 0 && data[data.len()-1] == b'\n' {
            data.pop();
        }
            match iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
                None => continue,
                Some(rest) => {

        let last_line = {
            let mut start = 0;
            for pos in (0..data.len()).rev() {
                if data[pos] == b'\n' {
                    start = data.len().min(pos + 1);
                    break;
                }
            }
            &data[start..]
        };

        let last_line = std::str::from_utf8(last_line)
            .map_err(|err| format_err!("upid_read_status: utf8 parse failed: {}", err))?;

        let mut iter = last_line.splitn(2, ": ");
        if let Some(time_str) = iter.next() {
            if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
                if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
                    if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
                        status = state;
                    }
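The core of the new status parsing is "take the last line of the log tail and strip the trailing newline". That piece, extracted as a standalone helper for illustration (not part of the patch):

```rust
// Last-line extraction as used above, isolated for illustration.
fn last_line(mut data: &[u8]) -> &[u8] {
    // task logs end with a newline, which we do not want here
    if data.last() == Some(&b'\n') {
        data = &data[..data.len() - 1];
    }
    match data.iter().rposition(|&b| b == b'\n') {
        Some(pos) => &data[pos + 1..],
        None => data,
    }
}

// e.g. last_line(b"...: starting cleanup\n2020-09-02T10:18:31+02:00: TASK OK\n")
//      yields b"2020-09-02T10:18:31+02:00: TASK OK"
```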
@ -352,8 +361,9 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result<Vec<TaskListInfo>, E
|
||||
},
|
||||
None => {
|
||||
println!("Detected stopped UPID {}", upid_str);
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let status = upid_read_status(&upid)
|
||||
.unwrap_or_else(|_| TaskState::Unknown { endtime: Local::now().timestamp() });
|
||||
.unwrap_or_else(|_| TaskState::Unknown { endtime: now });
|
||||
finish_list.push(TaskListInfo {
|
||||
upid, upid_str, state: Some(status)
|
||||
});
|
||||
@ -577,7 +587,7 @@ impl WorkerTask {
|
||||
pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
|
||||
let warn_count = self.data.lock().unwrap().warn_count;
|
||||
|
||||
let endtime = Local::now().timestamp();
|
||||
let endtime = proxmox::tools::time::epoch_i64();
|
||||
|
||||
if let Err(err) = result {
|
||||
TaskState::Error { message: err.to_string(), endtime }
|
||||
|
52
src/tools.rs
@ -5,11 +5,9 @@ use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::BuildHasher;
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead, ErrorKind, Read};
|
||||
use std::io::{self, BufRead, ErrorKind, Read, Seek, SeekFrom};
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
@ -35,6 +33,9 @@ pub mod statistics;
|
||||
pub mod systemd;
|
||||
pub mod nom;
|
||||
|
||||
mod parallel_handler;
|
||||
pub use parallel_handler::*;
|
||||
|
||||
mod wrapped_reader_stream;
|
||||
pub use wrapped_reader_stream::*;
|
||||
|
||||
@ -326,9 +327,9 @@ pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Extract authentication cookie from cookie header.
/// Extract a specific cookie from cookie header.
/// We assume cookie_name is already url encoded.
pub fn extract_auth_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
pub fn extract_cookie(cookie: &str, cookie_name: &str) -> Option<String> {
    for pair in cookie.split(';') {
        let (name, value) = match pair.find('=') {
            Some(i) => (pair[..i].trim(), pair[(i + 1)..].trim()),
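Both call sites in `src/server/rest.rs` now go through this generic helper; a short usage sketch (the cookie values below are made up):

```rust
// Usage sketch for the renamed helper (cookie values are made up).
let header = "PBSAuthCookie=PBS%3Aroot%40pam%3ATICKET; PBSLangCookie=de";
let ticket = proxmox_backup::tools::extract_cookie(header, "PBSAuthCookie");
let lang = proxmox_backup::tools::extract_cookie(header, "PBSLangCookie");
assert_eq!(lang.as_deref(), Some("de"));
```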
@ -547,18 +548,6 @@ pub fn file_get_non_comment_lines<P: AsRef<Path>>(
    }))
}

pub fn epoch_now() -> Result<Duration, SystemTimeError> {
    SystemTime::now().duration_since(UNIX_EPOCH)
}

pub fn epoch_now_f64() -> Result<f64, SystemTimeError> {
    Ok(epoch_now()?.as_secs_f64())
}

pub fn epoch_now_u64() -> Result<u64, SystemTimeError> {
    Ok(epoch_now()?.as_secs())
}
|
||||
pub fn setup_safe_path_env() {
|
||||
std::env::set_var("PATH", "/sbin:/bin:/usr/sbin:/usr/bin");
|
||||
// Make %ENV safer - as suggested by https://perldoc.perl.org/perlsec.html
|
||||
@ -577,3 +566,32 @@ pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
|
||||
None => &[],
|
||||
}
|
||||
}
|
||||
|
||||
/// Seeks to start of file and computes the SHA256 hash
pub fn compute_file_csum(file: &mut File) -> Result<([u8; 32], u64), Error> {

    file.seek(SeekFrom::Start(0))?;

    let mut hasher = openssl::sha::Sha256::new();
    let mut buffer = proxmox::tools::vec::undefined(256*1024);
    let mut size: u64 = 0;

    loop {
        let count = match file.read(&mut buffer) {
            Ok(count) => count,
            Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => {
                continue;
            }
            Err(err) => return Err(err.into()),
        };
        if count == 0 {
            break;
        }
        size += count as u64;
        hasher.update(&buffer[..count]);
    }

    let csum = hasher.finish();

    Ok((csum, size))
}
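`compute_file_csum` moved here from the backup reader so that both the client and the sync code can reuse it; a short usage sketch:

```rust
// Usage sketch for the relocated helper.
use std::fs::File;
use anyhow::Error;
use proxmox_backup::tools::compute_file_csum;

fn print_csum(path: &str) -> Result<(), Error> {
    let mut file = File::open(path)?;
    let (csum, size) = compute_file_csum(&mut file)?;
    println!("{} bytes, sha256 {}", size, proxmox::tools::digest_to_hex(&csum));
    Ok(())
}
```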
|
@ -1,5 +1,4 @@
|
||||
use anyhow::{Error};
|
||||
use chrono::{TimeZone, Local};
|
||||
use std::io::Write;
|
||||
|
||||
/// Log messages with timestamps into files
|
||||
@ -56,7 +55,10 @@ impl FileLogger {
|
||||
stdout.write_all(b"\n").unwrap();
|
||||
}
|
||||
|
||||
let line = format!("{}: {}\n", Local.timestamp(Local::now().timestamp(), 0).to_rfc3339(), msg);
|
||||
let now = proxmox::tools::time::epoch_i64();
|
||||
let rfc3339 = proxmox::tools::time::epoch_to_rfc3339(now).unwrap();
|
||||
|
||||
let line = format!("{}: {}\n", rfc3339, msg);
|
||||
self.file.write_all(line.as_bytes()).unwrap();
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,5 @@
|
||||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use chrono::{Local, TimeZone};
|
||||
|
||||
pub fn strip_server_file_expenstion(name: &str) -> String {
|
||||
|
||||
@ -25,8 +24,12 @@ pub fn render_epoch(value: &Value, _record: &Value) -> Result<String, Error> {
|
||||
if value.is_null() { return Ok(String::new()); }
|
||||
let text = match value.as_i64() {
|
||||
Some(epoch) => {
|
||||
Local.timestamp(epoch, 0).format("%c").to_string()
|
||||
if let Ok(epoch_string) = proxmox::tools::time::strftime_local("%c", epoch as i64) {
|
||||
epoch_string
|
||||
} else {
|
||||
epoch.to_string()
|
||||
}
|
||||
},
|
||||
None => {
|
||||
value.to_string()
|
||||
}
|
||||
|
133
src/tools/parallel_handler.rs
Normal file
@ -0,0 +1,133 @@
|
||||
use std::thread::{JoinHandle};
use std::sync::{Arc, Mutex};
use crossbeam_channel::{bounded, Sender};
use anyhow::{format_err, Error};

/// A handle to send data to the worker thread (implements clone)
pub struct SendHandle<I> {
    input: Sender<I>,
    abort: Arc<Mutex<Option<String>>>,
}

/// A thread pool which runs the supplied closure
///
/// The send command sends data to the worker threads. If one handler
/// returns an error, we mark the channel as failed and it is no
/// longer possible to send data.
///
/// When done, the 'complete()' method needs to be called to check for
/// outstanding errors.
pub struct ParallelHandler<I> {
    handles: Vec<JoinHandle<()>>,
    name: String,
    input: SendHandle<I>,
}

impl <I: Send + Sync + 'static> SendHandle<I> {

    /// Returns the first error that happened, if any
    pub fn check_abort(&self) -> Result<(), Error> {
        let guard = self.abort.lock().unwrap();
        if let Some(err_msg) = &*guard {
            return Err(format_err!("{}", err_msg));
        }
        Ok(())
    }

    /// Send data to the worker threads
    pub fn send(&self, input: I) -> Result<(), Error> {
        self.check_abort()?;
        self.input.send(input)?;
        Ok(())
    }
}
|
||||
|
||||
impl <I> Clone for SendHandle<I> {
|
||||
fn clone(&self) -> Self {
|
||||
Self { input: self.input.clone(), abort: self.abort.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl <I: Send + Sync + 'static> ParallelHandler<I> {
|
||||
|
||||
/// Create a new thread pool, each thread processing incoming data
|
||||
/// with 'handler_fn'.
|
||||
pub fn new<F>(
|
||||
name: &str,
|
||||
threads: usize,
|
||||
handler_fn: F,
|
||||
) -> Self
|
||||
where F: Fn(I) -> Result<(), Error> + Send + Sync + Clone + 'static,
|
||||
{
|
||||
let mut handles = Vec::new();
|
||||
let (input_tx, input_rx) = bounded::<I>(threads);
|
||||
|
||||
let abort = Arc::new(Mutex::new(None));
|
||||
|
||||
for i in 0..threads {
|
||||
let input_rx = input_rx.clone();
|
||||
let abort = abort.clone();
|
||||
let handler_fn = handler_fn.clone();
|
||||
handles.push(
|
||||
std::thread::Builder::new()
|
||||
.name(format!("{} ({})", name, i))
|
||||
.spawn(move || {
|
||||
loop {
|
||||
let data = match input_rx.recv() {
|
||||
Ok(data) => data,
|
||||
Err(_) => return,
|
||||
};
|
||||
match (handler_fn)(data) {
|
||||
Ok(()) => {},
|
||||
Err(err) => {
|
||||
let mut guard = abort.lock().unwrap();
|
||||
if guard.is_none() {
|
||||
*guard = Some(err.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
Self {
|
||||
handles,
|
||||
name: name.to_string(),
|
||||
input: SendHandle {
|
||||
input: input_tx,
|
||||
abort,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a cloneable channel to send data to the worker threads
|
||||
pub fn channel(&self) -> SendHandle<I> {
|
||||
self.input.clone()
|
||||
}
|
||||
|
||||
/// Send data to the worker threads
|
||||
pub fn send(&self, input: I) -> Result<(), Error> {
|
||||
self.input.send(input)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Wait for worker threads to complete and check for errors
    pub fn complete(self) -> Result<(), Error> {
        self.input.check_abort()?;
        drop(self.input);
        let mut msg = Vec::new();
        for (i, handle) in self.handles.into_iter().enumerate() {
            if let Err(panic) = handle.join() {
                match panic.downcast::<&str>() {
                    Ok(panic_msg) => msg.push(format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)),
                    Err(_) => msg.push(format!("thread {} ({}) panicked", self.name, i)),
                }
            }
        }
        if msg.is_empty() {
            return Ok(());
        }
        Err(format_err!("{}", msg.join("\n")))
    }
}
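A hypothetical usage example for the new `ParallelHandler` (type and method names are taken from the file above; the item type and closure are made up for illustration):

```rust
// Hypothetical usage of the new ParallelHandler (names as defined in this file).
use anyhow::Error;
use proxmox_backup::tools::ParallelHandler;

fn example() -> Result<(), Error> {
    let pool = ParallelHandler::new("demo worker", 4, |n: u64| {
        // each worker thread handles one item at a time
        println!("{}^2 = {}", n, n * n);
        Ok(())
    });

    for n in 0..100u64 {
        pool.send(n)?; // fails early if a handler already reported an error
    }

    // wait for all worker threads and surface the first error, if any
    pool.complete()
}
```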
@ -2,7 +2,6 @@ pub mod types;
|
||||
pub mod config;
|
||||
|
||||
mod parse_time;
|
||||
pub mod tm_editor;
|
||||
pub mod time;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
@ -145,6 +145,9 @@ fn parse_date_time_comp(max: usize) -> impl Fn(&str) -> IResult<&str, DateTimeVa
|
||||
let (i, value) = parse_time_comp(max)(i)?;
|
||||
|
||||
if let (i, Some(end)) = opt(preceded(tag(".."), parse_time_comp(max)))(i)? {
|
||||
if value > end {
|
||||
return Err(parse_error(i, "range start is bigger than end"));
|
||||
}
|
||||
return Ok((i, DateTimeValue::Range(value, end)))
|
||||
}
|
||||
|
||||
@ -158,10 +161,17 @@ fn parse_date_time_comp(max: usize) -> impl Fn(&str) -> IResult<&str, DateTimeVa
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_date_time_comp_list(max: usize) -> impl Fn(&str) -> IResult<&str, Vec<DateTimeValue>> {
|
||||
fn parse_date_time_comp_list(start: u32, max: usize) -> impl Fn(&str) -> IResult<&str, Vec<DateTimeValue>> {
|
||||
move |i: &str| {
|
||||
if i.starts_with("*") {
|
||||
return Ok((&i[1..], Vec::new()));
|
||||
let i = &i[1..];
|
||||
if i.starts_with("/") {
|
||||
let (n, repeat) = parse_time_comp(max)(&i[1..])?;
|
||||
if repeat > 0 {
|
||||
return Ok((n, vec![DateTimeValue::Repeated(start, repeat)]));
|
||||
}
|
||||
}
|
||||
return Ok((i, Vec::new()));
|
||||
}
|
||||
|
||||
separated_nonempty_list(tag(","), parse_date_time_comp(max))(i)
|
||||
@ -171,9 +181,9 @@ fn parse_date_time_comp_list(max: usize) -> impl Fn(&str) -> IResult<&str, Vec<D
|
||||
fn parse_time_spec(i: &str) -> IResult<&str, (Vec<DateTimeValue>, Vec<DateTimeValue>, Vec<DateTimeValue>)> {
|
||||
|
||||
let (i, (hour, minute, opt_second)) = tuple((
|
||||
parse_date_time_comp_list(24),
|
||||
preceded(tag(":"), parse_date_time_comp_list(60)),
|
||||
opt(preceded(tag(":"), parse_date_time_comp_list(60))),
|
||||
parse_date_time_comp_list(0, 24),
|
||||
preceded(tag(":"), parse_date_time_comp_list(0, 60)),
|
||||
opt(preceded(tag(":"), parse_date_time_comp_list(0, 60))),
|
||||
))(i)?;
|
||||
|
||||
if let Some(second) = opt_second {
|
||||
@ -183,6 +193,25 @@ fn parse_time_spec(i: &str) -> IResult<&str, (Vec<DateTimeValue>, Vec<DateTimeVa
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_date_spec(i: &str) -> IResult<&str, (Vec<DateTimeValue>, Vec<DateTimeValue>, Vec<DateTimeValue>)> {
|
||||
|
||||
// TODO: implement ~ for days (man systemd.time)
|
||||
if let Ok((i, (year, month, day))) = tuple((
|
||||
parse_date_time_comp_list(0, 2200), // the upper limit for systemd, stay compatible
|
||||
preceded(tag("-"), parse_date_time_comp_list(1, 13)),
|
||||
preceded(tag("-"), parse_date_time_comp_list(1, 32)),
|
||||
))(i) {
|
||||
Ok((i, (year, month, day)))
|
||||
} else if let Ok((i, (month, day))) = tuple((
|
||||
parse_date_time_comp_list(1, 13),
|
||||
preceded(tag("-"), parse_date_time_comp_list(1, 32)),
|
||||
))(i) {
|
||||
Ok((i, (Vec::new(), month, day)))
|
||||
} else {
|
||||
Err(parse_error(i, "invalid date spec"))
|
||||
}
|
||||
}
|
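`parse_date_spec` accepts either a full `year-month-day` spec or a shorter `month-day` form; components may themselves be lists, ranges or `*`. A toy stand-in that only classifies the two shapes (it does not reimplement the real parser):

```rust
// Toy classifier for the two date-spec shapes accepted above; list, range
// and "*" components are handled by the real parser, not here.
fn classify_date_spec(s: &str) -> Option<&'static str> {
    match s.split('-').count() {
        3 => Some("year-month-day"),
        2 => Some("month-day"),
        _ => None,
    }
}

fn main() {
    assert_eq!(classify_date_spec("2020-07-31"), Some("year-month-day"));
    assert_eq!(classify_date_spec("02-28"), Some("month-day"));
    assert_eq!(classify_date_spec("12:00"), None); // time specs are parsed separately
    println!("date spec shapes ok");
}
```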
||||
|
||||
pub fn parse_calendar_event(i: &str) -> Result<CalendarEvent, Error> {
|
||||
parse_complete_line("calendar event", i, parse_calendar_event_incomplete)
|
||||
}
|
||||
@ -191,7 +220,7 @@ fn parse_calendar_event_incomplete(mut i: &str) -> IResult<&str, CalendarEvent>
|
||||
|
||||
let mut has_dayspec = false;
|
||||
let mut has_timespec = false;
|
||||
let has_datespec = false;
|
||||
let mut has_datespec = false;
|
||||
|
||||
let mut event = CalendarEvent::default();
|
||||
|
||||
@ -228,8 +257,52 @@ fn parse_calendar_event_incomplete(mut i: &str) -> IResult<&str, CalendarEvent>
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
"monthly" | "yearly" | "quarterly" | "semiannually" => {
|
||||
return Err(parse_error(i, "unimplemented date or time specification"));
|
||||
"monthly" => {
|
||||
return Ok(("", CalendarEvent {
|
||||
hour: vec![DateTimeValue::Single(0)],
|
||||
minute: vec![DateTimeValue::Single(0)],
|
||||
second: vec![DateTimeValue::Single(0)],
|
||||
day: vec![DateTimeValue::Single(1)],
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
"yearly" | "annually" => {
|
||||
return Ok(("", CalendarEvent {
|
||||
hour: vec![DateTimeValue::Single(0)],
|
||||
minute: vec![DateTimeValue::Single(0)],
|
||||
second: vec![DateTimeValue::Single(0)],
|
||||
day: vec![DateTimeValue::Single(1)],
|
||||
month: vec![DateTimeValue::Single(1)],
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
"quarterly" => {
|
||||
return Ok(("", CalendarEvent {
|
||||
hour: vec![DateTimeValue::Single(0)],
|
||||
minute: vec![DateTimeValue::Single(0)],
|
||||
second: vec![DateTimeValue::Single(0)],
|
||||
day: vec![DateTimeValue::Single(1)],
|
||||
month: vec![
|
||||
DateTimeValue::Single(1),
|
||||
DateTimeValue::Single(4),
|
||||
DateTimeValue::Single(7),
|
||||
DateTimeValue::Single(10),
|
||||
],
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
"semiannually" | "semi-annually" => {
|
||||
return Ok(("", CalendarEvent {
|
||||
hour: vec![DateTimeValue::Single(0)],
|
||||
minute: vec![DateTimeValue::Single(0)],
|
||||
second: vec![DateTimeValue::Single(0)],
|
||||
day: vec![DateTimeValue::Single(1)],
|
||||
month: vec![
|
||||
DateTimeValue::Single(1),
|
||||
DateTimeValue::Single(7),
|
||||
],
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
_ => { /* continue */ }
|
||||
}
|
||||
@ -246,7 +319,13 @@ fn parse_calendar_event_incomplete(mut i: &str) -> IResult<&str, CalendarEvent>
|
||||
for range in range_list { event.days.insert(range); }
|
||||
}
|
||||
|
||||
// todo: support date specs
|
||||
if let (n, Some((year, month, day))) = opt(parse_date_spec)(i)? {
|
||||
event.year = year;
|
||||
event.month = month;
|
||||
event.day = day;
|
||||
has_datespec = true;
|
||||
i = space0(n)?.0;
|
||||
}
|
||||
|
||||
if let (n, Some((hour, minute, second))) = opt(parse_time_spec)(i)? {
|
||||
event.hour = hour;
|
||||
|
@ -1,8 +1,11 @@
|
||||
use anyhow::{bail, Error};
|
||||
use std::convert::TryInto;
|
||||
|
||||
use anyhow::Error;
|
||||
use bitflags::bitflags;
|
||||
|
||||
use proxmox::tools::time::TmEditor;
|
||||
|
||||
pub use super::parse_time::*;
|
||||
use super::tm_editor::*;
|
||||
|
||||
bitflags!{
|
||||
#[derive(Default)]
|
||||
@ -17,7 +20,7 @@ bitflags!{
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum DateTimeValue {
|
||||
Single(u32),
|
||||
Range(u32, u32),
|
||||
@ -54,7 +57,7 @@ impl DateTimeValue {
|
||||
let mut next: Option<u32> = None;
|
||||
let mut set_next = |v: u32| {
|
||||
if let Some(n) = next {
|
||||
if v > n { next = Some(v); }
|
||||
if v < n { next = Some(v); }
|
||||
} else {
|
||||
next = Some(v);
|
||||
}
|
||||
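The one-character fix above (`v > n` to `v < n`) changes the selection from keeping the largest candidate to keeping the smallest candidate above the current value, which is what `find_next` needs. A simplified standalone illustration with plain integers (the real code also handles `Range` and `Repeated` values):

```rust
// Among all candidates greater than `current`, keep the smallest one; the
// original closure kept the largest, which skipped over valid values.
fn find_next(list: &[u32], current: u32) -> Option<u32> {
    list.iter().copied().filter(|v| *v > current).min()
}

fn main() {
    // hours 5 and 10 configured, currently at hour 6 -> next match is 10, not 5
    assert_eq!(find_next(&[5, 10], 6), Some(10));
    // order in the list must not matter
    assert_eq!(find_next(&[10, 5], 2), Some(5));
    // nothing left in this field
    assert_eq!(find_next(&[5, 10], 11), None);
    println!("find_next semantics ok");
}
```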
@ -91,7 +94,7 @@ impl DateTimeValue {
|
||||
/// Calendar events may be used to refer to one or more points in time in a
|
||||
/// single expression. They are designed after the systemd.time Calendar Events
|
||||
/// specification, but are not guaranteed to be 100% compatible.
|
||||
#[derive(Default, Debug)]
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct CalendarEvent {
|
||||
/// the days in a week this event should trigger
|
||||
pub days: WeekDays,
|
||||
@ -101,17 +104,15 @@ pub struct CalendarEvent {
|
||||
pub minute: Vec<DateTimeValue>,
|
||||
/// the hour(s) this event should trigger
|
||||
pub hour: Vec<DateTimeValue>,
|
||||
/* FIXME: TODO
|
||||
/// the day(s) in a month this event should trigger
|
||||
pub day: Vec<DateTimeValue>,
|
||||
/// the month(s) in a year this event should trigger
|
||||
pub month: Vec<DateTimeValue>,
|
||||
/// the year(s) this event should trigger
|
||||
pub year: Vec<DateTimeValue>,
|
||||
*/
|
||||
}
|
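For reference, a standalone mirror of how an event string decomposes into the fields above; the field values are assumptions based on the parser hunks and systemd.time defaults, and the type is a stand-in, not the real `CalendarEvent`:

```rust
// Stand-in struct, only to show which fields an event string fills.
#[derive(Debug, Default)]
struct EventSketch {
    days: Vec<&'static str>, // stands in for the WeekDays bitflags
    hour: Vec<u32>,
    minute: Vec<u32>,
    second: Vec<u32>,
    day: Vec<u32>,
    month: Vec<u32>,
    year: Vec<u32>,
}

fn main() {
    // "mon..fri 02:30" - weekday range plus an hour:minute time spec;
    // omitted seconds are assumed to default to 0, as in systemd.time
    let weekday_event = EventSketch {
        days: vec!["mon", "tue", "wed", "thu", "fri"],
        hour: vec![2],
        minute: vec![30],
        second: vec![0],
        ..Default::default()
    };
    // "monthly" shortcut from the parser above: 1st of every month, 00:00:00
    let monthly = EventSketch {
        day: vec![1],
        hour: vec![0],
        minute: vec![0],
        second: vec![0],
        ..Default::default()
    };
    println!("{:?}\n{:?}", weekday_event, monthly);
}
```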
||||
|
||||
#[derive(Default)]
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct TimeSpan {
|
||||
pub nsec: u64,
|
||||
pub usec: u64,
|
||||
@ -155,105 +156,135 @@ pub fn compute_next_event(
|
||||
event: &CalendarEvent,
|
||||
last: i64,
|
||||
utc: bool,
|
||||
) -> Result<i64, Error> {
|
||||
) -> Result<Option<i64>, Error> {
|
||||
|
||||
let last = last + 1; // at least one second later
|
||||
|
||||
let all_days = event.days.is_empty() || event.days.is_all();
|
||||
|
||||
let mut t = TmEditor::new(last, utc)?;
|
||||
let mut t = TmEditor::with_epoch(last, utc)?;
|
||||
|
||||
let mut count = 0;
|
||||
|
||||
loop {
|
||||
if count > 1000 { // should not happen
|
||||
bail!("unable to compute next calendar event");
|
||||
// cancel after 1000 loops
|
||||
if count > 1000 {
|
||||
return Ok(None);
|
||||
} else {
|
||||
count += 1;
|
||||
}
|
||||
|
||||
if !all_days && t.changes.contains(TMChanges::WDAY) { // match day first
|
||||
let day_num = t.day_num();
|
||||
let day = WeekDays::from_bits(1<<day_num).unwrap();
|
||||
if event.days.contains(day) {
|
||||
t.changes.remove(TMChanges::WDAY);
|
||||
if !event.year.is_empty() {
|
||||
let year: u32 = t.year().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.year, year) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.year, year) {
|
||||
t.add_years((n - year).try_into()?)?;
|
||||
continue;
|
||||
} else {
|
||||
// if we have no valid year, we cannot find a correct timestamp
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !event.month.is_empty() {
|
||||
let month: u32 = t.month().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.month, month) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.month, month) {
|
||||
t.add_months((n - month).try_into()?)?;
|
||||
} else {
|
||||
// if we could not find a valid month, retry next year
|
||||
t.add_years(1)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if !event.day.is_empty() {
|
||||
let day: u32 = t.day().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.day, day) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.day, day) {
|
||||
t.add_days((n - day).try_into()?)?;
|
||||
} else {
|
||||
// if we could not find a valid mday, retry next month
|
||||
t.add_months(1)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if !all_days { // match day first
|
||||
let day_num: u32 = t.day_num().try_into()?;
|
||||
let day = WeekDays::from_bits(1<<day_num).unwrap();
|
||||
if !event.days.contains(day) {
|
||||
if let Some(n) = ((day_num+1)..7)
|
||||
.find(|d| event.days.contains(WeekDays::from_bits(1<<d).unwrap()))
|
||||
{
|
||||
// try next day
|
||||
t.add_days(n - day_num, true);
|
||||
continue;
|
||||
t.add_days((n - day_num).try_into()?)?;
|
||||
} else {
|
||||
// try next week
|
||||
t.add_days(7 - day_num, true);
|
||||
continue;
|
||||
t.add_days((7 - day_num).try_into()?)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// this day
|
||||
if !event.hour.is_empty() && t.changes.contains(TMChanges::HOUR) {
|
||||
let hour = t.hour() as u32;
|
||||
if DateTimeValue::list_contains(&event.hour, hour) {
|
||||
t.changes.remove(TMChanges::HOUR);
|
||||
} else {
|
||||
if !event.hour.is_empty() {
|
||||
let hour = t.hour().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.hour, hour) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.hour, hour) {
|
||||
// test next hour
|
||||
t.set_time(n as libc::c_int, 0, 0);
|
||||
continue;
|
||||
t.set_time(n.try_into()?, 0, 0)?;
|
||||
} else {
|
||||
// test next day
|
||||
t.add_days(1, true);
|
||||
continue;
|
||||
t.add_days(1)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// this hour
|
||||
if !event.minute.is_empty() && t.changes.contains(TMChanges::MIN) {
|
||||
let minute = t.min() as u32;
|
||||
if DateTimeValue::list_contains(&event.minute, minute) {
|
||||
t.changes.remove(TMChanges::MIN);
|
||||
} else {
|
||||
if !event.minute.is_empty() {
|
||||
let minute = t.min().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.minute, minute) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.minute, minute) {
|
||||
// test next minute
|
||||
t.set_min_sec(n as libc::c_int, 0);
|
||||
continue;
|
||||
t.set_min_sec(n.try_into()?, 0)?;
|
||||
} else {
|
||||
// test next hour
|
||||
t.set_time(t.hour() + 1, 0, 0);
|
||||
continue;
|
||||
t.set_time(t.hour() + 1, 0, 0)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// this minute
|
||||
if !event.second.is_empty() && t.changes.contains(TMChanges::SEC) {
|
||||
let second = t.sec() as u32;
|
||||
if DateTimeValue::list_contains(&event.second, second) {
|
||||
t.changes.remove(TMChanges::SEC);
|
||||
} else {
|
||||
if !event.second.is_empty() {
|
||||
let second = t.sec().try_into()?;
|
||||
if !DateTimeValue::list_contains(&event.second, second) {
|
||||
if let Some(n) = DateTimeValue::find_next(&event.second, second) {
|
||||
// test next second
|
||||
t.set_sec(n as libc::c_int);
|
||||
continue;
|
||||
t.set_sec(n.try_into()?)?;
|
||||
} else {
|
||||
// test next min
|
||||
t.set_min_sec(t.min() + 1, 0);
|
||||
continue;
|
||||
t.set_min_sec(t.min() + 1, 0)?;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let next = t.into_epoch()?;
|
||||
return Ok(next)
|
||||
return Ok(Some(next))
|
||||
}
|
||||
}
|
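The return type change to `Result<Option<i64>, Error>` lets impossible events (such as `2021-02-29`) report `Ok(None)` instead of bailing with an error after the loop limit. A sketch of how a caller might treat the three cases, using stand-in types rather than the real ones:

```rust
// Stand-in for a scheduler consuming compute_next_event's new return type.
fn schedule_next(next: Result<Option<i64>, String>) -> String {
    match next {
        Ok(Some(t)) => format!("next run at epoch {}", t),
        Ok(None) => "event will never trigger, disabling schedule".to_string(),
        Err(e) => format!("scheduling error: {}", e),
    }
}

fn main() {
    println!("{}", schedule_next(Ok(Some(1_596_153_600))));
    println!("{}", schedule_next(Ok(None)));
}
```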
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use anyhow::bail;
|
||||
|
||||
use super::*;
|
||||
use proxmox::tools::time::*;
|
||||
|
||||
@ -280,7 +311,7 @@ mod test {
|
||||
};
|
||||
|
||||
match compute_next_event(&event, last, true) {
|
||||
Ok(next) => {
|
||||
Ok(Some(next)) => {
|
||||
if next == expect {
|
||||
println!("next {:?} => {}", event, next);
|
||||
} else {
|
||||
@ -288,12 +319,25 @@ mod test {
|
||||
event, gmtime(next), gmtime(expect));
|
||||
}
|
||||
}
|
||||
Ok(None) => bail!("next {:?} failed to find a timestamp", event),
|
||||
Err(err) => bail!("compute next for '{}' failed - {}", v, err),
|
||||
}
|
||||
|
||||
Ok(expect)
|
||||
};
|
||||
|
||||
let test_never = |v: &'static str, last: i64| -> Result<(), Error> {
|
||||
let event = match parse_calendar_event(v) {
|
||||
Ok(event) => event,
|
||||
Err(err) => bail!("parsing '{}' failed - {}", v, err),
|
||||
};
|
||||
|
||||
match compute_next_event(&event, last, true)? {
|
||||
None => Ok(()),
|
||||
Some(next) => bail!("compute next for '{}' succeeded, but expected fail - result {}", v, next),
|
||||
}
|
||||
};
|
||||
|
||||
const MIN: i64 = 60;
|
||||
const HOUR: i64 = 3600;
|
||||
const DAY: i64 = 3600*24;
|
||||
@ -320,6 +364,13 @@ mod test {
|
||||
test_value("sat", THURSDAY_00_00, THURSDAY_00_00 + 2*DAY)?;
|
||||
test_value("sun", THURSDAY_00_00, THURSDAY_00_00 + 3*DAY)?;
|
||||
|
||||
// test multiple values for a single field
|
||||
// and test that the order does not matter
|
||||
test_value("5,10:4,8", THURSDAY_00_00, THURSDAY_00_00 + 5*HOUR + 4*MIN)?;
|
||||
test_value("10,5:8,4", THURSDAY_00_00, THURSDAY_00_00 + 5*HOUR + 4*MIN)?;
|
||||
test_value("6,4..10:23,5/5", THURSDAY_00_00, THURSDAY_00_00 + 4*HOUR + 5*MIN)?;
|
||||
test_value("4..10,6:5/5,23", THURSDAY_00_00, THURSDAY_00_00 + 4*HOUR + 5*MIN)?;
|
||||
|
||||
// test month wrapping
|
||||
test_value("sat", JUL_31_2020, JUL_31_2020 + 1*DAY)?;
|
||||
test_value("sun", JUL_31_2020, JUL_31_2020 + 2*DAY)?;
|
||||
@ -361,6 +412,23 @@ mod test {
|
||||
n = test_value("1:0", n, THURSDAY_00_00 + i*DAY + HOUR)?;
|
||||
}
|
||||
|
||||
// test date functionality
|
||||
|
||||
test_value("2020-07-31", 0, JUL_31_2020)?;
|
||||
test_value("02-28", 0, (31+27)*DAY)?;
|
||||
test_value("02-29", 0, 2*365*DAY + (31+28)*DAY)?; // 1972-02-29
|
||||
test_value("1965/5-01-01", -1, THURSDAY_00_00)?;
|
||||
test_value("2020-7..9-2/2", JUL_31_2020, JUL_31_2020 + 2*DAY)?;
|
||||
test_value("2020,2021-12-31", JUL_31_2020, DEC_31_2020)?;
|
||||
|
||||
test_value("monthly", 0, 31*DAY)?;
|
||||
test_value("quarterly", 0, (31+28+31)*DAY)?;
|
||||
test_value("semiannually", 0, (31+28+31+30+31+30)*DAY)?;
|
||||
test_value("yearly", 0, (365)*DAY)?;
|
||||
|
||||
test_never("2021-02-29", 0)?;
|
||||
test_never("02-30", 0)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -1,185 +0,0 @@
|
||||
use anyhow::Error;
|
||||
use bitflags::bitflags;
|
||||
|
||||
use proxmox::tools::time::*;
|
||||
|
||||
bitflags!{
|
||||
#[derive(Default)]
|
||||
pub struct TMChanges: u8 {
|
||||
const SEC = 1;
|
||||
const MIN = 2;
|
||||
const HOUR = 4;
|
||||
const MDAY = 8;
|
||||
const MON = 16;
|
||||
const YEAR = 32;
|
||||
const WDAY = 64;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TmEditor {
|
||||
utc: bool,
|
||||
t: libc::tm,
|
||||
pub changes: TMChanges,
|
||||
}
|
||||
|
||||
fn is_leap_year(year: libc::c_int) -> bool {
|
||||
if year % 4 != 0 { return false; }
|
||||
if year % 100 != 0 { return true; }
|
||||
if year % 400 != 0 { return false; }
|
||||
return true;
|
||||
}
|
||||
|
||||
fn days_in_month(mon: libc::c_int, year: libc::c_int) -> libc::c_int {
|
||||
|
||||
let mon = mon % 12;
|
||||
|
||||
static MAP: &[libc::c_int] = &[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
|
||||
|
||||
if mon == 1 && is_leap_year(year) { return 29; }
|
||||
|
||||
MAP[mon as usize]
|
||||
}
|
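This hunk removes the old `tm_editor.rs`; for the record, a quick standalone check of the leap-year and month-length helpers it contained (same logic, plain `i32` instead of `libc::c_int`):

```rust
fn is_leap_year(year: i32) -> bool {
    if year % 4 != 0 { return false; }
    if year % 100 != 0 { return true; }
    if year % 400 != 0 { return false; }
    true
}

fn days_in_month(mon: i32, year: i32) -> i32 {
    let mon = mon % 12;
    const MAP: [i32; 12] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
    // tm months are 0-based, so 1 is February
    if mon == 1 && is_leap_year(year) { return 29; }
    MAP[mon as usize]
}

fn main() {
    assert!(is_leap_year(2000));  // divisible by 400
    assert!(!is_leap_year(1900)); // divisible by 100 but not 400
    assert!(is_leap_year(2020));
    assert_eq!(days_in_month(1, 2020), 29); // February 2020
    assert_eq!(days_in_month(1, 2021), 28);
    println!("leap year helpers ok");
}
```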
||||
|
||||
impl TmEditor {
|
||||
|
||||
pub fn new(epoch: i64, utc: bool) -> Result<Self, Error> {
|
||||
let mut t = if utc { gmtime(epoch)? } else { localtime(epoch)? };
|
||||
t.tm_year += 1900; // real years for clarity
|
||||
Ok(Self { utc, t, changes: TMChanges::all() })
|
||||
}
|
||||
|
||||
pub fn into_epoch(mut self) -> Result<i64, Error> {
|
||||
self.t.tm_year -= 1900;
|
||||
let epoch = if self.utc { timegm(self.t)? } else { timelocal(self.t)? };
|
||||
Ok(epoch)
|
||||
}
|
||||
|
||||
pub fn add_days(&mut self, days: libc::c_int, reset_time: bool) {
|
||||
if days == 0 { return; }
|
||||
if reset_time {
|
||||
self.t.tm_hour = 0;
|
||||
self.t.tm_min = 0;
|
||||
self.t.tm_sec = 0;
|
||||
self.changes.insert(TMChanges::HOUR|TMChanges::MIN|TMChanges::SEC);
|
||||
}
|
||||
self.t.tm_mday += days;
|
||||
self.t.tm_wday += days;
|
||||
self.changes.insert(TMChanges::MDAY|TMChanges::WDAY);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn hour(&self) -> libc::c_int { self.t.tm_hour }
|
||||
pub fn min(&self) -> libc::c_int { self.t.tm_min }
|
||||
pub fn sec(&self) -> libc::c_int { self.t.tm_sec }
|
||||
|
||||
// Note: tm_wday (0-6, Sunday = 0) => convert to Sunday = 6
|
||||
pub fn day_num(&self) -> libc::c_int {
|
||||
(self.t.tm_wday + 6) % 7
|
||||
}
|
||||
|
||||
pub fn set_time(&mut self, hour: libc::c_int, min: libc::c_int, sec: libc::c_int) {
|
||||
self.t.tm_hour = hour;
|
||||
self.t.tm_min = min;
|
||||
self.t.tm_sec = sec;
|
||||
self.changes.insert(TMChanges::HOUR|TMChanges::MIN|TMChanges::SEC);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_min_sec(&mut self, min: libc::c_int, sec: libc::c_int) {
|
||||
self.t.tm_min = min;
|
||||
self.t.tm_sec = sec;
|
||||
self.changes.insert(TMChanges::MIN|TMChanges::SEC);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
fn wrap_time(&mut self) {
|
||||
|
||||
// sec: 0..59
|
||||
if self.t.tm_sec >= 60 {
|
||||
self.t.tm_min += self.t.tm_sec / 60;
|
||||
self.t.tm_sec %= 60;
|
||||
self.changes.insert(TMChanges::SEC|TMChanges::MIN);
|
||||
}
|
||||
|
||||
// min: 0..59
|
||||
if self.t.tm_min >= 60 {
|
||||
self.t.tm_hour += self.t.tm_min / 60;
|
||||
self.t.tm_min %= 60;
|
||||
self.changes.insert(TMChanges::MIN|TMChanges::HOUR);
|
||||
}
|
||||
|
||||
// hour: 0..23
|
||||
if self.t.tm_hour >= 24 {
|
||||
self.t.tm_mday += self.t.tm_hour / 24;
|
||||
self.t.tm_wday += self.t.tm_hour / 24;
|
||||
self.t.tm_hour %= 24;
|
||||
self.changes.insert(TMChanges::HOUR|TMChanges::MDAY|TMChanges::WDAY);
|
||||
}
|
||||
|
||||
// Translate to 0..($days_in_mon-1)
|
||||
self.t.tm_mday -= 1;
|
||||
loop {
|
||||
let days_in_mon = days_in_month(self.t.tm_mon, self.t.tm_year);
|
||||
if self.t.tm_mday < days_in_mon { break; }
|
||||
// Wrap one month
|
||||
self.t.tm_mday -= days_in_mon;
|
||||
self.t.tm_mon += 1;
|
||||
self.changes.insert(TMChanges::MDAY|TMChanges::WDAY|TMChanges::MON);
|
||||
}
|
||||
|
||||
// Translate back to 1..$days_in_mon
|
||||
self.t.tm_mday += 1;
|
||||
|
||||
// mon: 0..11
|
||||
if self.t.tm_mon >= 12 {
|
||||
self.t.tm_year += self.t.tm_mon / 12;
|
||||
self.t.tm_mon %= 12;
|
||||
self.changes.insert(TMChanges::MON|TMChanges::YEAR);
|
||||
}
|
||||
|
||||
self.t.tm_wday %= 7;
|
||||
}
|
||||
|
||||
pub fn set_sec(&mut self, v: libc::c_int) {
|
||||
self.t.tm_sec = v;
|
||||
self.changes.insert(TMChanges::SEC);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_min(&mut self, v: libc::c_int) {
|
||||
self.t.tm_min = v;
|
||||
self.changes.insert(TMChanges::MIN);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_hour(&mut self, v: libc::c_int) {
|
||||
self.t.tm_hour = v;
|
||||
self.changes.insert(TMChanges::HOUR);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_mday(&mut self, v: libc::c_int) {
|
||||
self.t.tm_mday = v;
|
||||
self.changes.insert(TMChanges::MDAY);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_mon(&mut self, v: libc::c_int) {
|
||||
self.t.tm_mon = v;
|
||||
self.changes.insert(TMChanges::MON);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_year(&mut self, v: libc::c_int) {
|
||||
self.t.tm_year = v;
|
||||
self.changes.insert(TMChanges::YEAR);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
pub fn set_wday(&mut self, v: libc::c_int) {
|
||||
self.t.tm_wday = v;
|
||||
self.changes.insert(TMChanges::WDAY);
|
||||
self.wrap_time();
|
||||
}
|
||||
|
||||
}
|
@ -252,6 +252,6 @@ pub const SYSTEMD_TIMESPAN_SCHEMA: Schema = StringSchema::new(
|
||||
.schema();
|
||||
|
||||
pub const SYSTEMD_CALENDAR_EVENT_SCHEMA: Schema = StringSchema::new(
|
||||
"systemd time span")
|
||||
"systemd calendar event")
|
||||
.format(&ApiStringFormat::VerifyFn(super::time::verify_calendar_event))
|
||||
.schema();
|
||||
|
@ -11,7 +11,6 @@ use openssl::sign::{Signer, Verifier};
|
||||
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet};
|
||||
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
pub const TICKET_LIFETIME: i64 = 3600 * 2; // 2 hours
|
||||
|
||||
@ -69,7 +68,7 @@ where
|
||||
Ok(Self {
|
||||
prefix: Cow::Borrowed(prefix),
|
||||
data: data.to_string(),
|
||||
time: epoch_now_u64()? as i64,
|
||||
time: proxmox::tools::time::epoch_i64(),
|
||||
signature: None,
|
||||
_type_marker: PhantomData,
|
||||
})
|
||||
@ -174,7 +173,7 @@ where
|
||||
None => bail!("invalid ticket without signature"),
|
||||
};
|
||||
|
||||
let age = epoch_now_u64()? as i64 - self.time;
|
||||
let age = proxmox::tools::time::epoch_i64() - self.time;
|
||||
if age < time_frame.start {
|
||||
bail!("invalid ticket - timestamp newer than expected");
|
||||
}
|
||||
@ -272,7 +271,6 @@ mod test {
|
||||
|
||||
use super::Ticket;
|
||||
use crate::api2::types::Userid;
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn simple_test<F>(key: &PKey<Private>, aad: Option<&str>, modify: F)
|
||||
where
|
||||
@ -314,7 +312,7 @@ mod test {
|
||||
false
|
||||
});
|
||||
simple_test(&key, None, |t| {
|
||||
t.change_time(epoch_now_u64().unwrap() as i64 + 0x1000_0000);
|
||||
t.change_time(proxmox::tools::time::epoch_i64() + 0x1000_0000);
|
||||
false
|
||||
});
|
||||
}
|
||||
|
@ -6,17 +6,16 @@ Ext.define('pbs-data-store-snapshots', {
|
||||
{
|
||||
name: 'backup-time',
|
||||
type: 'date',
|
||||
dateFormat: 'timestamp'
|
||||
dateFormat: 'timestamp',
|
||||
},
|
||||
'files',
|
||||
'owner',
|
||||
'verification',
|
||||
{ name: 'size', type: 'int', allowNull: true, },
|
||||
{ name: 'size', type: 'int', allowNull: true },
|
||||
{
|
||||
name: 'crypt-mode',
|
||||
type: 'boolean',
|
||||
calculate: function(data) {
|
||||
let encrypted = 0;
|
||||
let crypt = {
|
||||
none: 0,
|
||||
mixed: 0,
|
||||
@ -24,25 +23,24 @@ Ext.define('pbs-data-store-snapshots', {
|
||||
encrypt: 0,
|
||||
count: 0,
|
||||
};
|
||||
let signed = 0;
|
||||
data.files.forEach(file => {
|
||||
if (file.filename === 'index.json.blob') return; // is never encrypted
|
||||
let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
|
||||
if (mode !== -1) {
|
||||
crypt[file['crypt-mode']]++;
|
||||
}
|
||||
crypt.count++;
|
||||
}
|
||||
});
|
||||
|
||||
return PBS.Utils.calculateCryptMode(crypt);
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'matchesFilter',
|
||||
type: 'boolean',
|
||||
defaultValue: true,
|
||||
},
|
||||
]
|
||||
],
|
||||
});
|
||||
|
||||
Ext.define('PBS.DataStoreContent', {
|
||||
@ -70,7 +68,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
view.getStore().setSorters([
|
||||
'backup-group',
|
||||
'text',
|
||||
'backup-time'
|
||||
'backup-time',
|
||||
]);
|
||||
Proxmox.Utils.monStoreErrors(view, this.store);
|
||||
this.reload(); // initial load
|
||||
@ -88,7 +86,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
this.store.setProxy({
|
||||
type: 'proxmox',
|
||||
timeout: 300*1000, // 5 minutes, we should make that api call faster
|
||||
url: url
|
||||
url: url,
|
||||
});
|
||||
|
||||
this.store.load();
|
||||
@ -124,7 +122,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
expanded: false,
|
||||
backup_type: item.data["backup-type"],
|
||||
backup_id: item.data["backup-id"],
|
||||
children: []
|
||||
children: [],
|
||||
};
|
||||
}
|
||||
|
||||
@ -163,7 +161,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
}
|
||||
return false;
|
||||
},
|
||||
after: () => {},
|
||||
after: Ext.emptyFn,
|
||||
});
|
||||
|
||||
for (const item of records) {
|
||||
@ -181,7 +179,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
|
||||
data.children = [];
|
||||
for (const file of data.files) {
|
||||
file.text = file.filename,
|
||||
file.text = file.filename;
|
||||
file['crypt-mode'] = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
|
||||
file.leaf = true;
|
||||
file.matchesFilter = true;
|
||||
@ -192,6 +190,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
children.push(data);
|
||||
}
|
||||
|
||||
let nowSeconds = Date.now() / 1000;
|
||||
let children = [];
|
||||
for (const [name, group] of Object.entries(groups)) {
|
||||
let last_backup = 0;
|
||||
@ -201,7 +200,13 @@ Ext.define('PBS.DataStoreContent', {
|
||||
'sign-only': 0,
|
||||
encrypt: 0,
|
||||
};
|
||||
for (const item of group.children) {
|
||||
let verify = {
|
||||
outdated: 0,
|
||||
none: 0,
|
||||
failed: 0,
|
||||
ok: 0,
|
||||
};
|
||||
for (let item of group.children) {
|
||||
crypt[PBS.Utils.cryptmap[item['crypt-mode']]]++;
|
||||
if (item["backup-time"] > last_backup && item.size !== null) {
|
||||
last_backup = item["backup-time"];
|
||||
@ -209,13 +214,24 @@ Ext.define('PBS.DataStoreContent', {
|
||||
group.files = item.files;
|
||||
group.size = item.size;
|
||||
group.owner = item.owner;
|
||||
verify.lastFailed = item.verification && item.verification.state !== 'ok';
|
||||
}
|
||||
if (item.verification &&
|
||||
(!group.verification || group.verification.state !== 'failed')) {
|
||||
group.verification = item.verification;
|
||||
if (!item.verification) {
|
||||
verify.none++;
|
||||
} else {
|
||||
if (item.verification.state === 'ok') {
|
||||
verify.ok++;
|
||||
} else {
|
||||
verify.failed++;
|
||||
}
|
||||
|
||||
let task = Proxmox.Utils.parse_task_upid(item.verification.upid);
|
||||
item.verification.lastTime = task.starttime;
|
||||
if (nowSeconds - task.starttime > 30 * 24 * 60 * 60) {
|
||||
verify.outdated++;
|
||||
}
|
||||
}
|
||||
}
|
||||
group.verification = verify;
|
||||
group.count = group.children.length;
|
||||
group.matchesFilter = true;
|
||||
crypt.count = group.count;
|
||||
@ -226,7 +242,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
|
||||
view.setRootNode({
|
||||
expanded: true,
|
||||
children: children
|
||||
children: children,
|
||||
});
|
||||
|
||||
if (selected !== undefined) {
|
||||
@ -246,13 +262,13 @@ Ext.define('PBS.DataStoreContent', {
|
||||
Proxmox.Utils.setErrorMask(view, false);
|
||||
if (view.getStore().getFilters().length > 0) {
|
||||
let searchBox = me.lookup("searchbox");
|
||||
let searchvalue = searchBox.getValue();;
|
||||
let searchvalue = searchBox.getValue();
|
||||
me.search(searchBox, searchvalue);
|
||||
}
|
||||
},
|
||||
|
||||
onPrune: function(view, rI, cI, item, e, rec) {
|
||||
var view = this.getView();
|
||||
view = this.getView();
|
||||
|
||||
if (!(rec && rec.data)) return;
|
||||
let data = rec.data;
|
||||
@ -270,7 +286,8 @@ Ext.define('PBS.DataStoreContent', {
|
||||
},
|
||||
|
||||
onVerify: function(view, rI, cI, item, e, rec) {
|
||||
var view = this.getView();
|
||||
let me = this;
|
||||
view = me.getView();
|
||||
|
||||
if (!view.datastore) return;
|
||||
|
||||
@ -302,6 +319,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
success: function(response, options) {
|
||||
Ext.create('Proxmox.window.TaskViewer', {
|
||||
upid: response.result.data,
|
||||
taskDone: () => me.reload(),
|
||||
}).show();
|
||||
},
|
||||
});
|
||||
@ -309,7 +327,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
|
||||
onForget: function(view, rI, cI, item, e, rec) {
|
||||
let me = this;
|
||||
var view = this.getView();
|
||||
view = this.getView();
|
||||
|
||||
if (!(rec && rec.data)) return;
|
||||
let data = rec.data;
|
||||
@ -364,7 +382,8 @@ Ext.define('PBS.DataStoreContent', {
|
||||
let atag = document.createElement('a');
|
||||
params['file-name'] = file;
|
||||
atag.download = filename;
|
||||
let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`, window.location.origin);
|
||||
let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`,
|
||||
window.location.origin);
|
||||
for (const [key, value] of Object.entries(params)) {
|
||||
url.searchParams.append(key, value);
|
||||
}
|
||||
@ -459,12 +478,22 @@ Ext.define('PBS.DataStoreContent', {
|
||||
},
|
||||
},
|
||||
|
||||
viewConfig: {
|
||||
getRowClass: function(record, index) {
|
||||
let verify = record.get('verification');
|
||||
if (verify && verify.lastFailed) {
|
||||
return 'proxmox-invalid-row';
|
||||
}
|
||||
return null;
|
||||
},
|
||||
},
|
||||
|
||||
columns: [
|
||||
{
|
||||
xtype: 'treecolumn',
|
||||
header: gettext("Backup Group"),
|
||||
dataIndex: 'text',
|
||||
flex: 1
|
||||
flex: 1,
|
||||
},
|
||||
{
|
||||
header: gettext('Actions'),
|
||||
@ -511,9 +540,9 @@ Ext.define('PBS.DataStoreContent', {
|
||||
data.filename &&
|
||||
data.filename.endsWith('pxar.didx') &&
|
||||
data['crypt-mode'] < 3);
|
||||
}
|
||||
},
|
||||
]
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
xtype: 'datecolumn',
|
||||
@ -521,7 +550,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
sortable: true,
|
||||
dataIndex: 'backup-time',
|
||||
format: 'Y-m-d H:i:s',
|
||||
width: 150
|
||||
width: 150,
|
||||
},
|
||||
{
|
||||
header: gettext("Size"),
|
||||
@ -543,6 +572,8 @@ Ext.define('PBS.DataStoreContent', {
|
||||
format: '0',
|
||||
header: gettext("Count"),
|
||||
sortable: true,
|
||||
width: 75,
|
||||
align: 'right',
|
||||
dataIndex: 'count',
|
||||
},
|
||||
{
|
||||
@ -565,29 +596,66 @@ Ext.define('PBS.DataStoreContent', {
|
||||
if (iconCls) {
|
||||
iconTxt = `<i class="fa fa-fw fa-${iconCls}"></i> `;
|
||||
}
|
||||
return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText
|
||||
}
|
||||
return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText;
|
||||
},
|
||||
},
|
||||
{
|
||||
header: gettext('Verify State'),
|
||||
sortable: true,
|
||||
dataIndex: 'verification',
|
||||
width: 120,
|
||||
renderer: (v, meta, record) => {
|
||||
if (v === undefined || v === null || !v.state) {
|
||||
//meta.tdCls = "x-grid-row-loading";
|
||||
return record.data.leaf ? '' : gettext('None');
|
||||
let i = (cls, txt) => `<i class="fa fa-fw fa-${cls}"></i> ${txt}`;
|
||||
if (v === undefined || v === null) {
|
||||
return record.data.leaf ? '' : i('question-circle-o warning', gettext('None'));
|
||||
}
|
||||
let task = Proxmox.Utils.parse_task_upid(v.upid);
|
||||
let verify_time = Proxmox.Utils.render_timestamp(task.starttime);
|
||||
let iconCls = v.state === 'ok' ? 'check good' : 'times critical';
|
||||
let tip = `Verify task started on ${verify_time}`;
|
||||
let tip, iconCls, txt;
|
||||
if (record.parentNode.id === 'root') {
|
||||
tip = v.state === 'ok'
|
||||
? 'All verification OK in backup group'
|
||||
: 'At least one failed verification in backup group!';
|
||||
if (v.failed === 0) {
|
||||
if (v.none === 0) {
|
||||
if (v.outdated > 0) {
|
||||
tip = 'All OK, but some snapshots were not verified in last 30 days';
|
||||
iconCls = 'check warning';
|
||||
txt = gettext('All OK (old)');
|
||||
} else {
|
||||
tip = 'All snapshots verified at least once in last 30 days';
|
||||
iconCls = 'check good';
|
||||
txt = gettext('All OK');
|
||||
}
|
||||
} else if (v.ok === 0) {
|
||||
tip = `${v.none} not verified yet`;
|
||||
iconCls = 'question-circle-o warning';
|
||||
txt = gettext('None');
|
||||
} else {
|
||||
tip = `${v.ok} OK, ${v.none} not verified yet`;
|
||||
iconCls = 'check faded';
|
||||
txt = `${v.ok} OK`;
|
||||
}
|
||||
} else {
|
||||
tip = `${v.ok} OK, ${v.failed} failed, ${v.none} not verified yet`;
|
||||
iconCls = 'times critical';
|
||||
txt = v.ok === 0 && v.none === 0
|
||||
? gettext('All failed')
|
||||
: `${v.failed} failed`;
|
||||
}
|
||||
} else if (!v.state) {
|
||||
return record.data.leaf ? '' : gettext('None');
|
||||
} else {
|
||||
let verify_time = Proxmox.Utils.render_timestamp(v.lastTime);
|
||||
tip = `Last verify task started on ${verify_time}`;
|
||||
txt = v.state;
|
||||
iconCls = 'times critical';
|
||||
if (v.state === 'ok') {
|
||||
iconCls = 'check good';
|
||||
let now = Date.now() / 1000;
|
||||
if (now - v.lastTime > 30 * 24 * 60 * 60) {
|
||||
tip = `Last verify task over 30 days ago: ${verify_time}`;
|
||||
iconCls = 'check warning';
|
||||
}
|
||||
}
|
||||
}
|
||||
return `<span data-qtip="${tip}">
|
||||
<i class="fa fa-fw fa-${iconCls}"></i> ${v.state}
|
||||
<i class="fa fa-fw fa-${iconCls}"></i> ${txt}
|
||||
</span>`;
|
||||
},
|
||||
listeners: {
|
||||
@ -619,6 +687,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
{
|
||||
xtype: 'textfield',
|
||||
reference: 'searchbox',
|
||||
emptyText: gettext('group, date or owner'),
|
||||
triggers: {
|
||||
clear: {
|
||||
cls: 'pmx-clear-trigger',
|
||||
@ -628,7 +697,7 @@ Ext.define('PBS.DataStoreContent', {
|
||||
this.triggers.clear.setVisible(false);
|
||||
this.setValue('');
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
listeners: {
|
||||
change: {
|
||||
@ -636,6 +705,6 @@ Ext.define('PBS.DataStoreContent', {
|
||||
buffer: 500,
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
});
|
||||
|
@ -54,6 +54,11 @@ all: js/proxmox-backup-gui.js css/ext6-pbs.css
|
||||
js:
|
||||
mkdir js
|
||||
|
||||
.PHONY: OnlineHelpInfo.js
|
||||
OnlineHelpInfo.js:
|
||||
$(MAKE) -C ../docs onlinehelpinfo
|
||||
mv ../docs/output/scanrefs/OnlineHelpInfo.js .
|
||||
|
||||
js/proxmox-backup-gui.js: js OnlineHelpInfo.js ${JSSRC}
|
||||
cat OnlineHelpInfo.js ${JSSRC} >$@.tmp
|
||||
mv $@.tmp $@
|
||||
|
@ -1,6 +1,30 @@
|
||||
var proxmoxOnlineHelpInfo = {
|
||||
const proxmoxOnlineHelpInfo = {
|
||||
"pbs_documentation_index": {
|
||||
"link" : "/pbs-docs/index.html",
|
||||
"link": "/docs/index.html",
|
||||
"title": "Proxmox Backup Server Documentation Index"
|
||||
},
|
||||
"datastore-intro": {
|
||||
"link": "/docs/administration-guide.html#datastore-intro",
|
||||
"title": ":term:`DataStore`"
|
||||
},
|
||||
"user-mgmt": {
|
||||
"link": "/docs/administration-guide.html#user-mgmt",
|
||||
"title": "User Management"
|
||||
},
|
||||
"user-acl": {
|
||||
"link": "/docs/administration-guide.html#user-acl",
|
||||
"title": "Access Control"
|
||||
},
|
||||
"backup-remote": {
|
||||
"link": "/docs/administration-guide.html#backup-remote",
|
||||
"title": ":term:`Remote`"
|
||||
},
|
||||
"syncjobs": {
|
||||
"link": "/docs/administration-guide.html#syncjobs",
|
||||
"title": "Sync Jobs"
|
||||
},
|
||||
"chapter-zfs": {
|
||||
"link": "/docs/sysadmin.html#chapter-zfs",
|
||||
"title": "ZFS on Linux"
|
||||
}
|
||||
};
|
||||
|
@ -41,9 +41,9 @@ Ext.define('PBS.Utils', {
|
||||
let files = data.count;
|
||||
if (mixed > 0) {
|
||||
return PBS.Utils.cryptmap.indexOf('mixed');
|
||||
} else if (files === encrypted) {
|
||||
} else if (files === encrypted && encrypted > 0) {
|
||||
return PBS.Utils.cryptmap.indexOf('encrypt');
|
||||
} else if (files === signed) {
|
||||
} else if (files === signed && signed > 0) {
|
||||
return PBS.Utils.cryptmap.indexOf('sign-only');
|
||||
} else if ((signed+encrypted) === 0) {
|
||||
return PBS.Utils.cryptmap.indexOf('none');
|
||||
|
@ -11,8 +11,9 @@ Ext.define('pbs-datastore-list', {
|
||||
Ext.define('pbs-data-store-config', {
|
||||
extend: 'Ext.data.Model',
|
||||
fields: [
|
||||
'name', 'path', 'comment', 'gc-schedule', 'prune-schedule', 'keep-last',
|
||||
'keep-hourly', 'keep-daily', 'keep-weekly', 'keep-monthly', 'keep-yearly',
|
||||
'name', 'path', 'comment', 'gc-schedule', 'prune-schedule',
|
||||
'verify-schedule', 'keep-last', 'keep-hourly', 'keep-daily',
|
||||
'keep-weekly', 'keep-monthly', 'keep-yearly',
|
||||
],
|
||||
proxy: {
|
||||
type: 'proxmox',
|
||||
|
@ -4,15 +4,17 @@ Ext.define('PBS.data.CalendarEventExamples', {
|
||||
|
||||
field: ['value', 'text'],
|
||||
data: [
|
||||
//FIXME { value: '*/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
|
||||
{ value: '*:0/30', text: Ext.String.format(gettext("Every {0} minutes"), 30) },
|
||||
{ value: 'hourly', text: gettext("Every hour") },
|
||||
//FIXME { value: '*/2:00', text: gettext("Every two hours") },
|
||||
{ value: '0/2:00', text: gettext("Every two hours") },
|
||||
{ value: '2,22:30', text: gettext("Every day") + " 02:30, 22:30" },
|
||||
{ value: 'daily', text: gettext("Every day") + " 00:00" },
|
||||
{ value: 'mon..fri', text: gettext("Monday to Friday") + " 00:00" },
|
||||
//FIXME{ value: 'mon..fri */1:00', text: gettext("Monday to Friday") + ': ' + gettext("hourly") },
|
||||
{ value: 'mon..fri *:00', text: gettext("Monday to Friday") + ', ' + gettext("hourly") },
|
||||
{ value: 'sat 18:15', text: gettext("Every Saturday") + " 18:15" },
|
||||
//FIXME{ value: 'monthly', text: gettext("Every 1st of Month") + " 00:00" }, // not yet possible..
|
||||
{ value: 'monthly', text: gettext("Every first day of the Month") + " 00:00" },
|
||||
{ value: 'sat *-1..7 02:00', text: gettext("Every first Saturday of the month") + " 02:00" },
|
||||
{ value: 'yearly', text: gettext("First day of the year") + " 00:00" },
|
||||
],
|
||||
});
|
||||
|
||||
@ -26,6 +28,8 @@ Ext.define('PBS.form.CalendarEvent', {
|
||||
displayField: 'text',
|
||||
queryMode: 'local',
|
||||
|
||||
matchFieldWidth: false,
|
||||
|
||||
config: {
|
||||
deleteEmpty: true,
|
||||
},
|
||||
|
@ -12,7 +12,11 @@
|
||||
<link rel="stylesheet" type="text/css" href="/fontawesome/css/font-awesome.css" />
|
||||
<link rel="stylesheet" type="text/css" href="/widgettoolkit/css/ext6-pmx.css" />
|
||||
<link rel="stylesheet" type="text/css" href="/css/ext6-pbs.css" />
|
||||
{{#if language}}
|
||||
<script type='text/javascript' src='/locale/pbs-lang-{{ language }}.js'></script>
|
||||
{{else}}
|
||||
<script type='text/javascript'> function gettext(buf) { return buf; } </script>
|
||||
{{/if}}
|
||||
{{#if debug}}
|
||||
<script type="text/javascript" src="/extjs/ext-all-debug.js"></script>
|
||||
<script type="text/javascript" src="/extjs/charts-debug.js"></script>
|
||||
|
@ -3,6 +3,8 @@ Ext.define('PBS.window.ACLEdit', {
|
||||
alias: 'widget.pbsACLAdd',
|
||||
mixins: ['Proxmox.Mixin.CBind'],
|
||||
|
||||
onlineHelp: 'user_acl',
|
||||
|
||||
url: '/access/acl',
|
||||
method: 'PUT',
|
||||
isAdd: true,
|
||||
|
@ -3,6 +3,9 @@ Ext.define('PBS.DataStoreEdit', {
|
||||
alias: 'widget.pbsDataStoreEdit',
|
||||
mixins: ['Proxmox.Mixin.CBind'],
|
||||
|
||||
|
||||
onlineHelp: 'datastore_intro',
|
||||
|
||||
subject: gettext('Datastore'),
|
||||
isAdd: true,
|
||||
|
||||
@ -71,6 +74,15 @@ Ext.define('PBS.DataStoreEdit', {
|
||||
deleteEmpty: '{!isCreate}',
|
||||
},
|
||||
},
|
||||
{
|
||||
xtype: 'pbsCalendarEvent',
|
||||
name: 'verify-schedule',
|
||||
fieldLabel: gettext("Verify Schedule"),
|
||||
emptyText: gettext('none'),
|
||||
cbind: {
|
||||
deleteEmpty: '{!isCreate}',
|
||||
},
|
||||
},
|
||||
],
|
||||
columnB: [
|
||||
{
|
||||
|