Compare commits

66 Commits (SHA1):

1ff840ffad
7443a6e092
3a9988638b
96ee857752
887018bb79
9696f5193b
e13c4f66bb
8a25809573
d87b193b0b
ea5289e869
1f6a4f587a
705b2293ec
d2c7ef09ba
27f86f997e
fc93d38076
a5a85d41ff
08cb2038bd
6f711c1737
42ec9f577f
9de69cdb1a
bd260569d3
36cb4b30ef
4e717240bf
e9764238df
26f499b17b
cc7995ac40
43abba4b4f
58f950c546
c426e65893
caea8d611f
7d0754a6d2
5afa0755ea
40b63186a6
8f6088c130
2162e2c15d
0d5ab04a90
4059285649
2e079b8bf2
4ff2c9b832
a8e2940ff3
d5d5f2174e
2311238450
2ea501ffdf
4eb4e94918
817bcda848
f6de2c7359
3f0b9c10ec
2b66abbfab
402c8861d8
3f683799a8
573bcd9a92
90779237ae
1f82f9b7b5
19b5c3c43e
fe3e65c3ea
fdaab0df4e
b957aa81bd
8ea00f6e49
4bd789b0fa
2f050cf2ed
e22f4882e7
c65bc99a41
355c055e81
c2009e5309
23f74c190e
a6f8728339
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.3.0"
+version = "0.7.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -38,11 +38,11 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.1"
-proxmox = { version = "0.1.40", features = [ "sortable-macro", "api-macro" ] }
+proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.1.8", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"

Makefile (2 changed lines)

@@ -37,6 +37,8 @@ CARGO ?= cargo
 COMPILED_BINS := \
 	$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
 
+export DEB_VERSION DEB_VERSION_UPSTREAM
+
 SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb

TODO.rst (2 changed lines)

@@ -30,8 +30,6 @@ Chores:
 
 * move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/
 
-* recompute PXAR_ header types from strings: avoid using numbers from casync
-
 * remove pbs-* systemd timers and services on package purge
 
 

debian/changelog (56 changed lines, vendored)

@@ -1,6 +1,60 @@
+rust-proxmox-backup (0.7.0-1) unstable; urgency=medium
+
+  * implement clone for RemoteChunkReader
+
+  * improve docs
+
+  * client: add --encryption boolean parameter
+
+  * client: use default encryption key if it is available
+
+  * d/rules: do not compress .pdf files
+
+  * ui: various fixes
+
+  * add beta text with link to bugtracker
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 07:40:05 +0200
+
+rust-proxmox-backup (0.6.0-1) unstable; urgency=medium
+
+  * make ReadChunk not require mutable self.
+
+  * ui: increase timeout for snapshot listing
+
+  * ui: consistently spell Datastore without space between words
+
+  * ui: disk create: sync and improve 'add-datastore' checkbox label
+
+  * proxmox-backup-client: add benchmark command
+
+  * pxar: fixup 'vanished-file' logic a bit
+
+  * ui: add verify button
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 03 Jul 2020 09:45:52 +0200
+
+rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
+
+  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
+
+  * ui: allow to Forget (delete) backup snapshots
+
+  * pxar: deal with files changing size during archiving
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200
+
+rust-proxmox-backup (0.4.0-1) unstable; urgency=medium
+
+  * change api for incremental backups mode
+
+  * zfs disk management gui
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200
+
 rust-proxmox-backup (0.3.0-1) unstable; urgency=medium
 
-  * support incrtemental backups mode
+  * support incremental backups mode
 
   * new disk management
 

debian/control.in (3 changed lines, vendored)

@@ -3,11 +3,14 @@ Architecture: any
 Depends: fonts-font-awesome,
         libjs-extjs (>= 6.0.1),
         libzstd1 (>= 1.3.8),
+        lvm2,
         proxmox-backup-docs,
         proxmox-mini-journalreader,
         proxmox-widget-toolkit (>= 2.2-4),
+        smartmontools,
         ${misc:Depends},
         ${shlibs:Depends},
+Recommends: zfsutils-linux,
 Description: Proxmox Backup Server daemon with tools and GUI
  This package contains the Proxmox Backup Server daemons and related
  tools. This includes a web-based graphical user interface.

debian/rules (3 changed lines, vendored)

@@ -45,3 +45,6 @@ override_dh_installsystemd:
 # TODO: remove once available (Debian 11 ?)
 override_dh_dwz:
 	dh_dwz --no-dwz-multifile
+
+override_dh_compress:
+	dh_compress -X.pdf

@@ -1,11 +1,5 @@
 include ../defines.mk
 
-ifeq ($(BUILD_MODE), release)
-COMPILEDIR := ../target/release
-else
-COMPILEDIR := ../target/debug
-endif
-
 GENERATED_SYNOPSIS := \
 	proxmox-backup-client/synopsis.rst \
 	proxmox-backup-client/catalog-shell-synopsis.rst \
@@ -26,6 +20,15 @@ SPHINXOPTS =
 SPHINXBUILD = sphinx-build
 BUILDDIR = output
 
+ifeq ($(BUILD_MODE), release)
+COMPILEDIR := ../target/release
+SPHINXOPTS += -t release
+else
+COMPILEDIR := ../target/debug
+SPHINXOPTS += -t devbuild
+endif
+
+
 # Sphinx internal variables.
 ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
 

docs/conf.py (11 changed lines)

@@ -17,7 +17,7 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-# import os
+import os
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
 
@@ -45,8 +45,11 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
+
 extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]
 
+todo_link_only = True
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
 
@@ -76,9 +79,11 @@ author = 'Proxmox Support Team'
 # built documents.
 #
 # The short X.Y version.
-version = '0.2'
+vstr = lambda s: '<devbuild>' if s is None else str(s)
+
+version = vstr(os.getenv('DEB_VERSION_UPSTREAM'))
 # The full version, including alpha/beta/rc tags.
-release = '0.2-1'
+release = vstr(os.getenv('DEB_VERSION'))
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

@@ -1,18 +1,15 @@
 .. Proxmox Backup documentation master file
 
-Welcome to Proxmox Backup's documentation!
-==========================================
+Welcome to the Proxmox Backup documentation!
+============================================
 
-Copyright (C) 2019 Proxmox Server Solutions GmbH
+Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
 
-Permission is granted to copy, distribute and/or modify this document
-under the terms of the GNU Free Documentation License, Version 1.3 or
-any later version published by the Free Software Foundation; with no
-Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
-copy of the license is included in the section entitled "GNU Free
-Documentation License".
-
-
+Permission is granted to copy, distribute and/or modify this document under the
+terms of the GNU Free Documentation License, Version 1.3 or any later version
+published by the Free Software Foundation; with no Invariant Sections, no
+Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
+in the section entitled "GNU Free Documentation License".
+
+.. todolist::
 
 .. toctree::
@@ -37,5 +34,14 @@ Documentation License".
    glossary.rst
    GFDL.rst
 
+.. only:: html and devbuild
+
+   .. toctree::
+      :maxdepth: 2
+      :caption: Developer Appendix
+
+      todos.rst
+
+
 * :ref:`genindex`
 

@@ -1,57 +1,61 @@
 Introduction
 ============
 
-This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
+This documentation is written in :term:`reStructuredText` and formatted with
+:term:`Sphinx`.
 
 
-What is Proxmox Backup
-----------------------
+What is Proxmox Backup Server
+-----------------------------
 
-Proxmox Backup is an enterprise class client-server backup software,
-specially optimized for the `Proxmox Virtual Environment`_ to backup
-:term:`virtual machine`\ s and :term:`container`\ s. It is also
-possible to backup physical hosts.
+Proxmox Backup Server is an enterprise-class client-server backup software that
+backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
+It is specially optimized for the `Proxmox Virtual Environment`_ platform and
+allows you to backup your data securely, even between remote sites, providing
+easy management with a web-based user interface.
 
-It supports deduplication, compression and authenticated encryption
-(AE_). Using :term:`Rust` as implementation language guarantees high
-performance, low resource usage, and a safe, high quality code base.
+Proxmox Backup Server supports deduplication, compression, and authenticated
+encryption (AE_). Using :term:`Rust` as implementation language guarantees high
+performance, low resource usage, and a safe, high quality code base.
 
-Encryption is done at the client side. This makes backups to not fully
-trusted targets possible.
+It features strong encryption done on the client side. Thus, it's possible to
+backup data to not fully trusted targets.
 
 
 Architecture
 ------------
 
-Proxmox Backup uses a `Client-server model`_. The server is
-responsible to store the backup data and provides an API to create
-backups and restore data. It is possible to manage disks and
-other server side resources using this API.
+Proxmox Backup Server uses a `client-server model`_. The server stores the
+backup data and provides an API to create backups and restore data. With the
+API it's also possible to manage disks and other server side resources.
 
-A backup client uses this API to access the backed up data,
-i.e. ``proxmox-backup-client`` is a command line tool to create
-backups and restore data. We deliver an integrated client for
-QEMU_ with `Proxmox Virtual Environment`_.
+The backup client uses this API to access the backed up data. With the command
+line tool ``proxmox-backup-client`` you can create backups and restore data.
+For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
 
-A single backup is allowed to contain several archives. For example,
-when you backup a :term:`virtual machine`, each disk is stored as a
-separate archive inside that backup. The VM configuration also gets an
-extra file. This way, it is easy to access and restore important parts
-of the backup without having to scan the whole backup.
+A single backup is allowed to contain several archives. For example, when you
+backup a :term:`virtual machine`, each disk is stored as a separate archive
+inside that backup. The VM configuration itself is stored as an extra file.
+This way, it is easy to access and restore only important parts of the backup
+without the need to scan the whole backup.
 
 
 Main Features
 -------------
 
-:Proxmox VE: The `Proxmox Virtual Environment`_ is fully
-   supported. You can backup :term:`virtual machine`\ s and
-   :term:`container`\ s.
+:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
+   supported and you can easily backup :term:`virtual machine`\ s and
+   :term:`container`\ s.
 
-:GUI: We provide a graphical, web based user interface.
+:Performance: The whole software stack is written in :term:`Rust`,
+   to provide high speed and memory efficiency.
 
-:Deduplication: Incremental backups produce large amounts of duplicate
-   data. The deduplication layer removes that redundancy and makes
-   incremental backups small and space efficient.
+:Deduplication: Periodic backups produce large amounts of duplicate
+   data. The deduplication layer avoids redundancy and minimizes the used
+   storage space.
+
+:Incremental backups: Changes between backups are typically low. Reading and
+   sending only the delta reduces storage and network impact of backups.
 
 :Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
    accuracy and consistency of your backups.
@@ -59,43 +63,43 @@ Main Features
 :Remote Sync: It is possible to efficiently synchronize data to remote
    sites. Only deltas containing new data are transferred.
 
-:Performance: The whole software stack is written in :term:`Rust`,
-   to provide high speed and memory efficiency.
-
-:Compression: Ultra fast Zstandard_ compression is able to compress
+:Compression: The ultra fast Zstandard_ compression is able to compress
    several gigabytes of data per second.
 
-:Encryption: Backups can be encrypted client-side using AES-256 in
+:Encryption: Backups can be encrypted on the client-side using AES-256 in
    GCM_ mode. This authenticated encryption mode (AE_) provides very
    high performance on modern hardware.
 
-:Open Source: No secrets. You have access to all the source code.
+:Web interface: Manage Proxmox backups with the integrated web-based user
+   interface.
 
-:Support: Commercial support options are available from `Proxmox`_.
+:Open Source: No secrets. Proxmox Backup Server is free and open-source
+   software. The source code is licensed under AGPL, v3.
+
+:Support: Enterprise support is available from `Proxmox`_.
 
 
-Why Backup?
------------
+Reasons for Data Backup?
+------------------------
 
-The primary purpose of a backup is to protect against data loss. Data
-loss can be caused by faulty hardware, but also by human error.
+The main purpose of a backup is to protect against data loss. Data loss can be
+caused by faulty hardware but also by human error.
 
-A common mistake is to delete a file or folder which is still
-required. Virtualization can amplify this problem. It is now
-easy to delete a whole virtual machine by pressing a single button.
+A common mistake is to accidentally delete a file or folder which is still
+required. Virtualization can even amplify this problem; it easily happens that
+a whole virtual machine is deleted by just pressing a single button.
 
-Backups can serve as a toolkit for administrators to temporarily
-store data. For example, it is common practice to perform full backups
-before installing major software updates. If something goes wrong, you
-can restore the previous state.
+For administrators, backups can serve as a useful toolkit for temporarily
+storing data. For example, it is common practice to perform full backups before
+installing major software updates. If something goes wrong, you can easily
+restore the previous state.
 
-Another reason for backups are legal requirements. Some data must be
-kept in a safe place for several years by law, so that it can be accessed if
-required.
+Another reason for backups are legal requirements. Some data, especially
+business records, must be kept in a safe place for several years by law, so
+that they can be accessed if required.
 
-Data loss can be very costly as it can severely restrict your
-business. Therefore, make sure that you perform a backup regularly
-and run restore tests.
+In general, data loss is very costly as it can severely damage your business.
+Therefore, ensure that you perform regular backups and run restore tests.
 
 
 Software Stack
@@ -107,14 +111,14 @@ Software Stack
 License
 -------
 
-Copyright (C) 2019 Proxmox Server Solutions GmbH
+Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
 
 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
 
-Proxmox Backup is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as
-published by the Free Software Foundation, either version 3 of the
-License, or (at your option) any later version.
+Proxmox Backup Server is free and open source software: you can use it,
+redistribute it, and/or modify it under the terms of the GNU Affero General
+Public License as published by the Free Software Foundation, either version 3
+of the License, or (at your option) any later version.
 
 This program is distributed in the hope that it will be useful, but
 ``WITHOUT ANY WARRANTY``; without even the implied warranty of

@@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {
 
     let backup_time = chrono::Utc::now();
 
-    let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
 
     println!("start upload speed test");
     let res = client.upload_speedtest().await?;
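
Note: the only change here is the new second argument to `BackupWriter::start`, passed as `None`. The diff does not name the parameter; given the 0.7.0 changelog entries about client-side encryption above, it is presumably the optional encryption configuration, with `None` keeping the speed test unencrypted.
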
@@ -9,6 +9,7 @@ pub mod status;
 pub mod types;
 pub mod version;
 pub mod pull;
+mod helpers;
 
 use proxmox::api::router::SubdirMap;
 use proxmox::api::Router;

@@ -394,6 +394,90 @@ pub fn status(
     crate::tools::disks::disk_usage(&datastore.base_path())
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backups from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let worker_id;
+
+    let mut backup_dir = None;
+    let mut backup_group = None;
+
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+            let group = BackupGroup::new(backup_type, backup_id);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            worker_id = store.clone();
+        }
+        _ => bail!("parameters do not specify a backup group or snapshot"),
+    }
+
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let upid_str = WorkerTask::new_thread(
+        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
+        {
+            let success = if let Some(backup_dir) = backup_dir {
+                verify_backup_dir(&datastore, &backup_dir, &worker)?
+            } else if let Some(backup_group) = backup_group {
+                verify_backup_group(&datastore, &backup_group, &worker)?
+            } else {
+                verify_all_backups(&datastore, &worker)?
+            };
+            if !success {
+                bail!("verification failed - please check the log for details");
+            }
+            Ok(())
+        })?;
+
+    Ok(json!(upid_str))
+}
+
 #[macro_export]
 macro_rules! add_common_prune_prameters {
     ( [ $( $list1:tt )* ] ) => {
@@ -1261,6 +1345,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
     ),
+    (
+        "verify",
+        &Router::new()
+            .post(&API_METHOD_VERIFY)
+    ),
 ];
 
 const DATASTORE_INFO_ROUTER: Router = Router::new()
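
The new handler accepts three parameter shapes, mirrored by the `match` above: a full snapshot, a backup group, or the whole datastore. A minimal sketch of building the request arguments for the new `verify` sub-route; the endpoint path is inferred from the router entry above, and everything in this sketch is illustrative rather than the project's real client API:

```rust
use serde_json::{json, Value};

enum VerifyScope {
    Datastore,
    Group { backup_type: String, backup_id: String },
    Snapshot { backup_type: String, backup_id: String, backup_time: i64 },
}

/// Build the argument object for POST .../admin/datastore/{store}/verify.
/// Parameter names come from the #[api] schema above.
fn verify_args(scope: VerifyScope) -> Value {
    match scope {
        // Whole datastore: no extra parameters.
        VerifyScope::Datastore => json!({}),
        // One backup group, e.g. type "vm", id "100".
        VerifyScope::Group { backup_type, backup_id } =>
            json!({ "backup-type": backup_type, "backup-id": backup_id }),
        // One snapshot: group plus its epoch timestamp.
        VerifyScope::Snapshot { backup_type, backup_id, backup_time } =>
            json!({
                "backup-type": backup_type,
                "backup-id": backup_id,
                "backup-time": backup_time,
            }),
    }
}
```
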
@@ -10,7 +10,7 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 
-use crate::tools::{self, WrappedReaderStream};
+use crate::tools;
 use crate::server::{WorkerTask, H2Service};
 use crate::backup::*;
 use crate::api2::types::*;
@@ -199,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "dynamic_index", &Router::new()
-            .download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
             .put(&API_METHOD_DYNAMIC_APPEND)
     ),
@@ -222,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "fixed_index", &Router::new()
-            .download(&API_METHOD_FIXED_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_FIXED_INDEX)
             .put(&API_METHOD_FIXED_APPEND)
     ),
+    (
+        "previous", &Router::new()
+            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+    ),
     (
         "speedtest", &Router::new()
             .upload(&API_METHOD_UPLOAD_SPEEDTEST)
@@ -610,20 +612,17 @@ fn finish_backup (
 }
 
 #[sortable]
-pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&dynamic_chunk_index),
+pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_previous),
     &ObjectSchema::new(
-        r###"
-Download the dynamic chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
+        "Download archive from previous backup.",
         &sorted!([
             ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
         ]),
     )
 );
 
-fn dynamic_chunk_index(
+fn download_previous(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -636,130 +635,38 @@ fn dynamic_chunk_index(
 
     let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
 
-    if !archive_name.ends_with(".didx") {
-        bail!("wrong archive extension: '{}'", archive_name);
-    }
-
-    let empty_response = {
-        Response::builder()
-            .status(StatusCode::OK)
-            .body(Body::empty())?
-    };
-
     let last_backup = match &env.last_backup {
         Some(info) => info,
-        None => return Ok(empty_response),
+        None => bail!("no previous backup"),
     };
 
-    let mut path = last_backup.backup_dir.relative_path();
+    let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
     path.push(&archive_name);
 
-    let index = match env.datastore.open_dynamic_reader(path) {
-        Ok(index) => index,
-        Err(_) => {
-            env.log(format!("there is no last backup for archive '{}'", archive_name));
-            return Ok(empty_response);
+    {
+        let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
+            ArchiveType::FixedIndex => {
+                let index = env.datastore.open_fixed_reader(&path)?;
+                Some(Box::new(index))
+            }
+            ArchiveType::DynamicIndex => {
+                let index = env.datastore.open_dynamic_reader(&path)?;
+                Some(Box::new(index))
+            }
+            _ => { None }
+        };
+        if let Some(index) = index {
+            env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+
+            for pos in 0..index.index_count() {
+                let info = index.chunk_info(pos).unwrap();
+                let size = info.range.end - info.range.start;
+                env.register_chunk(info.digest, size as u32)?;
+            }
         }
-    };
-
-    env.log(format!("download last backup index for archive '{}'", archive_name));
-
-    let count = index.index_count();
-    for pos in 0..count {
-        let info = index.chunk_info(pos)?;
-        let size = info.size() as u32;
-        env.register_chunk(info.digest, size)?;
     }
 
-    let reader = DigestListEncoder::new(Box::new(index));
-
-    let stream = WrappedReaderStream::new(reader);
-
-    // fixme: set size, content type?
-    let response = http::Response::builder()
-        .status(200)
-        .body(Body::wrap_stream(stream))?;
-
-    Ok(response)
-}.boxed()
-}
-
-#[sortable]
-pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&fixed_chunk_index),
-    &ObjectSchema::new(
-        r###"
-Download the fixed chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
-        &sorted!([
-            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
-        ]),
-    )
-);
-
-fn fixed_chunk_index(
-    _parts: Parts,
-    _req_body: Body,
-    param: Value,
-    _info: &ApiMethod,
-    rpcenv: Box<dyn RpcEnvironment>,
-) -> ApiResponseFuture {
-
-    async move {
-        let env: &BackupEnvironment = rpcenv.as_ref();
-
-        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
-
-        if !archive_name.ends_with(".fidx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
-        let last_backup = match &env.last_backup {
-            Some(info) => info,
-            None => return Ok(empty_response),
-        };
-
-        let mut path = last_backup.backup_dir.relative_path();
-        path.push(&archive_name);
-
-        let index = match env.datastore.open_fixed_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
-            }
-        };
-
-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        let image_size = index.index_bytes();
-        for pos in 0..count {
-            let digest = index.index_digest(pos).unwrap();
-            // Note: last chunk can be smaller
-            let start = (pos*index.chunk_size) as u64;
-            let mut end = start + index.chunk_size as u64;
-            if end > image_size { end = image_size; }
-            let size = (end - start) as u32;
-            env.register_chunk(*digest, size)?;
-        }
-
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
+    env.log(format!("download '{}' from previous backup.", archive_name));
+    crate::api2::helpers::create_download_response(path).await
     }.boxed()
 }
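
The net effect of this file: instead of the server encoding a digest list for the client (via `DigestListEncoder`, removed from the index module below), the server now registers the previous backup's chunks itself, and the client downloads the raw index file through the new `previous` sub-route. A sketch of the call shape from the client side, using a stand-in session trait; the real client type and method names are assumptions, only the route and the "archive-name" parameter come from the diff:

```rust
use anyhow::Error;
use serde_json::json;

/// Stand-in for the backup writer's HTTP/2 session; the real proxmox-backup
/// client type differs, this only illustrates the call shape.
trait BackupSession {
    fn download(&self, subpath: &str, param: serde_json::Value) -> Result<Vec<u8>, Error>;
}

/// Fetch a previous backup's index (.fidx/.didx) through the new
/// `previous` endpoint registered in BACKUP_API_SUBDIRS above.
fn fetch_previous_index(session: &dyn BackupSession, archive_name: &str) -> Result<Vec<u8>, Error> {
    session.download("previous", json!({ "archive-name": archive_name }))
}
```
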
src/api2/helpers.rs (new file, 23 lines)

@@ -0,0 +1,23 @@
+use std::path::PathBuf;
+use anyhow::Error;
+use futures::*;
+use hyper::{Body, Response, StatusCode, header};
+use proxmox::http_err;
+
+pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
+    let file = tokio::fs::File::open(path.clone())
+        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
+        .await?;
+
+    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
+        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+
+    let body = Body::wrap_stream(payload);
+
+    // fixme: set other headers ?
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .header(header::CONTENT_TYPE, "application/octet-stream")
+        .body(body)
+        .unwrap())
+}
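
Both download paths (`download_previous` above and the reader's `download_file` below) now delegate to this helper, which streams the file through `FramedRead` rather than buffering it. A usage sketch, assuming `create_download_response` from the new file above is in scope; the path components are placeholders:

```rust
use std::path::PathBuf;
use anyhow::Error;
use hyper::{Body, Response};

// Assumes create_download_response() from src/api2/helpers.rs is in scope.
async fn serve_snapshot_file(snapshot_dir: PathBuf, file_name: &str) -> Result<Response<Body>, Error> {
    let mut path = snapshot_dir;   // e.g. "<datastore>/vm/100/<timestamp>" (placeholder)
    path.push(file_name);          // e.g. "catalog.blob" (placeholder)
    create_download_response(path).await
}
```
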
@@ -26,10 +26,10 @@ pub mod zfs;
             schema: NODE_SCHEMA,
         },
         skipsmart: {
            description: "Skip smart checks.",
            type: bool,
            optional: true,
            default: false,
         },
         "usage-type": {
             type: DiskUsageType,
@@ -17,6 +17,7 @@ use crate::server::{WorkerTask, H2Service};
 use crate::tools;
 use crate::config::acl::PRIV_DATASTORE_READ;
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::api2::helpers;
 
 mod environment;
 use environment::*;
@@ -187,26 +188,9 @@ fn download_file(
     path.push(env.backup_dir.relative_path());
     path.push(&file_name);
 
-    let path2 = path.clone();
-    let path3 = path.clone();
-
-    let file = tokio::fs::File::open(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
-        .await?;
-
-    env.log(format!("download {:?}", path3));
-
-    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
-
-    let body = Body::wrap_stream(payload);
-
-    // fixme: set other headers ?
-    Ok(Response::builder()
-        .status(StatusCode::OK)
-        .header(header::CONTENT_TYPE, "application/octet-stream")
-        .body(body)
-        .unwrap())
+    env.log(format!("download {:?}", path.clone()));
+
+    helpers::create_download_response(path).await
 }.boxed()
 }

@@ -198,6 +198,9 @@ pub use prune::*;
 mod datastore;
 pub use datastore::*;
 
+mod verify;
+pub use verify::*;
+
 mod catalog_shell;
 pub use catalog_shell::*;

@@ -36,7 +36,7 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
         Self {
             store: Some(store),
             index,
-            read_buffer: Vec::with_capacity(1024*1024),
+            read_buffer: Vec::with_capacity(1024 * 1024),
             current_chunk_idx: 0,
             current_chunk_digest: [0u8; 32],
             state: AsyncIndexReaderState::NoData,
@@ -44,9 +44,10 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
     }
 }
 
-impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
-S: AsyncReadChunk + Unpin + 'static,
-I: IndexFile + Unpin
+impl<S, I> AsyncRead for AsyncIndexReader<S, I>
+where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin,
 {
     fn poll_read(
         self: Pin<&mut Self>,
@@ -57,7 +58,7 @@ I: IndexFile + Unpin
         loop {
             match &mut this.state {
                 AsyncIndexReaderState::NoData => {
                     if this.current_chunk_idx >= this.index.index_count() {
                         return Poll::Ready(Ok(0));
                     }
 
@@ -67,18 +68,18 @@ I: IndexFile + Unpin
                         .ok_or(io_format_err!("could not get digest"))?
                         .clone();
 
                     if digest == this.current_chunk_digest {
                         this.state = AsyncIndexReaderState::HaveData(0);
                         continue;
                     }
 
                     this.current_chunk_digest = digest;
 
-                    let mut store = match this.store.take() {
+                    let store = match this.store.take() {
                         Some(store) => store,
                         None => {
                             return Poll::Ready(Err(io_format_err!("could not find store")));
-                        },
+                        }
                     };
 
                     let future = async move {
@@ -88,7 +89,7 @@ I: IndexFile + Unpin
                     };
 
                     this.state = AsyncIndexReaderState::WaitForData(future.boxed());
-                },
+                }
                 AsyncIndexReaderState::WaitForData(ref mut future) => {
                     match ready!(future.as_mut().poll(cx)) {
                         Ok((store, mut chunk_data)) => {
@@ -96,12 +97,12 @@ I: IndexFile + Unpin
                             this.read_buffer.append(&mut chunk_data);
                             this.state = AsyncIndexReaderState::HaveData(0);
                             this.store = Some(store);
-                        },
+                        }
                         Err(err) => {
                             return Poll::Ready(Err(io_err_other(err)));
-                        },
+                        }
                     };
-                },
+                }
                 AsyncIndexReaderState::HaveData(offset) => {
                     let offset = *offset;
                     let len = this.read_buffer.len();
@@ -111,7 +112,7 @@ I: IndexFile + Unpin
                         buf.len()
                     };
 
-                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
+                    buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
                     if offset + n == len {
                         this.state = AsyncIndexReaderState::NoData;
                         this.current_chunk_idx += 1;
@@ -120,7 +121,7 @@ I: IndexFile + Unpin
                     }
 
                     return Poll::Ready(Ok(n));
-                },
+                }
             }
         }
     }
}
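
The added `Sync` bound lines up with the 0.6.0 changelog entry "make ReadChunk not require mutable self": once chunks are read through `&self`, a shared handle must be `Sync` for it to be usable from another thread. A self-contained illustration of that rule, with toy types rather than the crate's:

```rust
use std::sync::Arc;

struct ChunkStore; // toy stand-in for the real chunk store

impl ChunkStore {
    // Reading through &self (not &mut self) is what the 0.6.0 change enabled.
    fn read_chunk(&self, _digest: &[u8; 32]) -> Vec<u8> { Vec::new() }
}

fn main() {
    let store = Arc::new(ChunkStore);
    let shared = Arc::clone(&store);
    // Arc<T> is Send only when T: Send + Sync -- the same pressure that adds
    // `Sync` to the reader's bounds once the store is shared across tasks.
    std::thread::spawn(move || {
        let _data = shared.read_chunk(&[0u8; 32]);
    })
    .join()
    .unwrap();
}
```
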
@@ -141,6 +141,14 @@ impl BackupGroup {
     }
 }
 
+impl std::fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.backup_type();
+        let id = self.backup_id();
+        write!(f, "{}/{}", backup_type, id)
+    }
+}
+
 impl std::str::FromStr for BackupGroup {
     type Err = Error;

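
With `Display` added, a group formats as `type/id`, which is the same shape the existing `FromStr` impl parses. A round-trip sketch using a simplified stand-in for the crate's `BackupGroup` (field names here are illustrative):

```rust
use std::fmt;
use std::str::FromStr;

struct BackupGroup { backup_type: String, backup_id: String }

impl fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}", self.backup_type, self.backup_id)
    }
}

impl FromStr for BackupGroup {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (t, id) = s.split_once('/').ok_or("expected <type>/<id>")?;
        Ok(BackupGroup { backup_type: t.into(), backup_id: id.into() })
    }
}

fn main() {
    let g: BackupGroup = "vm/100".parse().unwrap();
    assert_eq!(g.to_string(), "vm/100"); // Display and FromStr round-trip
}
```
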
@@ -4,7 +4,7 @@ use std::ops::Range;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
-use std::task::{Context, Poll};
+use std::task::Context;
 use std::pin::Pin;
 
 use anyhow::{bail, format_err, Error};
@@ -13,6 +13,7 @@ use proxmox::tools::io::ReadExt;
 use proxmox::tools::uuid::Uuid;
 use proxmox::tools::vec;
 use proxmox::tools::mmap::Mmap;
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use super::chunk_stat::ChunkStat;
 use super::chunk_store::ChunkStore;
@@ -123,25 +124,6 @@ impl DynamicIndexReader {
         })
     }
 
-    #[allow(clippy::cast_ptr_alignment)]
-    pub fn chunk_info(&self, pos: usize) -> Result<ChunkReadInfo, Error> {
-        if pos >= self.index.len() {
-            bail!("chunk index out of range");
-        }
-        let start = if pos == 0 {
-            0
-        } else {
-            self.index[pos - 1].end()
-        };
-
-        let end = self.index[pos].end();
-
-        Ok(ChunkReadInfo {
-            range: start..end,
-            digest: self.index[pos].digest.clone(),
-        })
-    }
-
     #[inline]
     #[allow(clippy::cast_ptr_alignment)]
     fn chunk_end(&self, pos: usize) -> u64 {
@@ -159,24 +141,6 @@ impl DynamicIndexReader {
         &self.index[pos].digest
     }
 
-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        for entry in &self.index {
-            csum.update(&entry.end_le.to_ne_bytes());
-            csum.update(&entry.digest);
-        }
-        let csum = csum.finish();
-
-        (
-            csum,
-            self.index
-                .last()
-                .map(|entry| entry.end())
-                .unwrap_or(0)
-        )
-    }
-
     // TODO: can we use std::slice::binary_search with Mmap now?
     fn binary_search(
         &self,
@@ -224,6 +188,34 @@ impl IndexFile for DynamicIndexReader {
         self.chunk_end(self.index.len() - 1)
     }
 
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&chunk_end.to_le_bytes());
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+        (csum, chunk_end)
+    }
+
+    #[allow(clippy::cast_ptr_alignment)]
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index.len() {
+            return None;
+        }
+        let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };
+
+        let end = self.index[pos].end();
+
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: self.index[pos].digest.clone(),
+        })
+    }
 }
 
 struct CachedChunk {
@@ -263,7 +255,10 @@ struct ChunkCacher<'a, S> {
 
 impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
     fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
-        let info = self.index.chunk_info(index)?;
+        let info = match self.index.chunk_info(index) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };
         let range = info.range;
         let data = self.store.read_chunk(&info.digest)?;
         CachedChunk::new(range, data).map(Some)
@@ -416,19 +411,26 @@ impl<R: ReadChunk> LocalDynamicReadAt<R> {
     }
 }
 
-impl<R: ReadChunk> pxar::accessor::ReadAt for LocalDynamicReadAt<R> {
-    fn poll_read_at(
-        self: Pin<&Self>,
+impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
+    fn start_read_at<'a>(
+        self: Pin<&'a Self>,
         _cx: &mut Context,
-        buf: &mut [u8],
+        buf: &'a mut [u8],
         offset: u64,
-    ) -> Poll<io::Result<usize>> {
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
         use std::io::Read;
-        tokio::task::block_in_place(move || {
+        MaybeReady::Ready(tokio::task::block_in_place(move || {
             let mut reader = self.inner.lock().unwrap();
             reader.seek(SeekFrom::Start(offset))?;
-            Poll::Ready(Ok(reader.read(buf)?))
-        })
+            Ok(reader.read(buf)?)
+        }))
+    }
+
+    fn poll_complete<'a>(
+        self: Pin<&'a Self>,
+        _op: ReadAtOperation<'a>,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        panic!("LocalDynamicReadAt::start_read_at returned Pending");
     }
 }
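
This tracks the pxar 0.1.8 → 0.2.0 bump in Cargo.toml above: the `Poll`-based `poll_read_at` becomes a `start_read_at`/`poll_complete` pair returning `MaybeReady`. A reader that always completes synchronously answers `Ready` up front and never reaches `poll_complete`, which is exactly the shape of the impl above. A simplified, self-contained restatement of the pattern (stand-in types; the real trait takes `Pin<&Self>` and a `Context`):

```rust
// Two-phase read API in the spirit of pxar 0.2's ReadAt.
enum MaybeReady<T, P> {
    Ready(T),   // result available immediately
    Pending(P), // operation parked; finish it via poll_complete
}

struct ReadAtOperation; // opaque token for an in-flight read

trait ReadAtLike {
    fn start_read_at(&self, buf: &mut [u8], offset: u64)
        -> MaybeReady<std::io::Result<usize>, ReadAtOperation>;
    fn poll_complete(&self, op: ReadAtOperation)
        -> MaybeReady<std::io::Result<usize>, ReadAtOperation>;
}

// A fully synchronous reader never parks, so poll_complete is unreachable.
struct SyncReader(Vec<u8>);

impl ReadAtLike for SyncReader {
    fn start_read_at(&self, buf: &mut [u8], offset: u64)
        -> MaybeReady<std::io::Result<usize>, ReadAtOperation> {
        let start = (offset as usize).min(self.0.len());
        let n = (self.0.len() - start).min(buf.len());
        buf[..n].copy_from_slice(&self.0[start..start + n]);
        MaybeReady::Ready(Ok(n))
    }
    fn poll_complete(&self, _op: ReadAtOperation)
        -> MaybeReady<std::io::Result<usize>, ReadAtOperation> {
        unreachable!("start_read_at never returns Pending");
    }
}
```
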
@@ -1,10 +1,9 @@
 use anyhow::{bail, format_err, Error};
-use std::convert::TryInto;
 use std::io::{Seek, SeekFrom};
 
 use super::chunk_stat::*;
 use super::chunk_store::*;
-use super::IndexFile;
+use super::{IndexFile, ChunkReadInfo};
 use crate::tools::{self, epoch_now_u64};
 
 use chrono::{Local, TimeZone};
@@ -147,38 +146,6 @@ impl FixedIndexReader {
         Ok(())
     }
 
-    pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
-        if pos >= self.index_length {
-            bail!("chunk index out of range");
-        }
-        let start = (pos * self.chunk_size) as u64;
-        let mut end = start + self.chunk_size as u64;
-
-        if end > self.size {
-            end = self.size;
-        }
-
-        let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-        unsafe {
-            std::ptr::copy_nonoverlapping(
-                self.index.add(pos * 32),
-                (*digest.as_mut_ptr()).as_mut_ptr(),
-                32,
-            );
-        }
-
-        Ok((start, end, unsafe { digest.assume_init() }))
-    }
-
-    #[inline]
-    fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
-        if pos >= self.index_length {
-            panic!("chunk index out of range");
-        }
-        let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
-        slice.try_into().unwrap()
-    }
-
     #[inline]
     fn chunk_end(&self, pos: usize) -> u64 {
         if pos >= self.index_length {
@@ -193,20 +160,6 @@ impl FixedIndexReader {
         }
     }
 
-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        let mut chunk_end = 0;
-        for pos in 0..self.index_length {
-            chunk_end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            csum.update(digest);
-        }
-        let csum = csum.finish();
-
-        (csum, chunk_end)
-    }
-
     pub fn print_info(&self) {
         println!("Size: {}", self.size);
         println!("ChunkSize: {}", self.chunk_size);
@@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
     fn index_bytes(&self) -> u64 {
         self.size
     }
+
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index_length {
+            return None;
+        }
+
+        let start = (pos * self.chunk_size) as u64;
+        let mut end = start + self.chunk_size as u64;
+
+        if end > self.size {
+            end = self.size;
+        }
+
+        let digest = self.index_digest(pos).unwrap();
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: *digest,
+        })
+    }
+
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+
+        (csum, chunk_end)
+    }
 }
 
 pub struct FixedIndexWriter {
@@ -511,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
 
     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
         let index = &self.index;
-        let (start, end, digest) = index.chunk_info(idx)?;
+        let info = match index.chunk_info(idx) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };
 
         // fixme: avoid copy
-        let data = self.store.read_chunk(&digest)?;
-        if (end - start) != data.len() as u64 {
-            bail!(
-                "read chunk with wrong size ({} != {}",
-                (end - start),
-                data.len()
-            );
+        let data = self.store.read_chunk(&info.digest)?;
+        let size = info.range.end - info.range.start;
+        if size != data.len() as u64 {
+            bail!("read chunk with wrong size ({} != {}", size, data.len());
         }
 
         self.read_buffer.clear();
@@ -530,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
 
         self.buffered_chunk_idx = idx;
-        self.buffered_chunk_start = start as u64;
-        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
+        self.buffered_chunk_start = info.range.start as u64;
         Ok(())
     }
 }
@@ -1,11 +1,5 @@
 use std::collections::HashMap;
 use std::ops::Range;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use bytes::{Bytes, BytesMut};
-use anyhow::{format_err, Error};
-use futures::*;
 
 pub struct ChunkReadInfo {
     pub range: Range<u64>,
@@ -26,6 +20,10 @@ pub trait IndexFile {
     fn index_count(&self) -> usize;
     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
     fn index_bytes(&self) -> u64;
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
+
+    /// Compute index checksum and size
+    fn compute_csum(&self) -> ([u8; 32], u64);
 
     /// Returns most often used chunks
     fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
@ -59,111 +57,3 @@ pub trait IndexFile {
|
|||||||
map
|
map
|
||||||
}
|
}
|
||||||
}
|
}
|
-
-/// Encode digest list from an `IndexFile` into a binary stream
-///
-/// The reader simply returns a binary stream of 32 byte digest values.
-pub struct DigestListEncoder {
-    index: Box<dyn IndexFile + Send + Sync>,
-    pos: usize,
-    count: usize,
-}
-
-impl DigestListEncoder {
-
-    pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
-        let count = index.index_count();
-        Self { index, pos: 0, count }
-    }
-}
-
-impl std::io::Read for DigestListEncoder {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        if buf.len() < 32 {
-            panic!("read buffer too small");
-        }
-
-        if self.pos < self.count {
-            let mut written = 0;
-            loop {
-                let digest = self.index.index_digest(self.pos).unwrap();
-                buf[written..(written + 32)].copy_from_slice(digest);
-                self.pos += 1;
-                written += 32;
-                if self.pos >= self.count {
-                    break;
-                }
-                if (written + 32) >= buf.len() {
-                    break;
-                }
-            }
-            Ok(written)
-        } else {
-            Ok(0)
-        }
-    }
-}
-
-/// Decodes a Stream<Item=Bytes> into Stream<Item=<[u8;32]>
-///
-/// The reader simply returns a binary stream of 32 byte digest values.
-pub struct DigestListDecoder<S: Unpin> {
-    input: S,
-    buffer: BytesMut,
-}
-
-impl<S: Unpin> DigestListDecoder<S> {
-    pub fn new(input: S) -> Self {
-        Self { input, buffer: BytesMut::new() }
-    }
-}
-
-impl<S: Unpin> Unpin for DigestListDecoder<S> {}
-
-impl<S: Unpin, E> Stream for DigestListDecoder<S>
-where
-    S: Stream<Item=Result<Bytes, E>>,
-    E: Into<Error>,
-{
-    type Item = Result<[u8; 32], Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-        let this = self.get_mut();
-
-        loop {
-            if this.buffer.len() >= 32 {
-                let left = this.buffer.split_to(32);
-
-                let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-                unsafe {
-                    (*digest.as_mut_ptr()).copy_from_slice(&left[..]);
-                    return Poll::Ready(Some(Ok(digest.assume_init())));
-                }
-            }
-
-            match Pin::new(&mut this.input).poll_next(cx) {
-                Poll::Pending => {
-                    return Poll::Pending;
-                }
-                Poll::Ready(Some(Err(err))) => {
-                    return Poll::Ready(Some(Err(err.into())));
-                }
-                Poll::Ready(Some(Ok(data))) => {
-                    this.buffer.extend_from_slice(&data);
-                    // continue
-                }
-                Poll::Ready(None) => {
-                    let rest = this.buffer.len();
-                    if rest == 0 {
-                        return Poll::Ready(None);
-                    }
-                    return Poll::Ready(Some(Err(format_err!(
-                        "got small digest ({} != 32).",
-                        rest,
-                    ))));
-                }
-            }
-        }
-    }
-}
@@ -11,10 +11,10 @@ use super::datastore::DataStore;

 /// The ReadChunk trait allows reading backup data chunks (local or remote)
 pub trait ReadChunk {
     /// Returns the encoded chunk data
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error>;

     /// Returns the decoded chunk data
-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
 }

 #[derive(Clone)]
@@ -33,7 +33,7 @@ impl LocalChunkReader {
 }

 impl ReadChunk for LocalChunkReader {
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let (path, _) = self.store.chunk_path(digest);
         let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
         let chunk = DataBlob::from_raw(raw_data)?;
@@ -42,7 +42,7 @@ impl ReadChunk for LocalChunkReader {
         Ok(chunk)
     }

-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
         let chunk = ReadChunk::read_raw_chunk(self, digest)?;

         let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
@@ -56,20 +56,20 @@ impl ReadChunk for LocalChunkReader {
 pub trait AsyncReadChunk: Send {
     /// Returns the encoded chunk data
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;

     /// Returns the decoded chunk data
     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
 }

 impl AsyncReadChunk for LocalChunkReader {
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
         Box::pin(async move{
@@ -84,7 +84,7 @@ impl AsyncReadChunk for LocalChunkReader {
     }

     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
         Box::pin(async move {
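Dropping the `&mut self` receivers is what lets a single chunk reader be shared (for example behind an `Arc`, or cloned) between concurrent consumers without an exclusive borrow. A minimal standalone sketch of the kind of sharing this enables, with a stand-in `DummyReader` (hypothetical, for illustration only):

use std::sync::Arc;
use std::thread;

// Stand-in for the ReadChunk trait after this change: all methods
// take &self, so a shared reference is enough to read chunks.
trait ReadChunk: Send + Sync {
    fn read_chunk(&self, digest: &[u8; 32]) -> Vec<u8>;
}

struct DummyReader;

impl ReadChunk for DummyReader {
    fn read_chunk(&self, _digest: &[u8; 32]) -> Vec<u8> {
        vec![0u8; 4096] // pretend chunk data
    }
}

fn main() {
    let reader: Arc<dyn ReadChunk> = Arc::new(DummyReader);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let reader = Arc::clone(&reader);
            // With &self receivers, concurrent readers need no locking here.
            thread::spawn(move || reader.read_chunk(&[0u8; 32]).len())
        })
        .collect();
    for h in handles {
        assert_eq!(h.join().unwrap(), 4096);
    }
}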
src/backup/verify.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
use anyhow::{bail, Error};

use crate::server::WorkerTask;

use super::{
    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
    FileInfo, ArchiveType, archive_type,
};

fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;

    let csum = openssl::sha::sha256(blob.raw_data());
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    blob.verify_crc()?;

    let magic = blob.magic();

    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
        return Ok(());
    }

    blob.decode(None)?;

    Ok(())
}

fn verify_index_chunks(
    datastore: &DataStore,
    index: Box<dyn IndexFile>,
    worker: &WorkerTask,
) -> Result<(), Error> {

    for pos in 0..index.index_count() {

        worker.fail_on_abort()?;

        let info = index.chunk_info(pos).unwrap();
        let size = info.range.end - info.range.start;
        datastore.verify_stored_chunk(&info.digest, size)?;
    }

    Ok(())
}

fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_fixed_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_dynamic_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

/// Verify a single backup snapshot
///
/// This checks all archives inside a backup snapshot.
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));

    let mut error_count = 0;

    for info in manifest.files() {
        let result = proxmox::try_block!({
            worker.log(format!("  check {}", info.filename));
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
            }
        });

        worker.fail_on_abort()?;

        if let Err(err) = result {
            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a backup group
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {

    let mut list = match group.list_backups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify group {}:{}", datastore.name(), group));

    let mut error_count = 0;

    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a datastore
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {

    let list = match BackupGroup::list_groups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
            return Ok(false);
        }
    };

    worker.log(format!("verify datastore {}", datastore.name()));

    let mut error_count = 0;
    for group in list {
        if !verify_backup_group(datastore, &group, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}
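All three entry points share one convention: Ok(true) means clean, Ok(false) means errors were found and logged, and Err(_) is reserved for task aborts. A caller therefore only propagates the Err case; a minimal sketch of such a driver (the `run_verify` wrapper is hypothetical, assuming `datastore` and `worker` are available as in the functions above):

// Hypothetical driver around verify_all_backups; not part of this diff.
fn run_verify(datastore: &DataStore, worker: &WorkerTask) -> Result<(), Error> {
    // Err(_) only escapes on task abort; verification failures come
    // back as Ok(false) and are already in the worker log.
    if verify_all_backups(datastore, worker)? {
        worker.log(format!("verify datastore {} OK", datastore.name()));
    } else {
        worker.log(format!("verify datastore {} finished with errors", datastore.name()));
    }
    Ok(())
}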
(One file's diff is suppressed because it is too large.)
@@ -319,6 +319,40 @@ async fn pull_datastore(
     Ok(Value::Null)
 }

+#[api(
+    input: {
+        properties: {
+            "store": {
+                schema: DATASTORE_SCHEMA,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Verify backups
+async fn verify(
+    store: String,
+    param: Value,
+) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let mut client = connect()?;
+
+    let args = json!({});
+
+    let path = format!("api2/json/admin/datastore/{}/verify", store);
+
+    let result = client.post(&path, Some(args)).await?;
+
+    view_task_result(client, result, &output_format).await?;
+
+    Ok(Value::Null)
+}

 fn main() {

     proxmox_backup::tools::setup_safe_path_env();
@@ -342,8 +376,16 @@ fn main() {
         .completion_cb("local-store", config::datastore::complete_datastore_name)
         .completion_cb("remote", config::remote::complete_remote_name)
         .completion_cb("remote-store", complete_remote_datastore_name)
+        )
+        .insert(
+            "verify",
+            CliCommand::new(&API_METHOD_VERIFY)
+                .arg_param(&["store"])
+                .completion_cb("store", config::datastore::complete_datastore_name)
         );

     let mut rpcenv = CliEnvironment::new();
     rpcenv.set_user(Some(String::from("root@pam")));
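With the subcommand registered above, a verification task can be started from the shell and its log followed via `view_task_result`; a hypothetical invocation (the datastore name `store1` is made up):

# start a verify task on a datastore and stream its log
proxmox-backup-manager verify store1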
src/bin/proxmox_backup_client/benchmark.rs (new file, 81 lines)
@@ -0,0 +1,81 @@
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::api;

use proxmox_backup::backup::{
    load_and_decrypt_key,
    CryptConfig,
};

use proxmox_backup::client::*;

use crate::{
    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
    extract_repository_from_value,
    record_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Run benchmark tests
pub async fn benchmark(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        "host",
        "benchmark",
        backup_time,
        false,
    ).await?;

    println!("Start upload speed test");
    let speed = client.upload_speedtest().await?;

    println!("Upload speed: {} MiB/s", speed);

    Ok(())
}
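A hypothetical invocation (the repository string is made up); the command opens a throwaway `host/benchmark` writer session and reports upload throughput:

proxmox-backup-client benchmark --repository root@pam@pbs.example.org:store1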
src/bin/proxmox_backup_client/catalog.rs (new file, 245 lines)
@@ -0,0 +1,245 @@
use std::os::unix::fs::OpenOptionsExt;
use std::io::{Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*};

use proxmox_backup::tools;

use proxmox_backup::client::*;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    record_repository,
    load_and_decrypt_key,
    api_datastore_latest_snapshot,
    complete_repository,
    complete_backup_snapshot,
    complete_group_or_snapshot,
    complete_pxar_archive_name,
    connect,
    BackupDir,
    BackupGroup,
    BufferedDynamicReader,
    BufferedDynamicReadAt,
    CatalogReader,
    CATALOG_NAME,
    CryptConfig,
    DynamicIndexReader,
    IndexFile,
    Shell,
};

use crate::key::get_encryption_key_password;

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "snapshot": {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            "repository": {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let manifest = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

    client.download(CATALOG_NAME, &mut tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}

pub fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}
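Assuming the map returned by `catalog_mgmt_cli` is mounted under a `catalog` prefix in the client binary, usage would look like this (snapshot and archive names are made up):

# dump the file catalog of a snapshot
proxmox-backup-client catalog dump host/myhost/2020-07-07T00:00:00Z

# open the interactive restore shell on a pxar archive
proxmox-backup-client catalog shell host/myhost root.pxar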
src/bin/proxmox_backup_client/key.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
use std::path::PathBuf;

use anyhow::{bail, Error};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize};
use xdg::BaseDirectories;

use proxmox::api::api;
use proxmox::api::cli::{CliCommand, CliCommandMap};
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};

use proxmox_backup::backup::{
    encrypt_key_with_passphrase, load_and_decrypt_key, store_key_config, KeyConfig,
};
use proxmox_backup::tools;

pub fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

pub fn default_encryption_key_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/encryption-key.json
    let path = base.place_config_file("encryption-key.json")?;

    Ok(path)
}

pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

/// Convenience helper to get the default key file path only if it exists.
pub fn optional_default_key_path() -> Result<Option<PathBuf>, Error> {
    let path = default_encryption_key_path()?;
    Ok(if path.exists() {
        Some(path)
    } else {
        None
    })
}

#[api(
    default: "scrypt",
)]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,

    /// Encrypt the key with a password using SCrypt.
    Scrypt,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description:
                    "Output file. Without this the key will become the new default encryption key.",
                optional: true,
            }
        },
    },
)]
/// Create a new encryption key.
fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => default_encryption_key_path()?,
    };

    let kdf = kdf.unwrap_or_default();

    let key = proxmox::sys::linux::random_data(32)?;

    match kdf {
        Kdf::None => {
            let created = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                false,
                KeyConfig {
                    kdf: None,
                    created,
                    modified: created,
                    data: key,
                },
            )?;
        }
        Kdf::Scrypt => {
            // always read passphrase from tty
            if !tty::stdin_isatty() {
                bail!("unable to read passphrase - no tty");
            }

            let password = tty::read_and_verify_password("Encryption Key Password: ")?;

            let key_config = encrypt_key_with_passphrase(&key, &password)?;

            store_key_config(&path, false, key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description: "Key file. Without this the default key's password will be changed.",
                optional: true,
            }
        },
    },
)]
/// Change the encryption key's password.
fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => default_encryption_key_path()?,
    };

    let kdf = kdf.unwrap_or_default();

    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    match kdf {
        Kdf::None => {
            let modified = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                true,
                KeyConfig {
                    kdf: None,
                    created, // keep original value
                    modified,
                    data: key.to_vec(),
                },
            )?;
        }
        Kdf::Scrypt => {
            let password = tty::read_and_verify_password("New Password: ")?;

            let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
            new_key_config.created = created; // keep original value

            store_key_config(&path, true, new_key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Path to the PEM formatted RSA public key.",
            },
        },
    },
)]
/// Import an RSA public key used to put an encrypted version of the symmetric backup encryption
/// key onto the backup server along with each backup.
fn import_master_pubkey(path: String) -> Result<(), Error> {
    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(())
}

#[api]
/// Create an RSA public/private key pair used to put an encrypted version of the symmetric backup
/// encryption key onto the backup server along with each backup.
fn create_master_key() -> Result<(), Error> {
    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(())
}

pub fn cli() -> CliCommandMap {
    let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
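Assuming the map returned by `cli()` is mounted under a `key` prefix in the client binary, typical usage would be (the explicit path is made up):

# create a new default encryption key, scrypt-protected by a passphrase
proxmox-backup-client key create

# write an unprotected key to an explicit path instead
proxmox-backup-client key create --kdf none /root/backup-key.json

# re-encrypt the default key under a new passphrase
proxmox-backup-client key change-passphrase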
src/bin/proxmox_backup_client/mod.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;

pub mod key;
src/bin/proxmox_backup_client/mount.rs (new file, 195 lines)
@@ -0,0 +1,195 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::os::unix::io::RawFd;
use std::path::Path;
use std::ffi::OsStr;

use anyhow::{bail, format_err, Error};
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};
use nix::unistd::{fork, ForkResult, pipe};
use futures::select;
use futures::future::FutureExt;

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};

use proxmox_backup::tools;
use proxmox_backup::backup::{
    load_and_decrypt_key,
    CryptConfig,
    IndexFile,
    BackupDir,
    BackupGroup,
    BufferedDynamicReader,
};

use proxmox_backup::client::*;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    complete_pxar_archive_name,
    complete_group_or_snapshot,
    complete_repository,
    record_repository,
    connect,
    api_datastore_latest_snapshot,
    BufferedDynamicReadAt,
};

#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&mount),
    &ObjectSchema::new(
        "Mount pxar archive.",
        &sorted!([
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("target", false, &StringSchema::new("Target directory path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
        ]),
    )
);

pub fn mount_cmd_def() -> CliCommand {

    CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name)
}

fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: proxmox_backup::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
        let options = OsStr::new("ro,default_permissions");

        let session = proxmox_backup::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let mut interrupt = signal(SignalKind::interrupt())?;
        select! {
            res = session.fuse() => res?,
            _ = interrupt.recv().fuse() => {
                // exit on interrupted
            }
        }
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
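Assuming `mount_cmd_def` is wired into the client binary as a `mount` command, usage would look like this (snapshot, archive, and mountpoint are made up):

# mount a pxar archive; the process daemonizes once the FUSE mount is up
proxmox-backup-client mount host/myhost/2020-07-07T00:00:00Z root.pxar /mnt/restore

# stay in the foreground with debug output instead
proxmox-backup-client mount host/myhost/2020-07-07T00:00:00Z root.pxar /mnt/restore --verbose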
src/bin/proxmox_backup_client/task.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, cli::*};

use proxmox_backup::tools;

use proxmox_backup::client::*;
use proxmox_backup::api2::types::UPID_SCHEMA;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    complete_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            limit: {
                description: "The maximal number of tasks to list.",
                type: Integer,
                optional: true,
                minimum: 1,
                maximum: 1000,
                default: 50,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = connect(repo.host(), repo.user())?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}

pub fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
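Assuming the map returned by `task_mgmt_cli` is mounted under a `task` prefix, usage would be (the UPID placeholder must be replaced by a real task ID):

# list the most recent running tasks for this repository user
proxmox-backup-client task list

# include stopped tasks as well
proxmox-backup-client task list --all

# follow a task log, or abort a running task
proxmox-backup-client task log <upid>
proxmox-backup-client task stop <upid>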
@@ -86,7 +86,7 @@ pub fn datastore_commands() -> CommandLineInterface {
                 .completion_cb("name", config::datastore::complete_datastore_name)
                 .completion_cb("gc-schedule", config::datastore::complete_calendar_event)
                 .completion_cb("prune-schedule", config::datastore::complete_calendar_event)
             )
         .insert("remove",
             CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
                 .arg_param(&["name"])
@@ -1,4 +1,5 @@
 use std::collections::HashSet;
+use std::os::unix::fs::OpenOptionsExt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

@@ -22,6 +23,7 @@ pub struct BackupWriter {
     h2: H2Client,
     abort: AbortHandle,
     verbose: bool,
+    crypt_config: Option<Arc<CryptConfig>>,
 }

 impl Drop for BackupWriter {
@@ -38,12 +40,13 @@ pub struct BackupStats {

 impl BackupWriter {

-    fn new(h2: H2Client, abort: AbortHandle, verbose: bool) -> Arc<Self> {
-        Arc::new(Self { h2, abort, verbose })
+    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
+        Arc::new(Self { h2, abort, crypt_config, verbose })
     }

     pub async fn start(
         client: HttpClient,
+        crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
         backup_type: &str,
         backup_id: &str,
@@ -64,7 +67,7 @@ impl BackupWriter {

         let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;

-        Ok(BackupWriter::new(h2, abort, debug))
+        Ok(BackupWriter::new(h2, abort, crypt_config, debug))
     }

     pub async fn get(
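After this change the crypt config is bound once when the writer session is opened, instead of being threaded through every upload call. A minimal sketch of the new call shape (datastore and backup ID are made up; `client`, `crypt_config`, and `backup_time` are assumed to be set up as elsewhere in this diff):

// Hypothetical call site for the new signature.
let writer = BackupWriter::start(
    client,
    crypt_config.clone(), // stored in the writer, reused by all uploads
    "store1",             // datastore
    "host",               // backup type
    "myhost",             // backup id
    backup_time,
    false,                // debug
).await?;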
@@ -159,16 +162,19 @@ impl BackupWriter {
         &self,
         data: Vec<u8>,
         file_name: &str,
-        crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
-        sign_only: bool,
+        crypt_or_sign: Option<bool>,
     ) -> Result<BackupStats, Error> {

-        let blob = if let Some(ref crypt_config) = crypt_config {
-            if sign_only {
-                DataBlob::create_signed(&data, crypt_config, compress)?
-            } else {
-                DataBlob::encode(&data, Some(crypt_config), compress)?
-            }
+        let blob = if let Some(ref crypt_config) = self.crypt_config {
+            if let Some(encrypt) = crypt_or_sign {
+                if encrypt {
+                    DataBlob::encode(&data, Some(crypt_config), compress)?
+                } else {
+                    DataBlob::create_signed(&data, crypt_config, compress)?
+                }
+            } else {
+                DataBlob::encode(&data, None, compress)?
+            }
         } else {
             DataBlob::encode(&data, None, compress)?
@@ -187,8 +193,8 @@ impl BackupWriter {
         &self,
         src_path: P,
         file_name: &str,
-        crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
+        crypt_or_sign: Option<bool>,
     ) -> Result<BackupStats, Error> {

         let src_path = src_path.as_ref();
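The `crypt_or_sign: Option<bool>` parameter folds the old two-flag setup into a single value. Read against the body above: `None` means plain (possibly compressed) encoding, `Some(true)` encrypts, `Some(false)` only signs, and without a configured crypt config everything degrades to plain encoding. A standalone restatement of that decision table (the string results are illustrative stand-ins, not the real DataBlob API):

fn encode_mode(has_crypt_config: bool, crypt_or_sign: Option<bool>) -> &'static str {
    match (has_crypt_config, crypt_or_sign) {
        (true, Some(true)) => "encrypted",
        (true, Some(false)) => "signed",
        (true, None) => "plain",
        (false, _) => "plain", // no key material: nothing to encrypt or sign with
    }
}

fn main() {
    assert_eq!(encode_mode(true, Some(true)), "encrypted");
    assert_eq!(encode_mode(true, Some(false)), "signed");
    assert_eq!(encode_mode(false, Some(true)), "plain");
}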
@@ -203,25 +209,16 @@ impl BackupWriter {
             .await
             .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

-        let blob = DataBlob::encode(&contents, crypt_config.as_ref().map(AsRef::as_ref), compress)?;
-        let raw_data = blob.into_inner();
-        let size = raw_data.len() as u64;
-        let csum = openssl::sha::sha256(&raw_data);
-        let param = json!({
-            "encoded-size": size,
-            "file-name": file_name,
-        });
-        self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
-        Ok(BackupStats { size, csum })
+        self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
     }

     pub async fn upload_stream(
         &self,
+        previous_manifest: Option<Arc<BackupManifest>>,
         archive_name: &str,
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         prefix: &str,
         fixed_size: Option<u64>,
-        crypt_config: Option<Arc<CryptConfig>>,
     ) -> Result<BackupStats, Error> {
         let known_chunks = Arc::new(Mutex::new(HashSet::new()));
@ -233,7 +230,18 @@ impl BackupWriter {
|
|||||||
let index_path = format!("{}_index", prefix);
|
let index_path = format!("{}_index", prefix);
|
||||||
let close_path = format!("{}_close", prefix);
|
let close_path = format!("{}_close", prefix);
|
||||||
|
|
||||||
self.download_chunk_list(&index_path, archive_name, known_chunks.clone()).await?;
|
if let Some(manifest) = previous_manifest {
|
||||||
|
// try, but ignore errors
|
||||||
|
match archive_type(archive_name) {
|
||||||
|
Ok(ArchiveType::FixedIndex) => {
|
||||||
|
let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||||
|
}
|
||||||
|
Ok(ArchiveType::DynamicIndex) => {
|
||||||
|
let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||||
|
}
|
||||||
|
_ => { /* do nothing */ }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
|
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
|
||||||
|
|
||||||
@ -244,7 +252,7 @@ impl BackupWriter {
|
|||||||
stream,
|
stream,
|
||||||
&prefix,
|
&prefix,
|
||||||
known_chunks.clone(),
|
known_chunks.clone(),
|
||||||
crypt_config,
|
self.crypt_config.clone(),
|
||||||
self.verbose,
|
self.verbose,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
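Seeding `known_chunks` from the previous snapshot's index replaces the old `download_chunk_list` round-trip; failures are deliberately ignored because a missing previous index only costs re-uploading chunks, never correctness. The dispatch relies on the archive name suffix; a self-contained sketch of that mapping (assuming the usual `.fidx`/`.didx`/`.blob` suffixes, since `archive_type` itself is not part of this hunk):

    // Sketch: suffix-based dispatch, assuming the conventional archive suffixes.
    #[derive(Debug, PartialEq)]
    enum ArchiveType { FixedIndex, DynamicIndex, Blob }

    fn archive_type(name: &str) -> Result<ArchiveType, String> {
        if name.ends_with(".fidx") {
            Ok(ArchiveType::FixedIndex)   // fixed-size chunks (block images)
        } else if name.ends_with(".didx") {
            Ok(ArchiveType::DynamicIndex) // dynamically sized chunks (pxar)
        } else if name.ends_with(".blob") {
            Ok(ArchiveType::Blob)
        } else {
            Err(format!("unknown archive type: {}", name))
        }
    }

    fn main() {
        assert_eq!(archive_type("root.pxar.didx"), Ok(ArchiveType::DynamicIndex));
        assert_eq!(archive_type("disk.img.fidx"), Ok(ArchiveType::FixedIndex));
    }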
@@ -374,41 +382,93 @@ impl BackupWriter {
         (verify_queue_tx, verify_result_rx)
     }

-    pub async fn download_chunk_list(
+    pub async fn download_previous_fixed_index(
         &self,
-        path: &str,
         archive_name: &str,
+        manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    ) -> Result<(), Error> {
+    ) -> Result<FixedIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;

         let param = json!({ "archive-name": archive_name });
-        let request = H2Client::request_builder("localhost", "GET", path, Some(param), None).unwrap();
-
-        let h2request = self.h2.send_request(request, None).await?;
-        let resp = h2request.await?;
-
-        let status = resp.status();
-
-        if !status.is_success() {
-            H2Client::h2api_response(resp).await?; // raise error
-            unreachable!();
-        }
-
-        let mut body = resp.into_body();
-        let mut flow_control = body.flow_control().clone();
-
-        let mut stream = DigestListDecoder::new(body.map_err(Error::from));
-
-        while let Some(chunk) = stream.try_next().await? {
-            let _ = flow_control.release_capacity(chunk.len());
-            known_chunks.lock().unwrap().insert(chunk);
+        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+
+        let index = FixedIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+        // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
         }

         if self.verbose {
-            println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
+            println!("{}: known chunks list length is {}", archive_name, index.index_count());
         }

-        Ok(())
+        Ok(index)
+    }
+
+    pub async fn download_previous_dynamic_index(
+        &self,
+        archive_name: &str,
+        manifest: &BackupManifest,
+        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    ) -> Result<DynamicIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        let param = json!({ "archive-name": archive_name });
+        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+
+        let index = DynamicIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
+        // Note: do not use values stored in index (not trusted) - instead, compute them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
+        }
+
+        if self.verbose {
+            println!("{}: known chunks list length is {}", archive_name, index.index_count());
+        }
+
+        Ok(index)
+    }
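Both helpers download the previous index into an anonymous temporary file: `O_TMPFILE` opened against a directory creates an unlinked inode that vanishes as soon as the handle is dropped, so no cleanup path is needed. A self-contained sketch of that pattern (Linux-only, and it assumes the `libc` crate as a dependency):

    // Sketch: anonymous temp file via O_TMPFILE, as used for the index download.
    // Linux-only; assumes the libc crate. The file has no directory entry and
    // is reclaimed automatically once the handle is dropped.
    use std::io::{Read, Seek, SeekFrom, Write};
    use std::os::unix::fs::OpenOptionsExt;

    fn main() -> std::io::Result<()> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        tmpfile.write_all(b"downloaded index data")?;
        tmpfile.seek(SeekFrom::Start(0))?;

        let mut buf = String::new();
        tmpfile.read_to_string(&mut buf)?;
        assert_eq!(buf, "downloaded index data");
        Ok(())
    }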
+
+    /// Download backup manifest (index.json) of last backup
+    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
+
+        use std::convert::TryFrom;
+
+        let mut raw_data = Vec::with_capacity(64 * 1024);
+
+        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
+        self.h2.download("previous", Some(param), &mut raw_data).await?;
+
+        let blob = DataBlob::from_raw(raw_data)?;
+        blob.verify_crc()?;
+        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+        let json: Value = serde_json::from_slice(&data[..])?;
+        let manifest = BackupManifest::try_from(json)?;
+
+        Ok(manifest)
     }
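The manifest round-trip is: raw blob, CRC check, optional decrypt, JSON, typed `BackupManifest`; only after `verify_file` succeeds are a downloaded index's contents trusted. A sketch of a size lookup against a manifest-shaped JSON document (`verify_file` here is a simplified stand-in for `BackupManifest::verify_file`, and the JSON layout is assumed for illustration):

    // Sketch: manifest decode path, bytes -> JSON -> typed lookup.
    // `verify_file` is a hypothetical stand-in; the field names are assumptions.
    use serde_json::Value;

    fn verify_file(manifest: &Value, name: &str, size: u64) -> Result<(), String> {
        let files = manifest["files"].as_array().ok_or("no files array")?;
        let entry = files
            .iter()
            .find(|f| f["filename"] == name)
            .ok_or_else(|| format!("file '{}' not in manifest", name))?;
        if entry["size"].as_u64() == Some(size) {
            Ok(())
        } else {
            Err(format!("size mismatch for '{}'", name))
        }
    }

    fn main() {
        let manifest: Value = serde_json::from_str(
            r#"{ "files": [ { "filename": "root.pxar.didx", "size": 123 } ] }"#,
        ).unwrap();
        assert!(verify_file(&manifest, "root.pxar.didx", 123).is_ok());
        assert!(verify_file(&manifest, "root.pxar.didx", 999).is_err());
    }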

     fn upload_chunk_info_stream(

@@ -1,7 +1,7 @@
 use std::future::Future;
 use std::collections::HashMap;
 use std::pin::Pin;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};

 use anyhow::Error;

@@ -10,11 +10,12 @@ use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
 use crate::tools::runtime::block_on;

 /// Read chunks from remote host using ``BackupReader``
+#[derive(Clone)]
 pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     cache_hint: HashMap<[u8; 32], usize>,
-    cache: HashMap<[u8; 32], Vec<u8>>,
+    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
 }

 impl RemoteChunkReader {
@@ -30,11 +31,11 @@ impl RemoteChunkReader {
             client,
             crypt_config,
             cache_hint,
-            cache: HashMap::new(),
+            cache: Arc::new(Mutex::new(HashMap::new())),
         }
     }

-    pub async fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);

         self.client
@@ -49,12 +50,12 @@ impl RemoteChunkReader {
     }

 impl ReadChunk for RemoteChunkReader {
-    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         block_on(Self::read_raw_chunk(self, digest))
     }

-    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
-        if let Some(raw_data) = self.cache.get(digest) {
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
             return Ok(raw_data.to_vec());
         }

@@ -66,7 +67,7 @@ impl ReadChunk for RemoteChunkReader {

         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
-            self.cache.insert(*digest, raw_data.to_vec());
+            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
         }

         Ok(raw_data)
@@ -75,18 +76,18 @@ impl ReadChunk for RemoteChunkReader {

 impl AsyncReadChunk for RemoteChunkReader {
     fn read_raw_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
         Box::pin(Self::read_raw_chunk(self, digest))
     }

     fn read_chunk<'a>(
-        &'a mut self,
+        &'a self,
         digest: &'a [u8; 32],
     ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
         Box::pin(async move {
-            if let Some(raw_data) = self.cache.get(digest) {
+            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
                 return Ok(raw_data.to_vec());
             }

@@ -98,7 +99,7 @@ impl AsyncReadChunk for RemoteChunkReader {

         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
-            self.cache.insert(*digest, raw_data.to_vec());
+            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
         }

         Ok(raw_data)
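Moving the cache behind `Arc<Mutex<...>>` is what allows the trait methods to take `&self` instead of `&mut self`, and it makes the new `#[derive(Clone)]` cheap: clones share one cache through the lock rather than copying it. A minimal sketch of the pattern:

    // Sketch: a cloneable reader whose clones share one interior-mutable cache.
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    #[derive(Clone)]
    struct Reader {
        cache: Arc<Mutex<HashMap<u32, Vec<u8>>>>,
    }

    impl Reader {
        fn read(&self, key: u32) -> Vec<u8> {
            if let Some(hit) = self.cache.lock().unwrap().get(&key) {
                return hit.clone();
            }
            let data = vec![key as u8; 4]; // stand-in for the remote fetch
            self.cache.lock().unwrap().insert(key, data.clone());
            data
        }
    }

    fn main() {
        let a = Reader { cache: Arc::new(Mutex::new(HashMap::new())) };
        let b = a.clone();
        a.read(7);
        // the clone sees the entry cached through `a`
        assert!(b.cache.lock().unwrap().contains_key(&7));
    }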
@@ -2,7 +2,7 @@ use std::collections::{HashSet, HashMap};
 use std::convert::TryFrom;
 use std::ffi::{CStr, CString, OsStr};
 use std::fmt;
-use std::io::{self, Write};
+use std::io::{self, Read, Write};
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
 use std::path::{Path, PathBuf};
@@ -20,6 +20,7 @@ use pxar::encoder::LinkOffset;
 use proxmox::c_str;
 use proxmox::sys::error::SysError;
 use proxmox::tools::fd::RawFdNum;
+use proxmox::tools::vec;

 use crate::pxar::catalog::BackupCatalogWriter;
 use crate::pxar::Flags;
@@ -35,6 +36,7 @@ fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
     Ok(fs_stat.f_type)
 }

+#[rustfmt::skip]
 pub fn is_virtual_file_system(magic: i64) -> bool {
     use proxmox::sys::linux::magic::*;

@@ -114,6 +116,7 @@ struct Archiver<'a, 'b> {
     device_set: Option<HashSet<u64>>,
     hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
     errors: ErrorReporter,
+    file_copy_buffer: Vec<u8>,
 }

 type Encoder<'a, 'b> = pxar::encoder::Encoder<'a, &'b mut dyn pxar::encoder::SeqWrite>;
@@ -178,6 +181,7 @@ where
         device_set,
         hardlinks: HashMap::new(),
         errors: ErrorReporter,
+        file_copy_buffer: vec::undefined(4 * 1024 * 1024),
     };

     archiver.archive_dir_contents(&mut encoder, source_dir, true)?;
@@ -244,11 +248,15 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }

     /// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
+    ///
+    /// The `existed` flag is set when iterating through a directory to note that we know the file
+    /// is supposed to exist and we should warn if it doesn't.
     fn open_file(
         &mut self,
         parent: RawFd,
         file_name: &CStr,
         oflags: OFlag,
+        existed: bool,
     ) -> Result<Option<Fd>, Error> {
         match Fd::openat(
             &unsafe { RawFdNum::from_raw_fd(parent) },
@@ -257,9 +265,14 @@ impl<'a, 'b> Archiver<'a, 'b> {
             Mode::empty(),
         ) {
             Ok(fd) => Ok(Some(fd)),
-            Err(nix::Error::Sys(Errno::ENOENT)) => Ok(None),
+            Err(nix::Error::Sys(Errno::ENOENT)) => {
+                if existed {
+                    self.report_vanished_file()?;
+                }
+                Ok(None)
+            }
             Err(nix::Error::Sys(Errno::EACCES)) => {
-                write!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
+                writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
                 Ok(None)
             }
             Err(other) => Err(Error::from(other)),
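The `existed` flag separates "never existed" from "vanished between readdir and open": names that came from a directory listing warrant a warning on `ENOENT`, while opportunistic opens such as `.pxarexclude` stay silent; both remain soft failures. A sketch of that decision using `std::io` errors in place of `nix` (the real code matches on `Errno` values):

    // Sketch of the `existed` distinction; uses std::io errors instead of nix.
    use std::io::{Error, ErrorKind};

    fn open_file(result: Result<u32, Error>, existed: bool) -> Result<Option<u32>, Error> {
        match result {
            Ok(fd) => Ok(Some(fd)),
            Err(e) if e.kind() == ErrorKind::NotFound => {
                if existed {
                    eprintln!("warning: file vanished while reading directory");
                }
                Ok(None) // soft failure either way
            }
            Err(e) if e.kind() == ErrorKind::PermissionDenied => {
                eprintln!("failed to open file: access denied");
                Ok(None)
            }
            Err(other) => Err(other),
        }
    }

    fn main() {
        // opportunistic open of .pxarexclude: ENOENT stays silent
        let r = open_file(Err(Error::from(ErrorKind::NotFound)), false);
        assert!(matches!(r, Ok(None)));
    }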
@@ -271,6 +284,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             parent,
             c_str!(".pxarexclude"),
             OFlag::O_RDONLY | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
+            false,
         )?;

         let old_pattern_count = self.patterns.len();
@@ -283,7 +297,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             let line = match line {
                 Ok(line) => line,
                 Err(err) => {
-                    let _ = write!(
+                    let _ = writeln!(
                         self.errors,
                         "ignoring .pxarexclude after read error in {:?}: {}",
                         self.path,
@@ -303,7 +317,7 @@ impl<'a, 'b> Archiver<'a, 'b> {
             match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Exclude) {
                 Ok(pattern) => self.patterns.push(pattern),
                 Err(err) => {
-                    let _ = write!(self.errors, "bad pattern in {:?}: {}", self.path, err);
+                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
                 }
             }
         }
@@ -406,7 +420,25 @@ impl<'a, 'b> Archiver<'a, 'b> {
     }

     fn report_vanished_file(&mut self) -> Result<(), Error> {
-        write!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
+        writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
+        Ok(())
+    }
+
+    fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
+        writeln!(
+            self.errors,
+            "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
+            self.path,
+        )?;
+        Ok(())
+    }
+
+    fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
+        writeln!(
+            self.errors,
+            "warning: file size increased while reading: {:?}, file will be truncated!",
+            self.path,
+        )?;
         Ok(())
     }

@@ -430,14 +462,12 @@ impl<'a, 'b> Archiver<'a, 'b> {
             parent,
             c_file_name,
             open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
+            true,
         )?;

         let fd = match fd {
             Some(fd) => fd,
-            None => {
-                self.report_vanished_file()?;
-                return Ok(());
-            }
+            None => return Ok(()),
         };

         let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic)?;
@@ -591,8 +621,29 @@ impl<'a, 'b> Archiver<'a, 'b> {
         file_size: u64,
     ) -> Result<LinkOffset, Error> {
         let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
-        let offset = encoder.add_file(metadata, file_name, file_size, &mut file)?;
-        Ok(offset)
+        let mut remaining = file_size;
+        let mut out = encoder.create_file(metadata, file_name, file_size)?;
+        while remaining != 0 {
+            let mut got = file.read(&mut self.file_copy_buffer[..])?;
+            if got as u64 > remaining {
+                self.report_file_grew_while_reading()?;
+                got = remaining as usize;
+            }
+            out.write_all(&self.file_copy_buffer[..got])?;
+            remaining -= got as u64;
+        }
+        if remaining > 0 {
+            self.report_file_shrunk_while_reading()?;
+            let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
+            vec::clear(&mut self.file_copy_buffer[..to_zero]);
+            while remaining != 0 {
+                let fill = remaining.min(self.file_copy_buffer.len() as u64) as usize;
+                out.write_all(&self.file_copy_buffer[..fill])?;
+                remaining -= fill as u64;
+            }
+        }
+
+        Ok(out.file_offset())
     }
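The copy loop streams the file through `file_copy_buffer` and reconciles what it reads against the size recorded at `stat` time: extra bytes are truncated with a warning, and missing bytes are padded with zeros so the encoder still receives exactly `file_size` bytes. The padding branch is only reachable if the read loop stops early, so a defensive variant breaks out explicitly when `read` returns 0 at EOF; a self-contained sketch under that assumption (the `report_*` hooks are reduced to `eprintln!`):

    // Sketch of the size-reconciling copy; breaks on EOF so a shrunk file
    // falls through to the zero-padding path instead of looping forever.
    use std::io::{Read, Write};

    fn copy_exact(mut src: impl Read, mut dst: impl Write, file_size: u64) -> std::io::Result<()> {
        let mut buf = vec![0u8; 64 * 1024];
        let mut remaining = file_size;
        while remaining != 0 {
            let mut got = src.read(&mut buf)?;
            if got == 0 {
                break; // EOF before the recorded size: the file shrunk
            }
            if got as u64 > remaining {
                eprintln!("warning: file grew while reading, truncating");
                got = remaining as usize;
            }
            dst.write_all(&buf[..got])?;
            remaining -= got as u64;
        }
        if remaining > 0 {
            eprintln!("warning: file shrunk while reading, padding with zeros");
            let zeros = vec![0u8; 64 * 1024];
            while remaining != 0 {
                let fill = remaining.min(zeros.len() as u64) as usize;
                dst.write_all(&zeros[..fill])?;
                remaining -= fill as u64;
            }
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        let mut out = Vec::new();
        copy_exact(&[1u8, 2, 3][..], &mut out, 5)?; // 3 real bytes + 2 zero pad
        assert_eq!(out, vec![1, 2, 3, 0, 0]);
        Ok(())
    }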

     fn add_symlink(

@@ -323,7 +323,7 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb

     if let Some(query_str) = parts.uri.query() {
         for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
-            if k == "debug" && v == "1" || v == "true" {
+            if k == "debug" && v != "0" && v != "false" {
                 debug = true;
             }
         }
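The old condition parsed as `(k == "debug" && v == "1") || v == "true"` because `&&` binds tighter than `||`, so any query parameter whose value was "true" switched debug on; the new form also accepts any value other than "0"/"false" for the `debug` key. A tiny sketch of the precedence trap:

    // Sketch of the precedence fix: `&&` binds tighter than `||`.
    fn old(k: &str, v: &str) -> bool { k == "debug" && v == "1" || v == "true" }
    fn fixed(k: &str, v: &str) -> bool { k == "debug" && v != "0" && v != "false" }

    fn main() {
        assert!(old("verbose", "true"));    // bug: unrelated key enabled debug
        assert!(!fixed("verbose", "true")); // fixed: the key must be "debug"
        assert!(fixed("debug", "1") && fixed("debug", "true"));
    }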
@@ -213,6 +213,8 @@ pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
         Some(rest) => {
             if rest == "OK" {
                 status = String::from(rest);
+            } else if rest.starts_with("WARNINGS: ") {
+                status = String::from(rest);
             } else if rest.starts_with("ERROR: ") {
                 status = String::from(&rest[7..]);
             }
@@ -234,7 +236,7 @@ pub struct TaskListInfo {
     pub upid_str: String,
     /// Task `(endtime, status)` if already finished
     ///
-    /// The `status` ise iether `unknown`, `OK`, or `ERROR: ...`
+    /// The `status` is either `unknown`, `OK`, `WARN`, or `ERROR: ...`
     pub state: Option<(i64, String)>, // endtime, status
 }

@@ -385,6 +387,7 @@ impl std::fmt::Display for WorkerTask {
 struct WorkerTaskData {
     logger: FileLogger,
     progress: f64, // 0..1
+    warn_count: u64,
     pub abort_listeners: Vec<oneshot::Sender<()>>,
 }

@@ -424,6 +427,7 @@ impl WorkerTask {
         data: Mutex::new(WorkerTaskData {
             logger,
             progress: 0.0,
+            warn_count: 0,
             abort_listeners: vec![],
         }),
     });
@@ -507,8 +511,11 @@ impl WorkerTask {
     /// Log task result, remove task from running list
     pub fn log_result(&self, result: &Result<(), Error>) {

+        let warn_count = self.data.lock().unwrap().warn_count;
         if let Err(err) = result {
             self.log(&format!("TASK ERROR: {}", err));
+        } else if warn_count > 0 {
+            self.log(format!("TASK WARNINGS: {}", warn_count));
         } else {
             self.log("TASK OK");
         }
@@ -524,6 +531,13 @@ impl WorkerTask {
         data.logger.log(msg);
     }

+    /// Log a message as warning.
+    pub fn warn<S: AsRef<str>>(&self, msg: S) {
+        let mut data = self.data.lock().unwrap();
+        data.logger.log(format!("WARN: {}", msg.as_ref()));
+        data.warn_count += 1;
+    }
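Warnings are counted per task and folded into the final status line, which `upid_read_status` later recognizes through the new `WARNINGS:` prefix. A self-contained sketch of that round-trip (simplified status handling; the real functions operate on task log files):

    // Sketch: how a warn counter becomes a final task status string and back.
    fn final_status(result: Result<(), String>, warn_count: u64) -> String {
        match result {
            Err(err) => format!("TASK ERROR: {}", err),
            Ok(()) if warn_count > 0 => format!("TASK WARNINGS: {}", warn_count),
            Ok(()) => "TASK OK".to_string(),
        }
    }

    fn parse_status(line: &str) -> &str {
        let rest = line.trim_start_matches("TASK ");
        if rest == "OK" || rest.starts_with("WARNINGS: ") {
            rest
        } else if rest.starts_with("ERROR: ") {
            &rest[7..]
        } else {
            "unknown"
        }
    }

    fn main() {
        let line = final_status(Ok(()), 3);
        assert_eq!(line, "TASK WARNINGS: 3");
        assert_eq!(parse_status(&line), "WARNINGS: 3");
        assert_eq!(parse_status("TASK ERROR: boom"), "boom");
    }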

     /// Set progress indicator
     pub fn progress(&self, progress: f64) {
         if progress >= 0.0 && progress <= 1.0 {
@@ -181,7 +181,7 @@ fn test_zfs_parse_list() -> Result<(), Error> {
     assert_eq!(data, expect);

     let output = "\
 rpool 535260299264 402852388864 132407910400 - - 22 75 1.00 ONLINE -
 /dev/disk/by-id/ata-Crucial_CT500MX200SSD1_154210EB4078-part3 498216206336 392175546368 106040659968 - - 22 78 - ONLINE
 special - - - - - - - - -
 /dev/sda2 37044092928 10676842496 26367250432 - - 63 28 - ONLINE
@@ -41,7 +41,7 @@ pub fn parse_u64(i: &str) -> IResult<&str, u64> {
     map_res(recognize(digit1), str::parse)(i)
 }

-/// Parse complete input, generate vervose error message with line numbers
+/// Parse complete input, generate verbose error message with line numbers
 pub fn parse_complete<'a, F, O>(what: &str, i: &'a str, parser: F) -> Result<O, Error>
     where F: Fn(&'a str) -> IResult<&'a str, O>,
 {
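For context, the `parse_u64` helper shown above is an ordinary nom combinator and runs standalone; a minimal usage sketch, assuming nom 5-era module paths matching the imports this file evidently uses:

    // Sketch: the parse_u64 combinator from this hunk, runnable standalone.
    // Assumes nom 5-style paths for digit1/map_res/recognize.
    use nom::{
        character::complete::digit1,
        combinator::{map_res, recognize},
        IResult,
    };

    pub fn parse_u64(i: &str) -> IResult<&str, u64> {
        map_res(recognize(digit1), str::parse)(i)
    }

    fn main() {
        assert_eq!(parse_u64("1234 rest"), Ok((" rest", 1234)));
        assert!(parse_u64("abc").is_err());
    }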
@@ -64,7 +64,7 @@ Ext.define('PBS.DataStoreContent', {
             'text',
             'backup-time'
         ]);
-        Proxmox.Utils.monStoreErrors(view, view.store, true);
+        Proxmox.Utils.monStoreErrors(view, this.store);
         this.reload(); // initial load
     },

@@ -79,6 +79,7 @@ Ext.define('PBS.DataStoreContent', {
         let url = `/api2/json/admin/datastore/${view.datastore}/snapshots`;
         this.store.setProxy({
             type: 'proxmox',
+            timeout: 300*1000, // 5 minutes, we should make that api call faster
             url: url
         });

@@ -122,10 +123,11 @@ Ext.define('PBS.DataStoreContent', {
         return groups;
     },

-    onLoad: function(store, records, success) {
+    onLoad: function(store, records, success, operation) {
         let view = this.getView();

         if (!success) {
+            Proxmox.Utils.setErrorMask(view, Proxmox.Utils.getResponseErrorMessage(operation.getError()));
             return;
         }

@@ -176,6 +178,7 @@ Ext.define('PBS.DataStoreContent', {
             expanded: true,
             children: children
         });
+        Proxmox.Utils.setErrorMask(view, false);
     },

     onPrune: function() {
@@ -197,6 +200,73 @@ Ext.define('PBS.DataStoreContent', {
         win.show();
     },

+    onVerify: function() {
+        var view = this.getView();
+
+        if (!view.datastore) return;
+
+        let rec = view.selModel.getSelection()[0];
+        if (!(rec && rec.data)) return;
+        let data = rec.data;
+
+        let params;
+
+        if (data.leaf) {
+            params = {
+                "backup-type": data["backup-type"],
+                "backup-id": data["backup-id"],
+                "backup-time": (data['backup-time'].getTime()/1000).toFixed(0),
+            };
+        } else {
+            params = {
+                "backup-type": data.backup_type,
+                "backup-id": data.backup_id,
+            };
+        }
+
+        Proxmox.Utils.API2Request({
+            params: params,
+            url: `/admin/datastore/${view.datastore}/verify`,
+            method: 'POST',
+            failure: function(response) {
+                Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+            },
+            success: function(response, options) {
+                Ext.create('Proxmox.window.TaskViewer', {
+                    upid: response.result.data,
+                }).show();
+            },
+        });
+    },
+
+    onForget: function() {
+        var view = this.getView();
+
+        let rec = view.selModel.getSelection()[0];
+        if (!(rec && rec.data)) return;
+        let data = rec.data;
+        if (!data.leaf) return;
+
+        if (!view.datastore) return;
+
+        console.log(data);
+
+        Proxmox.Utils.API2Request({
+            params: {
+                "backup-type": data["backup-type"],
+                "backup-id": data["backup-id"],
+                "backup-time": (data['backup-time'].getTime()/1000).toFixed(0),
+            },
+            url: `/admin/datastore/${view.datastore}/snapshots`,
+            method: 'DELETE',
+            waitMsgTarget: view,
+            failure: function(response, opts) {
+                Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+            },
+            callback: this.reload.bind(this),
+        });
+    },
+
     openBackupFileDownloader: function() {
         let me = this;
         let view = me.getView();
@@ -326,6 +396,14 @@ Ext.define('PBS.DataStoreContent', {
             iconCls: 'fa fa-refresh',
             handler: 'reload',
         },
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Verify'),
+            disabled: true,
+            parentXType: 'pbsDataStoreContent',
+            enableFn: function(record) { return !!record.data; },
+            handler: 'onVerify',
+        },
         {
             xtype: 'proxmoxButton',
             text: gettext('Prune'),
@@ -334,6 +412,21 @@ Ext.define('PBS.DataStoreContent', {
             enableFn: function(record) { return !record.data.leaf; },
             handler: 'onPrune',
         },
+        {
+            xtype: 'proxmoxButton',
+            text: gettext('Forget'),
+            disabled: true,
+            parentXType: 'pbsDataStoreContent',
+            handler: 'onForget',
+            confirmMsg: function(record) {
+                console.log(record);
+                let name = record.data.text;
+                return Ext.String.format(gettext('Are you sure you want to remove snapshot {0}'), `'${name}'`);
+            },
+            enableFn: function(record) {
+                return !!record.data.leaf;
+            },
+        },
         {
             xtype: 'proxmoxButton',
             text: gettext('Download Files'),
@@ -40,7 +40,7 @@ Ext.define('PBS.DataStorePanel', {

     initComponent: function() {
         let me = this;
-        me.title = `${gettext("Data Store")}: ${me.datastore}`;
+        me.title = `${gettext("Datastore")}: ${me.datastore}`;
         me.callParent();
     },
 });
@@ -72,10 +72,15 @@ Ext.define('PBS.MainView', {
             let datastore = PBS.Utils.getDataStoreFromPath(path);
             obj = contentpanel.add({
                 xtype: 'pbsDataStorePanel',
+                nodename: 'localhost',
                 datastore,
             });
         } else {
-            obj = contentpanel.add({ xtype: path, border: false });
+            obj = contentpanel.add({
+                xtype: path,
+                nodename: 'localhost',
+                border: false
+            });
         }

         var treelist = me.lookupReference('navtree');
@@ -195,7 +200,13 @@ Ext.define('PBS.MainView', {
                 xtype: 'versioninfo'
             },
             {
-                flex: 1
+                padding: 5,
+                html: '<a href="https://bugzilla.proxmox.com" target="_blank">BETA</a>',
+                baseCls: 'x-plain',
+            },
+            {
+                flex: 1,
+                baseCls: 'x-plain',
             },
             {
                 baseCls: 'x-plain',
@@ -19,6 +19,7 @@ JSSRC= \
 	window/ACLEdit.js \
 	window/DataStoreEdit.js \
 	window/CreateDirectory.js \
+	window/ZFSCreate.js \
 	window/FileBrowser.js \
 	window/BackupFileDownloader.js \
 	dashboard/DataStoreStatistics.js \
@@ -26,6 +27,7 @@ JSSRC= \
 	dashboard/RunningTasks.js \
 	dashboard/TaskSummary.js \
 	Utils.js \
+	ZFSList.js \
 	DirectoryList.js \
 	LoginView.js \
 	VersionInfo.js \
@@ -69,12 +69,18 @@ Ext.define('PBS.store.NavigationStore', {
                     path: 'pbsDirectoryList',
                     leaf: true,
                 },
+                {
+                    text: "ZFS",
+                    iconCls: 'fa fa-th-large',
+                    path: 'pbsZFSList',
+                    leaf: true,
+                },
             ]
         }
     ]
 },
 {
-    text: gettext('Data Store'),
+    text: gettext('Datastore'),
     iconCls: 'fa fa-archive',
     path: 'pbsDataStoreConfig',
     expanded: true,
27
www/Utils.js
@@ -33,22 +33,18 @@ Ext.define('PBS.Utils', {
     },

     render_datastore_worker_id: function(id, what) {
-        const result = id.match(/^(\S+)_([^_\s]+)_([^_\s]+)$/);
-        if (result) {
-            let datastore = result[1], type = result[2], id = result[3];
-            return `Datastore ${datastore} - ${what} ${type}/${id}`;
-        }
-        return what;
-    },
-
-    render_datastore_time_worker_id: function(id, what) {
-        const res = id.match(/^(\S+)_([^_\s]+)_([^_\s]+)_([^_\s]+)$/);
+        const res = id.match(/^(\S+?)_(\S+?)_(\S+?)(_(.+))?$/);
         if (res) {
             let datastore = res[1], type = res[2], id = res[3];
-            let datetime = Ext.Date.parse(parseInt(res[4], 16), 'U');
-            let utctime = PBS.Utils.render_datetime_utc(datetime);
-            return `Datastore ${datastore} - ${what} ${type}/${id}/${utctime}`;
+            if (res[4] !== undefined) {
+                let datetime = Ext.Date.parse(parseInt(res[5], 16), 'U');
+                let utctime = PBS.Utils.render_datetime_utc(datetime);
+                return `Datastore ${datastore} ${what} ${type}/${id}/${utctime}`;
+            } else {
+                return `Datastore ${datastore} ${what} ${type}/${id}`;
+            }
         }
-        return what;
+        return `Datastore ${what} ${id}`;
     },

     constructor: function() {
@@ -62,11 +58,14 @@ Ext.define('PBS.Utils', {
             prune: (type, id) => {
                 return PBS.Utils.render_datastore_worker_id(id, gettext('Prune'));
             },
+            verify: (type, id) => {
+                return PBS.Utils.render_datastore_worker_id(id, gettext('Verify'));
+            },
             backup: (type, id) => {
                 return PBS.Utils.render_datastore_worker_id(id, gettext('Backup'));
             },
             reader: (type, id) => {
-                return PBS.Utils.render_datastore_time_worker_id(id, gettext('Read objects'));
+                return PBS.Utils.render_datastore_worker_id(id, gettext('Read objects'));
             },
         });
     }
136
www/ZFSList.js
Normal file
@@ -0,0 +1,136 @@
+Ext.define('PBS.admin.ZFSList', {
+    extend: 'Ext.grid.Panel',
+    xtype: 'pbsZFSList',
+
+    stateful: true,
+    stateId: 'grid-node-zfs',
+
+    controller: {
+        xclass: 'Ext.app.ViewController',
+
+        openCreateWindow: function() {
+            let me = this;
+            Ext.create('PBS.window.CreateZFS', {
+                nodename: me.nodename,
+                listeners: {
+                    destroy: function() { me.reload(); },
+                }
+            }).show();
+        },
+
+        openDetailWindow: function() {
+            let me = this;
+            let view = me.getView();
+            let selection = view.getSelection();
+            if (!selection || selection.length < 1) return;
+
+            let rec = selection[0];
+            let zpool = rec.get('name');
+
+            Ext.create('Proxmox.window.ZFSDetail', {
+                zpool,
+                nodename: view.nodename,
+            }).show();
+        },
+
+        reload: function() {
+            let me = this;
+            let view = me.getView();
+            let store = view.getStore();
+            store.load();
+            store.sort();
+        },
+
+        init: function(view) {
+            let me = this;
+
+            if (!view.nodename) {
+                throw "no nodename given";
+            }
+
+            let url = `/api2/json/nodes/${view.nodename}/disks/zfs`;
+            view.getStore().getProxy().setUrl(url)
+
+            Proxmox.Utils.monStoreErrors(view, view.getStore(), true);
+
+            me.reload();
+        },
+    },
+
+    columns: [
+        {
+            text: gettext('Name'),
+            dataIndex: 'name',
+            flex: 1
+        },
+        {
+            header: gettext('Size'),
+            renderer: Proxmox.Utils.format_size,
+            dataIndex: 'size'
+        },
+        {
+            header: gettext('Free'),
+            renderer: Proxmox.Utils.format_size,
+            dataIndex: 'free'
+        },
+        {
+            header: gettext('Allocated'),
+            renderer: Proxmox.Utils.format_size,
+            dataIndex: 'alloc'
+        },
+        {
+            header: gettext('Fragmentation'),
+            renderer: function(value) {
+                return value.toString() + '%';
+            },
+            dataIndex: 'frag'
+        },
+        {
+            header: gettext('Health'),
+            renderer: Proxmox.Utils.render_zfs_health,
+            dataIndex: 'health'
+        },
+        {
+            header: gettext('Deduplication'),
+            hidden: true,
+            renderer: function(value) {
+                return value.toFixed(2).toString() + 'x';
+            },
+            dataIndex: 'dedup'
+        }
+    ],
+
+    rootVisible: false,
+    useArrows: true,
+
+    tbar: [
+        {
+            text: gettext('Reload'),
+            iconCls: 'fa fa-refresh',
+            handler: 'reload',
+        },
+        {
+            text: gettext('Create') + ': ZFS',
+            handler: 'openCreateWindow',
+        },
+        {
+            text: gettext('Detail'),
+            xtype: 'proxmoxButton',
+            disabled: true,
+            handler: 'openDetailWindow',
+        }
+    ],
+
+    listeners: {
+        itemdblclick: 'openDetailWindow',
+    },
+
+    store: {
+        fields: ['name', 'size', 'free', 'alloc', 'dedup', 'frag', 'health'],
+        proxy: {
+            type: 'proxmox',
+        },
+        sorters: 'name'
+    },
+});
@@ -25,7 +25,7 @@ Ext.define('PBS.DataStoreConfig', {
 extend: 'Ext.grid.GridPanel',
 alias: 'widget.pbsDataStoreConfig',

-title: gettext('Data Store Configuration'),
+title: gettext('Datastore Configuration'),

 controller: {
     xclass: 'Ext.app.ViewController',
@@ -58,6 +58,27 @@ Ext.define('PBS.DataStoreConfig', {
         }).show();
     },

+    onVerify: function() {
+        var view = this.getView();
+
+        let rec = view.selModel.getSelection()[0];
+        if (!(rec && rec.data)) return;
+        let data = rec.data;
+
+        Proxmox.Utils.API2Request({
+            url: `/admin/datastore/${data.name}/verify`,
+            method: 'POST',
+            failure: function(response) {
+                Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+            },
+            success: function(response, options) {
+                Ext.create('Proxmox.window.TaskViewer', {
+                    upid: response.result.data,
+                }).show();
+            },
+        });
+    },
+
     garbageCollect: function() {
         let me = this;
         let view = me.getView();
@@ -115,6 +136,12 @@ Ext.define('PBS.DataStoreConfig', {
     },
     // remove_btn
     '-',
+    {
+        xtype: 'proxmoxButton',
+        text: gettext('Verify'),
+        disabled: true,
+        handler: 'onVerify',
+    },
     {
         xtype: 'proxmoxButton',
         text: gettext('Start GC'),
@@ -2,7 +2,19 @@ Ext.define('pbs-datastore-statistics', {
     extend: 'Ext.data.Model',

     fields: [
-        'store', 'total', 'used', 'avail', 'estimated-full-date', 'history',
+        'store', 'total', 'used', 'avail', 'estimated-full-date',
+        {
+            name: 'history',
+            convert: function(values) {
+                let last = null;
+                return values.map(v => {
+                    if (v !== undefined && v !== null) {
+                        last = v;
+                    }
+                    return last;
+                });
+            }
+        },
         {
             name: 'usage',
             calculate: function(data) {
@@ -56,10 +56,16 @@ Ext.define('PBS.LongestTasks', {
     type: 'diff',
     autoDestroy: true,
     autoDestroyRstore: true,
-    sorters: {
-        property: 'duration',
-        direction: 'DESC',
-    },
+    sorters: [
+        {
+            property: 'duration',
+            direction: 'DESC',
+        },
+        {
+            property: 'upid',
+            direction: 'ASC',
+        },
+    ],
     rstore: {
         storeid: 'proxmox-tasks-dash',
         type: 'store',
@@ -16,7 +16,7 @@ Ext.define('PBS.form.DataStoreSelector', {
     listConfig: {
         columns: [
             {
-                header: gettext('DataStore'),
+                header: gettext('Datastore'),
                 sortable: true,
                 dataIndex: 'store',
                 renderer: Ext.String.htmlEncode,
@@ -39,7 +39,7 @@ Ext.define('PBS.window.CreateDirectory', {
         {
             xtype: 'proxmoxcheckbox',
             name: 'add-datastore',
-            fieldLabel: gettext('Add Data Store'),
+            fieldLabel: gettext('Add as Datastore'),
             value: '1',
         },
     ],
@@ -52,7 +52,7 @@ Ext.define("PBS.window.FileBrowser", {
     extend: "Ext.window.Window",

     width: 800,
-    height: 400,
+    height: 600,

     modal: true,

@@ -142,8 +142,13 @@ Ext.define("PBS.window.FileBrowser", {
             'backup-type': view['backup-type'],
             'backup-time': view['backup-time'],
         });
-        store.load();
-        store.getRoot().expand();
+        store.load(() => {
+            let root = store.getRoot();
+            root.expand(); // always expand invisible root node
+            if (root.childNodes.length === 1) {
+                root.firstChild.expand();
+            }
+        });
     },

     control: {
95
www/ZFSCreate.js
Normal file
@@ -0,0 +1,95 @@
+Ext.define('PBS.window.CreateZFS', {
+    extend: 'Proxmox.window.Edit',
+    xtype: 'pbsCreateZFS',
+
+    subject: 'ZFS',
+
+    showProgress: true,
+
+    onlineHelp: 'chapter_zfs',
+
+    width: 800,
+
+    url: '/nodes/localhost/disks/zfs',
+    method: 'POST',
+    items: [
+        {
+            xtype: 'inputpanel',
+            onGetValues: function(values) {
+                return values;
+            },
+            column1: [
+                {
+                    xtype: 'proxmoxtextfield',
+                    name: 'name',
+                    fieldLabel: gettext('Name'),
+                    minLength: 3,
+                    allowBlank: false,
+                },
+                {
+                    xtype: 'proxmoxcheckbox',
+                    name: 'add-datastore',
+                    fieldLabel: gettext('Add as Datastore'),
+                    value: '1'
+                }
+            ],
+            column2: [
+                {
+                    xtype: 'proxmoxKVComboBox',
+                    fieldLabel: gettext('RAID Level'),
+                    name: 'raidlevel',
+                    value: 'single',
+                    comboItems: [
+                        ['single', gettext('Single Disk')],
+                        ['mirror', 'Mirror'],
+                        ['raid10', 'RAID10'],
+                        ['raidz', 'RAIDZ'],
+                        ['raidz2', 'RAIDZ2'],
+                        ['raidz3', 'RAIDZ3']
+                    ]
+                },
+                {
+                    xtype: 'proxmoxKVComboBox',
+                    fieldLabel: gettext('Compression'),
+                    name: 'compression',
+                    value: 'on',
+                    comboItems: [
+                        ['on', 'on'],
+                        ['off', 'off'],
+                        ['gzip', 'gzip'],
+                        ['lz4', 'lz4'],
+                        ['lzjb', 'lzjb'],
+                        ['zle', 'zle']
+                    ]
+                },
+                {
+                    xtype: 'proxmoxintegerfield',
+                    fieldLabel: gettext('ashift'),
+                    minValue: 9,
+                    maxValue: 16,
+                    value: '12',
+                    name: 'ashift'
+                }
+            ],
+            columnB: [
+                {
+                    xtype: 'pmxMultiDiskSelector',
+                    name: 'devices',
+                    nodename: 'localhost',
+                    typeParameter: 'usage-type',
+                    valueField: 'name',
+                    height: 200,
+                    emptyText: gettext('No Disks unused'),
+                }
+            ]
+        },
+        {
+            xtype: 'displayfield',
+            padding: '5 0 0 0',
+            userCls: 'pmx-hint',
+            value: 'Note: ZFS is not compatible with disks backed by a hardware ' +
+                'RAID controller. For details see ' +
+                '<a target="_blank" href="' + Proxmox.Utils.get_help_link('chapter_zfs') + '">the reference documentation</a>.',
+        },
+    ],
+});