Compare commits
53 Commits
| SHA1 |
|---|
| c4430a937d |
| 237314ad0d |
| caf76ec592 |
| 0af8c26b74 |
| 825dfe7e0d |
| 30a0809553 |
| 6ee3035523 |
| b627ebbf40 |
| ef4bdf6b8b |
| 54722acada |
| 0e2bf3aa1d |
| 365126efa9 |
| 03d4c9217d |
| 8498290848 |
| 654db565cb |
| 51f83548ed |
| 5847a6bdb5 |
| 313e5e2047 |
| 7914e62b10 |
| 84d3284609 |
| 70fab5b46e |
| e36135031d |
| 5a5ee0326e |
| 776dabfb2e |
| 5c4755ad08 |
| 7c1666289d |
| cded320e92 |
| b31cdec225 |
| 591b120d35 |
| e8913fea12 |
| 355a41a763 |
| 5bd4825432 |
| 8f7e5b028a |
| 2a29d9a1ee |
| e056966bc7 |
| ef0ea4ba05 |
| 2892624783 |
| 2c10410b0d |
| d1d74c4367 |
| 8b7f3b8f1d |
| 3f6c2efb8d |
| 227f36497a |
| 5ef4c7bcd3 |
| 70d00e0149 |
| dcf155dac9 |
| 3c5b523631 |
| 6396bace3d |
| 713a128adf |
| affc224aca |
| 6f82d32977 |
| 2a06e08618 |
| 1057b1f5a5 |
| af76234112 |
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.9"
+version = "1.0.11"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -52,7 +52,7 @@ proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "web
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.1"
-pxar = { version = "0.9.0", features = [ "tokio-io" ] }
+pxar = { version = "0.10.0", features = [ "tokio-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
 rustyline = "7"

debian/changelog
@@ -1,3 +1,32 @@
+rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
+
+  * fix feature flag logic in pxar create
+
+  * tools/zip: add missing start_disk field for zip64 extension to improve
+    compatibility with some strict archive tools
+
+  * tape: speedup backup by doing read/write in parallel
+
+  * tape: store datastore name in tape archives and media catalog
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 18 Mar 2021 12:36:01 +0100
+
+rust-proxmox-backup (1.0.10-1) unstable; urgency=medium
+
+  * tape: improve MediaPool allocation by sorting tapes by creation time and
+    label text
+
+  * api: tape backup: continue on vanishing snapshots, as a prune during long
+    running tape backup jobs is OK
+
+  * tape: fix scsi volume_statistics and cartridge_memory for quantum drives
+
+  * typo fixes all over the place
+
+  * d/postinst: restart, not reload, when updating from a to old version
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 11 Mar 2021 08:24:31 +0100
+
 rust-proxmox-backup (1.0.9-1) unstable; urgency=medium
 
   * client: track key source, print when used

debian/control
@@ -41,8 +41,8 @@ Build-Depends: debhelper (>= 11),
  librust-proxmox-0.11+sortable-macro-dev,
  librust-proxmox-0.11+websocket-dev,
  librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
- librust-pxar-0.9+default-dev,
- librust-pxar-0.9+tokio-io-dev,
+ librust-pxar-0.10+default-dev,
+ librust-pxar-0.10+tokio-io-dev,
  librust-regex-1+default-dev (>= 1.2-~~),
  librust-rustyline-7+default-dev,
  librust-serde-1+default-dev,

debian/postinst
@@ -6,13 +6,21 @@ set -e

 case "$1" in
     configure)
-        # need to have user backup in the tapoe group
+        # need to have user backup in the tape group
         usermod -a -G tape backup

         # modeled after dh_systemd_start output
         systemctl --system daemon-reload >/dev/null || true
         if [ -n "$2" ]; then
+            if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
+                # there was an issue with reloading and systemd being confused in older daemon versions
+                # so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
+                # FIXME: remove with PBS 2.1
+                echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
+                _dh_action=try-restart
+            else
                 _dh_action=try-reload-or-restart
+            fi
         else
             _dh_action=start
         fi
@@ -40,12 +48,17 @@ case "$1" in
             /etc/proxmox-backup/remote.cfg || true
         fi
     fi
+    # FIXME: remove with 2.0
+    if [ -d "/var/lib/proxmox-backup/tape" ] &&
+       [ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
+        chmod 0750 /var/lib/proxmox-backup/tape || true
     fi
     # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
     if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
         echo "Fixing up termproxy user id in task log..."
         flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
     fi
+    fi
     ;;

   abort-upgrade|abort-remove|abort-deconfigure)

@@ -65,10 +65,10 @@ Main Features
 :Compression: The ultra-fast Zstandard_ compression is able to compress
    several gigabytes of data per second.

-:Encryption: Backups can be encrypted on the client-side, using AES-256 in
-   Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
-   provides very high performance on modern hardware. In addition to client-side
-   encryption, all data is transferred via a secure TLS connection.
+:Encryption: Backups can be encrypted on the client-side, using AES-256 GCM_.
+   This authenticated encryption (AE_) mode provides very high performance on
+   modern hardware. In addition to client-side encryption, all data is
+   transferred via a secure TLS connection.

 :Web interface: Manage the Proxmox Backup Server with the integrated, web-based
    user interface.
@@ -76,8 +76,16 @@ Main Features
 :Open Source: No secrets. Proxmox Backup Server is free and open-source
    software. The source code is licensed under AGPL, v3.

-:Support: Enterprise support will be available from `Proxmox`_ once the beta
-   phase is over.
+:No Limits: Proxmox Backup Server has no artifical limits for backup storage or
+   backup-clients.
+
+:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
+   form of `Proxmox Backup Server Subscription Plans
+   <https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
+   subscription level get access to the Proxmox Backup :ref:`Enterprise
+   Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
+   Standard or Premium subscription, users have access to the :ref:`Proxmox
+   Customer Portal <get_help_enterprise_support>`.


 Reasons for Data Backup?
@@ -117,8 +125,8 @@ Proxmox Backup Server consists of multiple components:
 * A client CLI tool (`proxmox-backup-client`) to access the server easily from
   any `Linux amd64` environment

-Aside from the web interface, everything is written in the Rust programming
-language.
+Aside from the web interface, most parts of Proxmox Backup Server are written in
+the Rust programming language.

 "The Rust programming language helps you write faster, more reliable software.
 High-level ergonomics and low-level control are often at odds in programming
@@ -134,6 +142,17 @@ language.
 Getting Help
 ------------

+.. _get_help_enterprise_support:
+
+Enterprise Support
+~~~~~~~~~~~~~~~~~~
+
+Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
+<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
+Proxmox Customer Portal. The Customer Portal provides support with guaranteed
+response times from the Proxmox developers.
+For more information or for volume discounts, please contact office@proxmox.com.
+
 Community Support Forum
 ~~~~~~~~~~~~~~~~~~~~~~~

@@ -69,10 +69,12 @@ Here, the output should be:

 f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg

+.. _sysadmin_package_repos_enterprise:
+
 `Proxmox Backup`_ Enterprise Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This will be the default, stable, and recommended repository. It is available for
+This is the stable, recommended repository. It is available for
 all `Proxmox Backup`_ subscription users. It contains the most stable packages,
 and is suitable for production use. The ``pbs-enterprise`` repository is
 enabled by default:

@@ -1,11 +1,11 @@
-All command supports the following parameters to specify the tape device:
+All commands support the following parameters to specify the tape device:

 --device <path>  Path to the Linux tape device

 --drive <name>  Use drive from Proxmox Backup Server configuration.


-Commands generating output supports the ``--output-format``
+Commands which generate output support the ``--output-format``
 parameter. It accepts the following values:

 :``text``: Text format (default). Human readable.

@@ -4,7 +4,7 @@ Tape Backup
 ===========

 .. CAUTION:: Tape Backup is a technical preview feature, not meant for
-   production usage. To enable the GUI, you need to issue the
+   production use. To enable it in the GUI, you need to issue the
    following command (as root user on the console):

 .. code-block:: console
@@ -14,36 +14,36 @@ Tape Backup
 Proxmox tape backup provides an easy way to store datastore content
 onto magnetic tapes. This increases data safety because you get:

-- an additional copy of the data
-- to a different media type (tape)
+- an additional copy of the data,
+- on a different media type (tape),
 - to an additional location (you can move tapes off-site)

 In most restore jobs, only data from the last backup job is restored.
-Restore requests further decline the older the data
+Restore requests further decline, the older the data
 gets. Considering this, tape backup may also help to reduce disk
-usage, because you can safely remove data from disk once archived on
-tape. This is especially true if you need to keep data for several
+usage, because you can safely remove data from disk, once it's archived on
+tape. This is especially true if you need to retain data for several
 years.

 Tape backups do not provide random access to the stored data. Instead,
-you need to restore the data to disk before you can access it
+you need to restore the data to disk, before you can access it
 again. Also, if you store your tapes off-site (using some kind of tape
-vaulting service), you need to bring them on-site before you can do any
-restore. So please consider that restores from tapes can take much
-longer than restores from disk.
+vaulting service), you need to bring them back on-site, before you can do any
+restores. So please consider that restoring from tape can take much
+longer than restoring from disk.


 Tape Technology Primer
 ----------------------

-.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
+.. _Linear Tape-Open: https://en.wikipedia.org/wiki/Linear_Tape-Open

-As of 2021, the only broadly available tape technology standard is
-`Linear Tape Open`_, and different vendors offers LTO Ultrium tape
-drives, auto-loaders and LTO tape cartridges.
+As of 2021, the only widely available tape technology standard is
+`Linear Tape-Open`_ (LTO). Different vendors offer LTO Ultrium tape
+drives, auto-loaders, and LTO tape cartridges.

-There are a few vendors offering proprietary drives with
-slight advantages in performance and capacity, but they have
+There are a few vendors that offer proprietary drives with
+slight advantages in performance and capacity. Nevertheless, they have
 significant disadvantages:

 - proprietary (single vendor)
@@ -53,13 +53,13 @@ So we currently do not test such drives.

 In general, LTO tapes offer the following advantages:

-- Durable (30 years)
+- Durability (30 year lifespan)
 - High Capacity (12 TB)
 - Relatively low cost per TB
 - Cold Media
 - Movable (storable inside vault)
 - Multiple vendors (for both media and drives)
-- Build in AES-CGM Encryption engine
+- Build in AES-GCM Encryption engine

 Note that `Proxmox Backup Server` already stores compressed data, so using the
 tape compression feature has no advantage.
@@ -68,41 +68,40 @@ tape compression feature has no advantage.
 Supported Hardware
 ------------------

-Proxmox Backup Server supports `Linear Tape Open`_ generation 4 (LTO4)
-or later. In general, all SCSI2 tape drives supported by the Linux
-kernel should work, but feature like hardware encryptions needs LTO4
+Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
+or later. In general, all SCSI-2 tape drives supported by the Linux
+kernel should work, but features like hardware encryption need LTO-4
 or later.

-Tape changer support is done using the Linux 'mtx' command line
-tool. So any changer device supported by that tool should work.
+Tape changing is carried out using the Linux 'mtx' command line
+tool, so any changer device supported by this tool should work.


 Drive Performance
 ~~~~~~~~~~~~~~~~~

-Current LTO-8 tapes provide read/write speeds up to 360 MB/s. This means,
+Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
 that it still takes a minimum of 9 hours to completely write or
 read a single tape (even at maximum speed).

 The only way to speed up that data rate is to use more than one
-drive. That way you can run several backup jobs in parallel, or run
+drive. That way, you can run several backup jobs in parallel, or run
 restore jobs while the other dives are used for backups.

-Also consider that you need to read data first from your datastore
-(disk). But a single spinning disk is unable to deliver data at this
+Also consider that you first need to read data from your datastore
+(disk). However, a single spinning disk is unable to deliver data at this
 rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
-so it takes 33 hours to read 12TB to fill up an LTO-8 tape. If you want
-to run your tape at full speed, please make sure that the source
+so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
+to write to your tape at full speed, please make sure that the source
 datastore is able to deliver that performance (e.g, by using SSDs).


 Terminology
 -----------

-:Tape Labels: are used to uniquely identify a tape. You normally use
-   some sticky paper labels and apply them on the front of the
-   cartridge. We additionally store the label text magnetically on the
-   tape (first file on tape).
+:Tape Labels: are used to uniquely identify a tape. You would normally apply a
+   sticky paper label to the front of the cartridge. We additionally store the
+   label text magnetically on the tape (first file on tape).

 .. _Code 39: https://en.wikipedia.org/wiki/Code_39

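As a quick sanity check on the throughput figures in this hunk: 12 TB is roughly 12,000,000 MB, so even at the full 360 MB/s an LTO-8 drive needs about 33,000 seconds, a little over 9 hours, for one tape; at the 60 to 100 MB/s that a single spinning disk realistically delivers, reading the same 12 TB takes on the order of 33 hours, which is why the documentation recommends an SSD-backed source datastore.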
@@ -116,10 +115,10 @@ Terminology
    Specification`_.

    You can either buy such barcode labels from your cartridge vendor,
-   or print them yourself. You can use our `LTO Barcode Generator`_ App
-   for that.
+   or print them yourself. You can use our `LTO Barcode Generator`_
+   app, if you would like to print them yourself.

-   .. Note:: Physical labels and the associated adhesive shall have an
+   .. Note:: Physical labels and the associated adhesive should have an
       environmental performance to match or exceed the environmental
       specifications of the cartridge to which it is applied.

@@ -133,7 +132,7 @@ Terminology
    media pool).

 :Tape drive: The device used to read and write data to the tape. There
-   are standalone drives, but drives often ship within tape libraries.
+   are standalone drives, but drives are usually shipped within tape libraries.

 :Tape changer: A device which can change the tapes inside a tape drive
    (tape robot). They are usually part of a tape library.
@@ -142,10 +141,10 @@ Terminology

 :`Tape library`_: A storage device that contains one or more tape drives,
    a number of slots to hold tape cartridges, a barcode reader to
-   identify tape cartridges and an automated method for loading tapes
+   identify tape cartridges, and an automated method for loading tapes
    (a robot).

-   This is also commonly known as 'autoloader', 'tape robot' or 'tape jukebox'.
+   This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.

 :Inventory: The inventory stores the list of known tapes (with
    additional status information).
@@ -153,14 +152,14 @@ Terminology
 :Catalog: A media catalog stores information about the media content.


-Tape Quickstart
+Tape Quick Start
 ---------------

 1. Configure your tape hardware (drives and changers)

 2. Configure one or more media pools

-3. Label your tape cartridges.
+3. Label your tape cartridges

 4. Start your first tape backup job ...

@@ -169,7 +168,7 @@ Configuration
 -------------

 Please note that you can configure anything using the graphical user
-interface or the command line interface. Both methods results in the
+interface or the command line interface. Both methods result in the
 same configuration.

 .. _tape_changer_config:
@@ -180,7 +179,7 @@ Tape changers
 Tape changers (robots) are part of a `Tape Library`_. You can skip
 this step if you are using a standalone drive.

-Linux is able to auto detect those devices, and you can get a list
+Linux is able to auto detect these devices, and you can get a list
 of available devices using:

 .. code-block:: console
@@ -192,7 +191,7 @@ of available devices using:
   │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
   └─────────────────────────────┴─────────┴──────────────┴────────┘

-In order to use that device with Proxmox, you need to create a
+In order to use a device with Proxmox Backup Server, you need to create a
 configuration entry:

 .. code-block:: console
@@ -201,11 +200,11 @@ configuration entry:

 Where ``sl3`` is an arbitrary name you can choose.

-.. Note:: Please use stable device path names from inside
+.. Note:: Please use the persistent device path names from inside
    ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
    different device after reboot, and that is not what you want.

-You can show the final configuration with:
+You can display the final configuration with:

 .. code-block:: console

@@ -255,12 +254,12 @@ Tape libraries usually provide some special import/export slots (also
 called "mail slots"). Tapes inside those slots are accessible from
 outside, making it easy to add/remove tapes to/from the library. Those
 tapes are considered to be "offline", so backup jobs will not use
-them. Those special slots are auto-detected and marked as
+them. Those special slots are auto-detected and marked as an
 ``import-export`` slot in the status command.

 It's worth noting that some of the smaller tape libraries don't have
-such slots. While they have something called "Mail Slot", that slot
-is just a way to grab the tape from the gripper. But they are unable
+such slots. While they have something called a "Mail Slot", that slot
+is just a way to grab the tape from the gripper. They are unable
 to hold media while the robot does other things. They also do not
 expose that "Mail Slot" over the SCSI interface, so you wont see them in
 the status output.
@@ -322,7 +321,7 @@ configuration entry:

   # proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst

-.. Note:: Please use stable device path names from inside
+.. Note:: Please use the persistent device path names from inside
    ``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
    different device after reboot, and that is not what you want.

@@ -334,10 +333,10 @@ changer device:
   # proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0

 The ``--changer-drivenum`` is only necessary if the tape library
-includes more than one drive (The changer status command lists all
+includes more than one drive (the changer status command lists all
 drive numbers).

-You can show the final configuration with:
+You can display the final configuration with:

 .. code-block:: console

@@ -353,7 +352,7 @@ You can show the final configuration with:
   └─────────┴────────────────────────────────┘

 .. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
-   configuration, because that is the default.
+   configuration, because it is the default.

 To list all configured drives use:

@@ -383,7 +382,7 @@ For testing, you can simply query the drive status with:
   └───────────┴────────────────────────┘

 .. NOTE:: Blocksize should always be 0 (variable block size
-   mode). This is the default anyways.
+   mode). This is the default anyway.


 .. _tape_media_pool_config:
@@ -399,11 +398,11 @@ one media pool, so a job only uses tapes from that pool.
    A media set is a group of continuously written tapes, used to split
    the larger pool into smaller, restorable units. One or more backup
    jobs write to a media set, producing an ordered group of
-   tapes. Media sets are identified by an unique ID. That ID and the
-   sequence number is stored on each tape of that set (tape label).
+   tapes. Media sets are identified by a unique ID. That ID and the
+   sequence number are stored on each tape of that set (tape label).

-   Media sets are the basic unit for restore tasks, i.e. you need all
-   tapes in the set to restore the media set content. Data is fully
+   Media sets are the basic unit for restore tasks. This means that you need
+   every tape in the set to restore the media set contents. Data is fully
    deduplicated inside a media set.


@@ -414,20 +413,20 @@ one media pool, so a job only uses tapes from that pool.

    - Try to use the current media set.

-     This setting produce one large media set. While this is very
+     This setting produces one large media set. While this is very
      space efficient (deduplication, no unused space), it can lead to
-     long restore times, because restore jobs needs to read all tapes in the
+     long restore times, because restore jobs need to read all tapes in the
      set.

-     .. NOTE:: Data is fully deduplicated inside a media set. That
+     .. NOTE:: Data is fully deduplicated inside a media set. This
         also means that data is randomly distributed over the tapes in
-        the set. So even if you restore a single VM, this may have to
-        read data from all tapes inside the media set.
+        the set. Thus, even if you restore a single VM, data may have to be
+        read from all tapes inside the media set.

-     Larger media sets are also more error prone, because a single
-     damaged media makes the restore fail.
+     Larger media sets are also more error-prone, because a single
+     damaged tape makes the restore fail.

-     Usage scenario: Mostly used with tape libraries, and you manually
+     Usage scenario: Mostly used with tape libraries. You manually
      trigger new set creation by running a backup job with the
      ``--export`` option.

@@ -436,13 +435,13 @@ one media pool, so a job only uses tapes from that pool.

    - Always create a new media set.

-     With this setting each backup job creates a new media set. This
-     is less space efficient, because the last media from the last set
+     With this setting, each backup job creates a new media set. This
+     is less space efficient, because the media from the last set
      may not be fully written, leaving the remaining space unused.

      The advantage is that this procudes media sets of minimal
-     size. Small set are easier to handle, you can move sets to an
-     off-site vault, and restore is much faster.
+     size. Small sets are easier to handle, can be moved more conveniently
+     to an off-site vault, and can be restored much faster.

      .. NOTE:: Retention period starts with the creation time of the
         media set.
@@ -468,11 +467,11 @@ one media pool, so a job only uses tapes from that pool.

    - Current set contains damaged or retired tapes.

-   - Media pool encryption changed
+   - Media pool encryption has changed

-   - Database consistency errors, e.g. if the inventory does not
-     contain required media info, or contain conflicting infos
-     (outdated data).
+   - Database consistency errors, for example, if the inventory does not
+     contain the required media information, or it contains conflicting
+     information (outdated data).

 .. topic:: Retention Policy

@@ -489,26 +488,27 @@ one media pool, so a job only uses tapes from that pool.

 .. topic:: Hardware Encryption

-   LTO4 (or later) tape drives support hardware encryption. If you
+   LTO-4 (or later) tape drives support hardware encryption. If you
    configure the media pool to use encryption, all data written to the
    tapes is encrypted using the configured key.

-   That way, unauthorized users cannot read data from the media,
-   e.g. if you loose a media while shipping to an offsite location.
+   This way, unauthorized users cannot read data from the media,
+   for example, if you loose a tape while shipping to an offsite location.

-   .. Note:: If the backup client also encrypts data, data on tape
+   .. Note:: If the backup client also encrypts data, data on the tape
       will be double encrypted.

-   The password protected key is stored on each media, so it is
-   possbible to `restore the key <tape_restore_encryption_key_>`_ using the password. Please make sure
-   you remember the password in case you need to restore the key.
+   The password protected key is stored on each medium, so that it is
+   possbible to `restore the key <tape_restore_encryption_key_>`_ using
+   the password. Please make sure to remember the password, in case
+   you need to restore the key.


-.. NOTE:: We use global content namespace, i.e. we do not store the
-   source datastore, so it is impossible to distinguish store1:/vm/100
-   from store2:/vm/100. Please use different media pools if the
-   sources are from different name spaces with conflicting names
-   (E.g. if the sources are from different Proxmox VE clusters).
+.. NOTE:: We use global content namespace, meaning we do not store the
+   source datastore name. Because of this, it is impossible to distinguish
+   store1:/vm/100 from store2:/vm/100. Please use different media pools
+   if the sources are from different namespaces with conflicting names
+   (for example, if the sources are from different Proxmox VE clusters).


 The following command creates a new media pool:
@@ -520,7 +520,7 @@ The following command creates a new media pool:

   # proxmox-tape pool create daily --drive mydrive


-Additional option can be set later using the update command:
+Additional option can be set later, using the update command:

 .. code-block:: console

@@ -544,8 +544,8 @@ Tape Backup Jobs
 ~~~~~~~~~~~~~~~~

 To automate tape backup, you can configure tape backup jobs which
-store datastore content to a media pool at a specific time
-schedule. Required settings are:
+write datastore content to a media pool, based on a specific time schedule.
+The required settings are:

 - ``store``: The datastore you want to backup

@@ -564,14 +564,14 @@ use:
   # proxmox-tape backup-job create job2 --store vmstore1 \
     --pool yourpool --drive yourdrive --schedule daily

-Backup includes all snapshot from a backup group by default. You can
+The backup includes all snapshots from a backup group by default. You can
 set the ``latest-only`` flag to include only the latest snapshots:

 .. code-block:: console

   # proxmox-tape backup-job update job2 --latest-only

-Backup jobs can use email to send tape requests notifications or
+Backup jobs can use email to send tape request notifications or
 report errors. You can set the notification user with:

 .. code-block:: console
@@ -581,7 +581,7 @@ report errors. You can set the notification user with:
 .. Note:: The email address is a property of the user (see :ref:`user_mgmt`).

 It is sometimes useful to eject the tape from the drive after a
-backup. For a standalone drive, the ``eject-media`` option eject the
+backup. For a standalone drive, the ``eject-media`` option ejects the
 tape, making sure that the following backup cannot use the tape
 (unless someone manually loads the tape again). For tape libraries,
 this option unloads the tape to a free slot, which provides better
@@ -591,9 +591,9 @@ dust protection than inside a drive:

   # proxmox-tape backup-job update job2 --eject-media

-.. Note:: For failed jobs, the tape remain in the drive.
+.. Note:: For failed jobs, the tape remains in the drive.

-For tape libraries, the ``export-media`` options moves all tapes from
+For tape libraries, the ``export-media`` option moves all tapes from
 the media set to an export slot, making sure that the following backup
 cannot use the tapes. An operator can pick up those tapes and move them
 to a vault.
@@ -622,9 +622,9 @@ To remove a job, please use:
 Administration
 --------------

-Many sub-command of the ``proxmox-tape`` command line tools take a
+Many sub-commands of the ``proxmox-tape`` command line tools take a
 parameter called ``--drive``, which specifies the tape drive you want
-to work on. For convenience, you can set that in an environment
+to work on. For convenience, you can set this in an environment
 variable:

 .. code-block:: console
@@ -639,27 +639,27 @@ parameter from commands that needs a changer device, for example:

   # proxmox-tape changer status

-Should displays the changer status of the changer device associated with
+should display the changer status of the changer device associated with
 drive ``mydrive``.


 Label Tapes
 ~~~~~~~~~~~

-By default, tape cartidges all looks the same, so you need to put a
-label on them for unique identification. So first, put a sticky paper
+By default, tape cartridges all look the same, so you need to put a
+label on them for unique identification. First, put a sticky paper
 label with some human readable text on the cartridge.

 If you use a `Tape Library`_, you should use an 8 character string
-encoded as `Code 39`_, as definded in the `LTO Ultrium Cartridge Label
-Specification`_. You can either bye such barcode labels from your
-cartidge vendor, or print them yourself. You can use our `LTO Barcode
-Generator`_ App for that.
+encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
+Specification`_. You can either buy such barcode labels from your
+cartridge vendor, or print them yourself. You can use our `LTO Barcode
+Generator`_ app to print them.

 Next, you need to write that same label text to the tape, so that the
 software can uniquely identify the tape too.

-For a standalone drive, manually insert the new tape cartidge into the
+For a standalone drive, manually insert the new tape cartridge into the
 drive and run:

 .. code-block:: console
@@ -668,7 +668,7 @@ drive and run:

 You may omit the ``--pool`` argument to allow the tape to be used by any pool.

-.. Note:: For safety reasons, this command fails if the tape contain
+.. Note:: For safety reasons, this command fails if the tape contains
    any data. If you want to overwrite it anyway, erase the tape first.

 You can verify success by reading back the label:
@@ -718,7 +718,7 @@ The following options are available:
 --eject-media  Eject media upon job completion.

   It is normally good practice to eject the tape after use. This unmounts the
-  tape from the drive and prevents the tape from getting dirty with dust.
+  tape from the drive and prevents the tape from getting dusty.

 --export-media-set  Export media set upon job completion.

@@ -737,7 +737,7 @@ catalogs, you need to restore them first. Please note that you need
 the catalog to find your data, but restoring a complete media-set does
 not need media catalogs.

-The following command shows the media content (from catalog):
+The following command lists the media content (from catalog):

 .. code-block:: console

@@ -841,7 +841,7 @@ database. Further restore jobs automatically use any available key.
 Tape Cleaning
 ~~~~~~~~~~~~~

-LTO tape drives requires regular cleaning. This is done by loading a
+LTO tape drives require regular cleaning. This is done by loading a
 cleaning cartridge into the drive, which is a manual task for
 standalone drives.

@@ -181,7 +181,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {

     if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
         match {
-            // scope to prevent the temprary iter from borrowing across the whole match
+            // scope to prevent the temporary iter from borrowing across the whole match
             let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
             entry.map(|(ty, index, _)| (ty, index))
         } {
@@ -259,7 +259,7 @@ fn delete_tfa(
         .ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;

     match {
-        // scope to prevent the temprary iter from borrowing across the whole match
+        // scope to prevent the temporary iter from borrowing across the whole match
         let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
         entry.map(|(ty, index, _)| (ty, index))
     } {
@@ -1,4 +1,4 @@
-//! Datastore Syncronization Job Management
+//! Datastore Synchronization Job Management

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -119,7 +119,7 @@ pub fn change_passphrase(
     let kdf = kdf.unwrap_or_default();

     if let Kdf::None = kdf {
-        bail!("Please specify a key derivation funktion (none is not allowed here).");
+        bail!("Please specify a key derivation function (none is not allowed here).");
     }

     let _lock = open_file_locked(
@@ -187,7 +187,7 @@ pub fn create_key(
     let kdf = kdf.unwrap_or_default();

     if let Kdf::None = kdf {
-        bail!("Please specify a key derivation funktion (none is not allowed here).");
+        bail!("Please specify a key derivation function (none is not allowed here).");
     }

     let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
@@ -85,7 +85,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
         },
         notify: {
             type: bool,
-            description: r#"Send notification mail about new package updates availanle to the
+            description: r#"Send notification mail about new package updates available to the
                 email address configured for 'root@pam')."#,
             default: false,
             optional: true,
@@ -1,5 +1,5 @@
 use std::path::Path;
-use std::sync::Arc;
+use std::sync::{Mutex, Arc};

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -16,6 +16,7 @@ use proxmox::{

 use crate::{
     task_log,
+    task_warn,
     config::{
         self,
         cached_user_info::CachedUserInfo,
@@ -42,6 +43,7 @@ use crate::{
         DataStore,
         BackupDir,
         BackupInfo,
+        StoreProgress,
     },
     api2::types::{
         Authid,
@@ -389,32 +391,63 @@ fn backup_worker(

     group_list.sort_unstable();

+    let group_count = group_list.len();
+    task_log!(worker, "found {} groups", group_count);
+
+    let mut progress = StoreProgress::new(group_count as u64);
+
     let latest_only = setup.latest_only.unwrap_or(false);

     if latest_only {
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }

-    for group in group_list {
+    let datastore_name = datastore.name();
+
+    let mut errors = false;
+
+    for (group_number, group) in group_list.into_iter().enumerate() {
+        progress.done_groups = group_number as u64;
+        progress.done_snapshots = 0;
+        progress.group_snapshots = 0;
+
         let mut snapshot_list = group.list_backups(&datastore.base_path())?;

         BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

         if latest_only {
+            progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
+                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
-                task_log!(worker, "backup snapshot {}", info.backup_dir);
-                backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
+                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
+                    errors = true;
+                }
+                progress.done_snapshots = 1;
+                task_log!(
+                    worker,
+                    "percentage done: {}",
+                    progress
+                );
             }
         } else {
-            for info in snapshot_list {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+            progress.group_snapshots = snapshot_list.len() as u64;
+            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
+                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
-                task_log!(worker, "backup snapshot {}", info.backup_dir);
-                backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
+                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
+                    errors = true;
+                }
+                progress.done_snapshots = snapshot_number as u64 + 1;
+                task_log!(
+                    worker,
+                    "percentage done: {}",
+                    progress
+                );
             }
         }
     }
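The progress bookkeeping introduced in this hunk (``done_groups``, ``done_snapshots``, ``group_snapshots``) can be illustrated with a small, self-contained sketch. The ``Progress`` struct and its percentage formula below are simplified stand-ins for illustration only, not the actual `StoreProgress` type from the Proxmox crates:

```rust
use std::fmt;

// Simplified stand-in for the progress type used in the diff above.
struct Progress {
    total_groups: u64,
    done_groups: u64,
    done_snapshots: u64,
    group_snapshots: u64,
}

impl fmt::Display for Progress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Finished groups count fully; the current group counts by its finished snapshots.
        let group_part = self.done_groups as f64 / self.total_groups as f64;
        let snapshot_part = if self.group_snapshots == 0 {
            0.0
        } else {
            (self.done_snapshots as f64 / self.group_snapshots as f64) / self.total_groups as f64
        };
        write!(f, "{:.2}%", (group_part + snapshot_part) * 100.0)
    }
}

fn main() {
    let mut progress = Progress {
        total_groups: 4,
        done_groups: 0,
        done_snapshots: 0,
        group_snapshots: 0,
    };
    // Mirrors the loop structure above: per group, update snapshot progress and log it.
    for group_number in 0..4u64 {
        progress.done_groups = group_number;
        progress.group_snapshots = 2;
        for snapshot_number in 0..2u64 {
            progress.done_snapshots = snapshot_number + 1;
            println!("percentage done: {}", progress);
        }
    }
}
```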
@@ -427,6 +460,10 @@ fn backup_worker(
         pool_writer.eject_media(worker)?;
     }

+    if errors {
+        bail!("Tape backup finished with some errors. Please check the task log.");
+    }
+
     Ok(())
 }

@@ -460,39 +497,61 @@ pub fn backup_snapshot(
     pool_writer: &mut PoolWriter,
     datastore: Arc<DataStore>,
     snapshot: BackupDir,
-) -> Result<(), Error> {
+) -> Result<bool, Error> {

-    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);
+    task_log!(worker, "backup snapshot {}", snapshot);

-    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;
+    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
+        Ok(reader) => reader,
+        Err(err) => {
+            // ignore missing snapshots and continue
+            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
+            return Ok(false);
+        }
+    };

-    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
+    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
+
+    let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
+        datastore.clone(),
+        snapshot_reader.clone(),
+    )?;
+
+    let mut chunk_iter = chunk_iter.peekable();

     loop {
         worker.check_abort()?;

         // test is we have remaining chunks
-        if chunk_iter.peek().is_none() {
-            break;
+        match chunk_iter.peek() {
+            None => break,
+            Some(Ok(_)) => { /* Ok */ },
+            Some(Err(err)) => bail!("{}", err),
         }

         let uuid = pool_writer.load_writable_media(worker)?;

         worker.check_abort()?;

-        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
+        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

         if leom {
             pool_writer.set_media_status_full(&uuid)?;
         }
     }

+    if let Err(_) = reader_thread.join() {
+        bail!("chunk reader thread failed");
+    }
+
     worker.check_abort()?;

     let uuid = pool_writer.load_writable_media(worker)?;

     worker.check_abort()?;

+    let snapshot_reader = snapshot_reader.lock().unwrap();
+
     let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

     if !done {
@@ -511,5 +570,5 @@ pub fn backup_snapshot(

     task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

-    Ok(())
+    Ok(true)
 }
|
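The rewritten backup_snapshot above splits the tape backup into a reader thread and a writer loop, which is what the changelog means by doing read and write in parallel. A minimal sketch of that producer/consumer shape, using only the standard library (the real spawn_chunk_reader_thread belongs to PoolWriter and its exact signature is not shown in this hunk):

use std::sync::mpsc::{sync_channel, Receiver};
use std::thread;

// Hypothetical stand-in for the real (digest, chunk data) items read from the snapshot.
type Chunk = Vec<u8>;

// The reader thread fills a bounded channel while the caller drains it and writes to
// tape; the bound keeps memory use flat when the drive is slower than the datastore.
fn spawn_reader(chunks: Vec<Chunk>) -> (thread::JoinHandle<()>, Receiver<Chunk>) {
    let (tx, rx) = sync_channel(16);
    let handle = thread::spawn(move || {
        for chunk in chunks {
            if tx.send(chunk).is_err() {
                break; // writer side hung up, stop reading
            }
        }
    });
    (handle, rx)
}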
@ -220,7 +220,7 @@ pub async fn load_slot(drive: String, source_slot: u64) -> Result<(), Error> {
        },
    },
    returns: {
        description: "The import-export slot number the media was transferred to.",
        type: u64,
        minimum: 1,
    },
@ -782,7 +782,7 @@ pub fn clean_drive(
            }
        }

        worker.log("Drive cleaned successfully");

        Ok(())
    },
@ -943,7 +943,7 @@ pub fn update_inventory(
            }
            Ok((Some(media_id), _key_config)) => {
                if label_text != media_id.label.label_text {
                    worker.warn(format!("label text mismatch ({} != {})", label_text, media_id.label.label_text));
                    continue;
                }
                worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
@ -1012,7 +1012,10 @@ fn barcode_label_media_worker(
) -> Result<(), Error> {
    let (mut changer, changer_name) = required_media_changer(drive_config, &drive)?;

    let mut label_text_list = changer.online_media_label_texts()?;

    // make sure we label them in the right order
    label_text_list.sort();

    let state_path = Path::new(TAPE_STATUS_DIR);

@ -432,9 +432,10 @@ pub fn list_content(
            .generate_media_set_name(&set.uuid, template)
            .unwrap_or_else(|_| set.uuid.to_string());

        let catalog = MediaCatalog::open(status_path, &media_id, false, false)?;

        for (store, content) in catalog.content() {
            for snapshot in content.snapshot_index.keys() {
                let backup_dir: BackupDir = snapshot.parse()?;

                if let Some(ref backup_type) = filter.backup_type {
@ -453,10 +454,12 @@ pub fn list_content(
                    media_set_ctime: set.ctime,
                    seq_nr: set.seq_nr,
                    snapshot: snapshot.to_owned(),
                    store: store.to_owned(),
                    backup_time: backup_dir.backup_time(),
                });
            }
        }
    }

    Ok(list)
}
@ -497,7 +500,7 @@ pub fn get_media_status(uuid: Uuid) -> Result<MediaStatus, Error> {
/// Update media status (None, 'full', 'damaged' or 'retired')
///
/// It is not allowed to set status to 'writable' or 'unknown' (those
/// are internally managed states).
pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
@ -40,6 +40,7 @@ use crate::{
    cached_user_info::CachedUserInfo,
    acl::{
        PRIV_DATASTORE_BACKUP,
        PRIV_DATASTORE_MODIFY,
        PRIV_TAPE_READ,
    },
},
@ -70,11 +71,15 @@ use crate::{
    file_formats::{
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
        PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
        PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
        MediaContentHeader,
        ChunkArchiveHeader,
        ChunkArchiveDecoder,
        SnapshotArchiveHeader,
    },
    drive::{
        TapeDriver,
@ -105,6 +110,10 @@ pub const ROUTER: Router = Router::new()
            type: Userid,
            optional: true,
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
},
returns: {
@ -123,6 +132,7 @@ pub fn restore(
    drive: String,
    media_set: String,
    notify_user: Option<Userid>,
    owner: Option<Authid>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

@ -134,6 +144,18 @@ pub fn restore(
        bail!("no permissions on /datastore/{}", store);
    }

    if let Some(ref owner) = owner {
        let correct_owner = owner == &auth_id
            || (owner.is_token()
                && !auth_id.is_token()
                && owner.user() == auth_id.user());

        // same permission as changing ownership after syncing
        if !correct_owner && privs & PRIV_DATASTORE_MODIFY == 0 {
            bail!("no permission to restore as '{}'", owner);
        }
    }

    let privs = user_info.lookup_privs(&auth_id, &["tape", "drive", &drive]);
    if (privs & PRIV_TAPE_READ) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
@ -222,6 +244,7 @@ pub fn restore(
        &datastore,
        &auth_id,
        &notify_user,
        &owner,
    )?;
}

@ -252,6 +275,7 @@ pub fn request_and_restore_media(
    datastore: &DataStore,
    authid: &Authid,
    notify_user: &Option<Userid>,
    owner: &Option<Authid>,
) -> Result<(), Error> {

    let media_set_uuid = match media_id.media_set_label {
@ -284,7 +308,9 @@ pub fn request_and_restore_media(
        }
    }

    let restore_owner = owner.as_ref().unwrap_or(authid);

    restore_media(worker, &mut drive, &info, Some((datastore, restore_owner)), false)
}

/// Restore complete media content and catalog
@ -340,10 +366,18 @@ fn restore_archive<'a>(
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected snapshot archive version (v1.0)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;

            let datastore_name = archive_header.store;
            let snapshot = archive_header.snapshot;

            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);

            let backup_dir: BackupDir = snapshot.parse()?;

@ -371,7 +405,7 @@ fn restore_archive<'a>(
                    task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                }
                Ok(true) => {
                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                    catalog.commit_if_large()?;
                }
            }
@ -381,17 +415,26 @@ fn restore_archive<'a>(

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            bail!("unexpected chunk archive version (v1.0)");
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
            let header_data = reader.read_exact_allocated(header.size as usize)?;

            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;

            let source_datastore = archive_header.store;

            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
            let datastore = target.as_ref().map(|t| t.0);

            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number, &source_datastore)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
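The v1.1 archive formats restored above carry the source datastore in a small JSON header, which is what lets the media catalog record content per datastore. A hedged sketch of those headers as this hunk uses them (field names inferred from archive_header.store and archive_header.snapshot; the real definitions live in the tape file_formats module and may carry more fields):

use serde::{Deserialize, Serialize};

// Sketch only; the shipped structs are defined in proxmox-backup's tape code.
#[derive(Serialize, Deserialize)]
pub struct ChunkArchiveHeader {
    /// Datastore the chunks were read from
    pub store: String,
}

#[derive(Serialize, Deserialize)]
pub struct SnapshotArchiveHeader {
    /// Snapshot path, e.g. "vm/100/2020-06-15T05:18:33Z"
    pub snapshot: String,
    /// Datastore name
    pub store: String,
}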
@ -1272,7 +1272,7 @@ pub struct APTUpdateInfo {
pub enum Notify {
    /// Never send notification
    Never,
    /// Send notifications for failed and successful jobs
    Always,
    /// Send notifications for failed jobs only
    Error,
@ -21,7 +21,7 @@ pub struct OptionalDeviceIdentification {
#[api()]
#[derive(Debug,Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
    /// Tape changer (Autoloader, Robot)
    Changer,
@ -144,6 +144,8 @@ pub struct MediaContentEntry {
    pub seq_nr: u64,
    /// Media Pool
    pub pool: String,
    /// Datastore Name
    pub store: String,
    /// Backup snapshot
    pub snapshot: String,
    /// Snapshot creation time (epoch)
@ -75,7 +75,7 @@
//!
//! Since PBS allows multiple potentially interfering operations at the
//! same time (e.g. garbage collect, prune, multiple backup creations
//! (only in separate groups), forget, ...), these need to lock against
//! each other in certain scenarios. There is no overarching global lock
//! though, instead always the finest grained lock possible is used,
//! because running these operations concurrently is treated as a feature
@ -3,15 +3,27 @@ use crate::tools;
use anyhow::{bail, format_err, Error};
use std::os::unix::io::RawFd;

use std::path::{Path, PathBuf};

use proxmox::const_regex;

use super::manifest::MANIFEST_BLOB_NAME;

macro_rules! BACKUP_ID_RE {
    () => {
        r"[A-Za-z0-9_][A-Za-z0-9._\-]*"
    };
}
macro_rules! BACKUP_TYPE_RE {
    () => {
        r"(?:host|vm|ct)"
    };
}
macro_rules! BACKUP_TIME_RE {
    () => {
        r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z"
    };
}

const_regex! {
    BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
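The one-line regex macros are only expanded into block form above; they exist so the fragments can be spliced into const_regex! patterns elsewhere in this file. A hedged sketch of such a composition (GROUP_PATH_REGEX is referenced further down in this file, but the exact pattern shown here is an assumption):

const_regex! {
    // Sketch: combine the type and id fragments into one anchored group path pattern.
    GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")/(", BACKUP_ID_RE!(), r")$");
}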
@ -38,7 +50,6 @@ pub struct BackupGroup {
}

impl std::cmp::Ord for BackupGroup {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.backup_type.cmp(&other.backup_type);
        if type_order != std::cmp::Ordering::Equal {
@ -63,9 +74,11 @@ impl std::cmp::PartialOrd for BackupGroup {
}

impl BackupGroup {
    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
        Self {
            backup_type: backup_type.into(),
            backup_id: backup_id.into(),
        }
    }

    pub fn backup_type(&self) -> &str {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn group_path(&self) -> PathBuf {
|
pub fn group_path(&self) -> PathBuf {
|
||||||
|
|
||||||
let mut relative_path = PathBuf::new();
|
let mut relative_path = PathBuf::new();
|
||||||
|
|
||||||
relative_path.push(&self.backup_type);
|
relative_path.push(&self.backup_type);
|
||||||
@ -88,46 +100,65 @@ impl BackupGroup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
|
pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
|
||||||
|
|
||||||
let mut list = vec![];
|
let mut list = vec![];
|
||||||
|
|
||||||
let mut path = base_path.to_owned();
|
let mut path = base_path.to_owned();
|
||||||
path.push(self.group_path());
|
path.push(self.group_path());
|
||||||
|
|
||||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
tools::scandir(
|
||||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
libc::AT_FDCWD,
|
||||||
|
&path,
|
||||||
|
&BACKUP_DATE_REGEX,
|
||||||
|
|l2_fd, backup_time, file_type| {
|
||||||
|
if file_type != nix::dir::Type::Directory {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
let backup_dir =
|
||||||
|
BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
||||||
let files = list_backup_files(l2_fd, backup_time)?;
|
let files = list_backup_files(l2_fd, backup_time)?;
|
||||||
|
|
||||||
list.push(BackupInfo { backup_dir, files });
|
list.push(BackupInfo { backup_dir, files });
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
},
|
||||||
|
)?;
|
||||||
Ok(list)
|
Ok(list)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
||||||
|
|
||||||
let mut last = None;
|
let mut last = None;
|
||||||
|
|
||||||
let mut path = base_path.to_owned();
|
let mut path = base_path.to_owned();
|
||||||
path.push(self.group_path());
|
path.push(self.group_path());
|
||||||
|
|
||||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
tools::scandir(
|
||||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
libc::AT_FDCWD,
|
||||||
|
&path,
|
||||||
|
&BACKUP_DATE_REGEX,
|
||||||
|
|l2_fd, backup_time, file_type| {
|
||||||
|
if file_type != nix::dir::Type::Directory {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
let mut manifest_path = PathBuf::from(backup_time);
|
let mut manifest_path = PathBuf::from(backup_time);
|
||||||
manifest_path.push(MANIFEST_BLOB_NAME);
|
manifest_path.push(MANIFEST_BLOB_NAME);
|
||||||
|
|
||||||
use nix::fcntl::{openat, OFlag};
|
use nix::fcntl::{openat, OFlag};
|
||||||
match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
|
match openat(
|
||||||
|
l2_fd,
|
||||||
|
&manifest_path,
|
||||||
|
OFlag::O_RDONLY,
|
||||||
|
nix::sys::stat::Mode::empty(),
|
||||||
|
) {
|
||||||
Ok(rawfd) => {
|
Ok(rawfd) => {
|
||||||
/* manifest exists --> assume backup was successful */
|
/* manifest exists --> assume backup was successful */
|
||||||
/* close else this leaks! */
|
/* close else this leaks! */
|
||||||
nix::unistd::close(rawfd)?;
|
nix::unistd::close(rawfd)?;
|
||||||
},
|
}
|
||||||
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
|
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
bail!("last_successful_backup: unexpected error - {}", err);
|
bail!("last_successful_backup: unexpected error - {}", err);
|
||||||
}
|
}
|
||||||
@ -135,13 +166,16 @@ impl BackupGroup {
|
|||||||
|
|
||||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||||
if let Some(last_timestamp) = last {
|
if let Some(last_timestamp) = last {
|
||||||
if timestamp > last_timestamp { last = Some(timestamp); }
|
if timestamp > last_timestamp {
|
||||||
|
last = Some(timestamp);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
last = Some(timestamp);
|
last = Some(timestamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
Ok(last)
|
Ok(last)
|
||||||
}
|
}
|
||||||
@ -162,7 +196,8 @@ impl std::str::FromStr for BackupGroup {
|
|||||||
///
|
///
|
||||||
/// This parses strings like `vm/100".
|
/// This parses strings like `vm/100".
|
||||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||||
let cap = GROUP_PATH_REGEX.captures(path)
|
let cap = GROUP_PATH_REGEX
|
||||||
|
.captures(path)
|
||||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
@ -182,11 +217,10 @@ pub struct BackupDir {
    /// Backup timestamp
    backup_time: i64,
    // backup_time as rfc3339
    backup_time_string: String,
}

impl BackupDir {
    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
    where
        T: Into<String>,
@ -196,7 +230,11 @@ impl BackupDir {
        BackupDir::with_group(group, backup_time)
    }

    pub fn with_rfc3339<T, U, V>(
        backup_type: T,
        backup_id: U,
        backup_time_string: V,
    ) -> Result<Self, Error>
    where
        T: Into<String>,
        U: Into<String>,
@ -205,12 +243,20 @@ impl BackupDir {
        let backup_time_string = backup_time_string.into();
        let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
        let group = BackupGroup::new(backup_type.into(), backup_id.into());
        Ok(Self {
            group,
            backup_time,
            backup_time_string,
        })
    }

    pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
        let backup_time_string = Self::backup_time_to_string(backup_time)?;
        Ok(Self {
            group,
            backup_time,
            backup_time_string,
        })
    }

    pub fn group(&self) -> &BackupGroup {
@ -226,7 +272,6 @@ impl BackupDir {
    }

    pub fn relative_path(&self) -> PathBuf {
        let mut relative_path = self.group.group_path();

        relative_path.push(self.backup_time_string.clone());
@ -247,7 +292,8 @@ impl std::str::FromStr for BackupDir {
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z".
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        BackupDir::with_rfc3339(
@ -276,7 +322,6 @@ pub struct BackupInfo {
}

impl BackupInfo {
    pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
        let mut path = base_path.to_owned();
        path.push(backup_dir.relative_path());
@ -287,19 +332,24 @@ impl BackupInfo {
    }

    /// Finds the latest backup inside a backup group
    pub fn last_backup(
        base_path: &Path,
        group: &BackupGroup,
        only_finished: bool,
    ) -> Result<Option<BackupInfo>, Error> {
        let backups = group.list_backups(base_path)?;
        Ok(backups
            .into_iter()
            .filter(|item| !only_finished || item.is_finished())
            .max_by_key(|item| item.backup_dir.backup_time()))
    }

    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
        if ascendending {
            // oldest first
            list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
        } else {
            // newest first
            list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
        }
    }
@ -316,31 +366,52 @@ impl BackupInfo {
    pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
        let mut list = Vec::new();

        tools::scandir(
            libc::AT_FDCWD,
            base_path,
            &BACKUP_TYPE_REGEX,
            |l0_fd, backup_type, file_type| {
                if file_type != nix::dir::Type::Directory {
                    return Ok(());
                }
                tools::scandir(
                    l0_fd,
                    backup_type,
                    &BACKUP_ID_REGEX,
                    |_, backup_id, file_type| {
                        if file_type != nix::dir::Type::Directory {
                            return Ok(());
                        }

                        list.push(BackupGroup::new(backup_type, backup_id));

                        Ok(())
                    },
                )
            },
        )?;

        Ok(list)
    }

    pub fn is_finished(&self) -> bool {
        // backup is considered unfinished if there is no manifest
        self.files
            .iter()
            .any(|name| name == super::MANIFEST_BLOB_NAME)
    }
}

fn list_backup_files<P: ?Sized + nix::NixPath>(
    dirfd: RawFd,
    path: &P,
) -> Result<Vec<String>, Error> {
    let mut files = vec![];

    tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
        if file_type != nix::dir::Type::File {
            return Ok(());
        }
        files.push(filename.to_owned());
        Ok(())
    })?;
@ -452,7 +452,7 @@ impl ChunkStore {
    #[test]
    fn test_chunk_store1() {

        let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
        path.push(".testdir");

        if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
@ -448,7 +448,7 @@ impl DataStore {
            if !self.chunk_store.cond_touch_chunk(digest, false)? {
                crate::task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {}, required by {:?}",
                    proxmox::tools::digest_to_hex(digest),
                    file_name,
                );
@ -1453,7 +1453,7 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
            type: String,
            description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
        },
@ -330,7 +330,7 @@ async fn get_versions(verbose: bool, param: Value) -> Result<Value, Error> {

    let options = default_table_format_options()
        .disable_sort()
        .noborder(true) // just not helpful for version info which gets copy pasted often
        .column(ColumnConfig::new("Package"))
        .column(ColumnConfig::new("Version"))
        .column(ColumnConfig::new("ExtraInfo").header("Extra Info"))
@ -27,10 +27,12 @@ use proxmox_backup::{
    api2::{
        self,
        types::{
            Authid,
            DATASTORE_SCHEMA,
            DRIVE_NAME_SCHEMA,
            MEDIA_LABEL_SCHEMA,
            MEDIA_POOL_NAME_SCHEMA,
            Userid,
        },
    },
    config::{
@ -863,6 +865,14 @@ async fn backup(mut param: Value) -> Result<(), Error> {
            description: "Media set UUID.",
            type: String,
        },
        "notify-user": {
            type: Userid,
            optional: true,
        },
        owner: {
            type: Authid,
            optional: true,
        },
        "output-format": {
            schema: OUTPUT_FORMAT,
            optional: true,
@ -527,7 +527,7 @@ fn show_master_pubkey(path: Option<String>, param: Value) -> Result<(), Error> {
            optional: true,
        },
        subject: {
            description: "Include the specified subject as title text.",
            optional: true,
        },
        "output-format": {
@ -140,7 +140,7 @@ fn mount(
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let (pr, pw) = proxmox_backup::tools::pipe()?;
    match unsafe { fork() } {
@ -84,7 +84,7 @@ pub fn encryption_key_commands() -> CommandLineInterface {
            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
        },
        subject: {
            description: "Include the specified subject as title text.",
            optional: true,
        },
        "output-format": {
@ -128,7 +128,7 @@ fn paper_key(
        },
    },
)]
/// Print the encryption key's metadata.
fn show_key(
    param: Value,
    rpcenv: &mut dyn RpcEnvironment,
@ -177,12 +177,14 @@ fn list_content(
    let options = default_table_format_options()
        .sortby("media-set-uuid", false)
        .sortby("seq-nr", false)
        .sortby("store", false)
        .sortby("snapshot", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("label-text"))
        .column(ColumnConfig::new("pool"))
        .column(ColumnConfig::new("media-set-name"))
        .column(ColumnConfig::new("seq-nr"))
        .column(ColumnConfig::new("store"))
        .column(ColumnConfig::new("snapshot"))
        .column(ColumnConfig::new("media-set-uuid"))
        ;
@ -130,22 +130,22 @@ fn extract_archive(
) -> Result<(), Error> {
    let mut feature_flags = Flags::DEFAULT;
    if no_xattrs {
        feature_flags.remove(Flags::WITH_XATTRS);
    }
    if no_fcaps {
        feature_flags.remove(Flags::WITH_FCAPS);
    }
    if no_acls {
        feature_flags.remove(Flags::WITH_ACL);
    }
    if no_device_nodes {
        feature_flags.remove(Flags::WITH_DEVICE_NODES);
    }
    if no_fifos {
        feature_flags.remove(Flags::WITH_FIFOS);
    }
    if no_sockets {
        feature_flags.remove(Flags::WITH_SOCKETS);
    }

    let pattern = pattern.unwrap_or_else(Vec::new);
@ -353,22 +353,22 @@ async fn create_archive(
    let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
    let mut feature_flags = Flags::DEFAULT;
    if no_xattrs {
        feature_flags.remove(Flags::WITH_XATTRS);
    }
    if no_fcaps {
        feature_flags.remove(Flags::WITH_FCAPS);
    }
    if no_acls {
        feature_flags.remove(Flags::WITH_ACL);
    }
    if no_device_nodes {
        feature_flags.remove(Flags::WITH_DEVICE_NODES);
    }
    if no_fifos {
        feature_flags.remove(Flags::WITH_FIFOS);
    }
    if no_sockets {
        feature_flags.remove(Flags::WITH_SOCKETS);
    }

    let writer = pxar::encoder::sync::StandardWriter::new(writer);
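These two hunks are the "fix feature flag logic in pxar create" entry from the changelog: ^= only toggles a bit, so disabling a feature that the default set does not contain would actually switch it on, while remove always clears it. A self-contained illustration with a toy bitflags type (the real Flags type comes from pxar):

use bitflags::bitflags;

bitflags! {
    // Toy stand-in for pxar's feature flags, only to show the operator semantics.
    struct Flags: u32 {
        const WITH_XATTRS  = 0b01;
        const WITH_SOCKETS = 0b10;
        const DEFAULT      = Self::WITH_XATTRS.bits;
    }
}

fn main() {
    let mut flags = Flags::DEFAULT;       // WITH_SOCKETS is not set here
    flags ^= Flags::WITH_SOCKETS;         // old code: toggling a cleared bit enables the feature
    assert!(flags.contains(Flags::WITH_SOCKETS));

    let mut flags = Flags::DEFAULT;
    flags.remove(Flags::WITH_SOCKETS);    // fixed code: remove always leaves the bit cleared
    assert!(!flags.contains(Flags::WITH_SOCKETS));
}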
@ -1,6 +1,6 @@
/// Tape command implemented using scsi-generic raw commands
///
/// SCSI-generic command needs root privileges, so this binary need
/// to be setuid root.
///
/// This command can use STDIN as tape device handle.
@ -16,11 +16,11 @@ pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
/// namespaced directory for persistent logging
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();

/// logfile for all API requests handled by the proxy and privileged API daemons. Note that not all
/// failed logins can be logged here with full information, use the auth log for that.
pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log");

/// logfile for any failed authentication, via ticket or via token, and new successful ticket
/// creations. This file can be useful for fail2ban.
pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log");

@ -509,7 +509,7 @@ impl BackupWriter {
    }

    // We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
    // function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
    // since this is a private method.
    #[allow(clippy::too_many_arguments)]
    fn upload_chunk_info_stream(
@ -86,7 +86,7 @@ impl tower_service::Service<Uri> for VsockConnector {

            Ok(connection)
        })
        // unravel the thread JoinHandle to a usable future
        .map(|res| match res {
            Ok(res) => res,
            Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
@ -82,7 +82,7 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
    Ok(())
}

// parse ip address with optional cidr mask
pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> {

    lazy_static! {
@ -4,10 +4,10 @@
//! indexed by key fingerprint.
//!
//! We store the plain key (unencrypted), as well as a encrypted
//! version protected by password (see struct `KeyConfig`)
//!
//! Tape backups store the password protected version on tape, so that
//! it is possible to restore the key from tape if you know the
//! password.

use std::collections::HashMap;
@ -590,7 +590,7 @@ impl TfaUserChallengeData {
    }

    /// Save the current data. Note that we do not replace the file here since we lock the file
    /// itself, as it is in `/run`, and the typical error case for this particular situation
    /// (machine loses power) simply prevents some login, but that'll probably fail anyway for
    /// other reasons then...
    ///
@ -752,10 +752,7 @@ fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64) -> Resu
        flags: 0,
        uid: stat.st_uid,
        gid: stat.st_gid,
        mtime: pxar::format::StatxTimestamp::new(stat.st_mtime, stat.st_mtime_nsec as u32),
    },
    ..Default::default()
};
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags) -> Result<(), Error> {
|
fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags) -> Result<(), Error> {
|
||||||
if flags.contains(Flags::WITH_FCAPS) {
|
if !flags.contains(Flags::WITH_FCAPS) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -790,7 +787,7 @@ fn get_xattr_fcaps_acl(
|
|||||||
proc_path: &Path,
|
proc_path: &Path,
|
||||||
flags: Flags,
|
flags: Flags,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
if flags.contains(Flags::WITH_XATTRS) {
|
if !flags.contains(Flags::WITH_XATTRS) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -879,7 +876,7 @@ fn get_quota_project_id(
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
if flags.contains(Flags::WITH_QUOTA_PROJID) {
|
if !flags.contains(Flags::WITH_QUOTA_PROJID) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -914,7 +911,7 @@ fn get_quota_project_id(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags) -> Result<(), Error> {
|
fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags) -> Result<(), Error> {
|
||||||
if flags.contains(Flags::WITH_ACL) {
|
if !flags.contains(Flags::WITH_ACL) {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
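The guard changes just above are the other half of the same fix: the old checks returned early exactly when a feature was requested, so xattrs, fcaps, ACLs and quota project ids were collected only when they were not wanted. A toy model of why the negation matters (not the real pxar code):

// `enabled` plays the role of flags.contains(Flags::WITH_XATTRS).
fn collect_xattrs(enabled: bool) -> &'static str {
    // The old shape was `if enabled { return "skipped"; }`, which skipped the requested case.
    if !enabled {
        return "skipped";
    }
    "collected"
}

fn main() {
    assert_eq!(collect_xattrs(true), "collected");
    assert_eq!(collect_xattrs(false), "skipped");
}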
@ -43,7 +43,7 @@ Deduplication Factor: {{deduplication-factor}}
Garbage collection successful.


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}>

@ -57,7 +57,7 @@ Datastore: {{datastore}}
Garbage collection failed: {{error}}


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

@ -71,7 +71,7 @@ Datastore: {{job.store}}
Verification successful.


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>

@ -89,7 +89,7 @@ Verification failed on these snapshots/groups:
{{/each}}


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

@ -105,7 +105,7 @@ Remote Store: {{job.remote-store}}
Synchronization successful.


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>

@ -121,7 +121,7 @@ Remote Store: {{job.remote-store}}
Synchronization failed: {{error}}


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

@ -152,7 +152,7 @@ Tape Drive: {{job.drive}}
Tape Backup successful.


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>

@ -171,7 +171,7 @@ Tape Drive: {{job.drive}}
Tape Backup failed: {{error}}


Please visit the web interface for further details:

<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

@ -448,6 +448,30 @@ pub fn send_tape_backup_status(
    Ok(())
}

/// Send email to a person to request a manual media change
pub fn send_load_media_email(
    drive: &str,
    label_text: &str,
    to: &str,
    reason: Option<String>,
) -> Result<(), Error> {

    let subject = format!("Load Media '{}' request for drive '{}'", label_text, drive);

    let mut text = String::new();

    if let Some(reason) = reason {
        text.push_str(&format!("The drive has the wrong or no tape inserted. Error:\n{}\n\n", reason));
    }

    text.push_str("Please insert the requested media into the backup drive.\n\n");

    text.push_str(&format!("Drive: {}\n", drive));
    text.push_str(&format!("Media: {}\n", label_text));

    send_job_status_mail(to, &subject, &text)
}

fn get_server_url() -> (String, usize) {

    // user will surely request that they can change this
@ -207,6 +207,8 @@ pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
|
|||||||
let mut iter = last_line.splitn(2, ": ");
|
let mut iter = last_line.splitn(2, ": ");
|
||||||
if let Some(time_str) = iter.next() {
|
if let Some(time_str) = iter.next() {
|
||||||
if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
|
if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
|
||||||
|
// set the endtime even if we cannot parse the state
|
||||||
|
status = TaskState::Unknown { endtime };
|
||||||
if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
|
if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
|
||||||
if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
|
if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
|
||||||
status = state;
|
status = state;
|
||||||
@ -749,7 +751,7 @@ impl WorkerTask {
|
|||||||
match data.abort_listeners.pop() {
|
match data.abort_listeners.pop() {
|
||||||
None => { break; },
|
None => { break; },
|
||||||
Some(ch) => {
|
Some(ch) => {
|
||||||
let _ = ch.send(()); // ignore erros here
|
let _ = ch.send(()); // ignore errors here
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,36 +0,0 @@
|
|||||||
use anyhow::Error;
|
|
||||||
|
|
||||||
use proxmox::tools::email::sendmail;
|
|
||||||
|
|
||||||
/// Send email to a person to request a manual media change
|
|
||||||
pub fn send_load_media_email(
|
|
||||||
drive: &str,
|
|
||||||
label_text: &str,
|
|
||||||
to: &str,
|
|
||||||
reason: Option<String>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
|
|
||||||
let subject = format!("Load Media '{}' request for drive '{}'", label_text, drive);
|
|
||||||
|
|
||||||
let mut text = String::new();
|
|
||||||
|
|
||||||
if let Some(reason) = reason {
|
|
||||||
text.push_str(&format!("The drive has the wrong or no tape inserted. Error:\n{}\n\n", reason));
|
|
||||||
}
|
|
||||||
|
|
||||||
text.push_str("Please insert the requested media into the backup drive.\n\n");
|
|
||||||
|
|
||||||
text.push_str(&format!("Drive: {}\n", drive));
|
|
||||||
text.push_str(&format!("Media: {}\n", label_text));
|
|
||||||
|
|
||||||
sendmail(
|
|
||||||
&[to],
|
|
||||||
&subject,
|
|
||||||
Some(&text),
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@@ -1,8 +1,5 @@
 //! Media changer implementation (SCSI media changer)
 
-mod email;
-pub use email::*;
-
 pub mod sg_pt_changer;
 
 pub mod mtx;
@@ -35,7 +32,7 @@ use crate::api2::types::{
 /// Changer element status.
 ///
 /// Drive and slots may be `Empty`, or contain some media, either
-/// with knwon volume tag `VolumeTag(String)`, or without (`Full`).
+/// with known volume tag `VolumeTag(String)`, or without (`Full`).
 #[derive(Serialize, Deserialize, Debug)]
 pub enum ElementStatus {
     Empty,
@@ -87,7 +84,7 @@ pub struct MtxStatus {
     pub drives: Vec<DriveStatus>,
     /// List of known storage slots
     pub slots: Vec<StorageElementStatus>,
-    /// Tranport elements
+    /// Transport elements
     ///
     /// Note: Some libraries do not report transport elements.
     pub transports: Vec<TransportElementStatus>,
@@ -261,7 +258,7 @@ pub trait MediaChange {
 
     /// List online media labels (label_text/barcodes)
     ///
-    /// List acessible (online) label texts. This does not include
+    /// List accessible (online) label texts. This does not include
     /// media inside import-export slots or cleaning media.
     fn online_media_label_texts(&mut self) -> Result<Vec<String>, Error> {
         let status = self.status()?;
@@ -378,7 +375,7 @@ pub trait MediaChange {
 
     /// Unload media to a free storage slot
     ///
-    /// If posible to the slot it was previously loaded from.
+    /// If possible to the slot it was previously loaded from.
    ///
     /// Note: This method consumes status - so please use returned status afterward.
     fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result<MtxStatus, Error> {
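The doc comment above names the three slot states a changer can report. A small self-contained sketch, with the enum re-declared here so it compiles on its own (assuming the variants named in the comment), shows how the volume tag would typically be extracted:

```rust
// Mirror of the documented variants, re-declared so the sketch is self-contained.
#[allow(dead_code)]
enum ElementStatus {
    Empty,
    Full,
    VolumeTag(String),
}

// Extract the barcode / volume tag of a slot, if the changer reported one.
fn volume_tag(status: &ElementStatus) -> Option<&str> {
    match status {
        ElementStatus::Empty => None,            // no media loaded
        ElementStatus::Full => None,             // media present, barcode unknown
        ElementStatus::VolumeTag(tag) => Some(tag),
    }
}

fn main() {
    let slot = ElementStatus::VolumeTag("tape-001".to_string());
    assert_eq!(volume_tag(&slot), Some("tape-001"));
}
```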
@@ -1,4 +1,4 @@
-//! Wrapper around expernal `mtx` command line tool
+//! Wrapper around external `mtx` command line tool
 
 mod parse_mtx_status;
 pub use parse_mtx_status::*;
@@ -246,7 +246,7 @@ pub fn unload(
     Ok(())
 }
 
-/// Tranfer medium from one storage slot to another
+/// Transfer medium from one storage slot to another
 pub fn transfer_medium<F: AsRawFd>(
     file: &mut F,
     from_slot: u64,
@@ -362,7 +362,7 @@ pub fn read_element_status<F: AsRawFd>(file: &mut F) -> Result<MtxStatus, Error>
         bail!("got wrong number of import/export elements");
     }
     if (setup.transfer_element_count as usize) != drives.len() {
-        bail!("got wrong number of tranfer elements");
+        bail!("got wrong number of transfer elements");
     }
 
     // create same virtual slot order as mtx(1)
@@ -428,7 +428,7 @@ struct SubHeader {
     element_type_code: u8,
     flags: u8,
     descriptor_length: u16,
-    reseved: u8,
+    reserved: u8,
     byte_count_of_descriptor_data_available: [u8;3],
 }
 
@@ -196,7 +196,7 @@ struct SspDataEncryptionCapabilityPage {
     page_code: u16,
     page_len: u16,
     extdecc_cfgp_byte: u8,
-    reserverd: [u8; 15],
+    reserved: [u8; 15],
 }
 
 #[derive(Endian)]
@@ -241,13 +241,13 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
         let desc: SspDataEncryptionAlgorithmDescriptor =
             unsafe { reader.read_be_value()? };
         if desc.descriptor_len != 0x14 {
-            bail!("got wrong key descriptior len");
+            bail!("got wrong key descriptor len");
         }
         if (desc.control_byte_4 & 0b00000011) != 2 {
-            continue; // cant encrypt in hardware
+            continue; // can't encrypt in hardware
         }
         if ((desc.control_byte_4 & 0b00001100) >> 2) != 2 {
-            continue; // cant decrypt in hardware
+            continue; // can't decrypt in hardware
         }
         if desc.algorithm_code == 0x00010014 && desc.key_size == 32 {
             aes_cgm_index = Some(desc.algorythm_index);
@@ -276,7 +276,7 @@ struct SspDataEncryptionStatusPage {
     control_byte: u8,
     key_format: u8,
     key_len: u16,
-    reserverd: [u8; 8],
+    reserved: [u8; 8],
 }
 
 fn decode_spin_data_encryption_status(data: &[u8]) -> Result<DataEncryptionStatus, Error> {
@@ -72,14 +72,14 @@ static MAM_ATTRIBUTES: &[ (u16, u16, MamFormat, &str) ] = &[
     (0x08_02, 8, MamFormat::ASCII, "Application Version"),
     (0x08_03, 160, MamFormat::ASCII, "User Medium Text Label"),
     (0x08_04, 12, MamFormat::ASCII, "Date And Time Last Written"),
-    (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifer"),
+    (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifier"),
     (0x08_06, 32, MamFormat::ASCII, "Barcode"),
     (0x08_07, 80, MamFormat::ASCII, "Owning Host Textual Name"),
     (0x08_08, 160, MamFormat::ASCII, "Media Pool"),
     (0x08_0B, 16, MamFormat::ASCII, "Application Format Version"),
     (0x08_0C, 50, MamFormat::ASCII, "Volume Coherency Information"),
-    (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifer"),
+    (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifier"),
-    (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifer"),
+    (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifier"),
 
     (0x10_00, 28, MamFormat::BINARY, "Unique Cartridge Identify (UCI)"),
     (0x10_01, 24, MamFormat::BINARY, "Alternate Unique Cartridge Identify (Alt-UCI)"),
@@ -101,12 +101,13 @@ lazy_static::lazy_static!{
 
 fn read_tape_mam<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
 
-    let mut sg_raw = SgRaw::new(file, 32*1024)?;
+    let alloc_len: u32 = 32*1024;
+    let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;
 
     let mut cmd = Vec::new();
     cmd.extend(&[0x8c, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8]);
     cmd.extend(&[0u8, 0u8]); // first attribute
-    cmd.extend(&[0u8, 0u8, 0x8f, 0xff]); // alloc len
+    cmd.extend(&alloc_len.to_be_bytes()); // alloc len
     cmd.extend(&[0u8, 0u8]);
 
     sg_raw.do_command(&cmd)
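The allocation length field of that READ ATTRIBUTE command is a 4-byte big-endian value, so deriving the CDB bytes from `alloc_len` keeps the command consistent with the buffer handed to `SgRaw`. The old hard-coded bytes encoded a different value than the 32 KiB buffer, which this tiny standalone check illustrates:

```rust
fn main() {
    let alloc_len: u32 = 32 * 1024;
    // big-endian encoding of the allocation length as it is placed into the CDB
    assert_eq!(alloc_len.to_be_bytes(), [0x00, 0x00, 0x80, 0x00]);
    // the previously hard-coded bytes encoded 36863, not 32768
    assert_eq!(u32::from_be_bytes([0x00, 0x00, 0x8f, 0xff]), 36863);
}
```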
@@ -130,8 +131,12 @@ fn decode_mam_attributes(data: &[u8]) -> Result<Vec<MamAttribute>, Error> {
 
     let expected_len = data_len as usize;
 
-    if reader.len() != expected_len {
+    if reader.len() < expected_len {
         bail!("read_mam_attributes: got unexpected data len ({} != {})", reader.len(), expected_len);
+    } else if reader.len() > expected_len {
+        // Note: Quantum hh7 returns the allocation_length instead of real data_len
+        reader = &data[4..expected_len+4];
     }
 
     let mut list = Vec::new();
@ -51,7 +51,10 @@ use crate::{
|
|||||||
VirtualTapeDrive,
|
VirtualTapeDrive,
|
||||||
LinuxTapeDrive,
|
LinuxTapeDrive,
|
||||||
},
|
},
|
||||||
server::WorkerTask,
|
server::{
|
||||||
|
send_load_media_email,
|
||||||
|
WorkerTask,
|
||||||
|
},
|
||||||
tape::{
|
tape::{
|
||||||
TapeWrite,
|
TapeWrite,
|
||||||
TapeRead,
|
TapeRead,
|
||||||
@ -66,7 +69,6 @@ use crate::{
|
|||||||
changer::{
|
changer::{
|
||||||
MediaChange,
|
MediaChange,
|
||||||
MtxMediaChanger,
|
MtxMediaChanger,
|
||||||
send_load_media_email,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -209,7 +211,7 @@ pub trait TapeDriver {
|
|||||||
/// Set or clear encryption key
|
/// Set or clear encryption key
|
||||||
///
|
///
|
||||||
/// We use the media_set_uuid to XOR the secret key with the
|
/// We use the media_set_uuid to XOR the secret key with the
|
||||||
/// uuid (first 16 bytes), so that each media set uses an uique
|
/// uuid (first 16 bytes), so that each media set uses an unique
|
||||||
/// key for encryption.
|
/// key for encryption.
|
||||||
fn set_encryption(
|
fn set_encryption(
|
||||||
&mut self,
|
&mut self,
|
||||||
@ -465,7 +467,7 @@ pub fn request_and_load_media(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Aquires an exclusive lock for the tape device
|
/// Acquires an exclusive lock for the tape device
|
||||||
///
|
///
|
||||||
/// Basically calls lock_device_path() using the configured drive path.
|
/// Basically calls lock_device_path() using the configured drive path.
|
||||||
pub fn lock_tape_device(
|
pub fn lock_tape_device(
|
||||||
@ -539,7 +541,7 @@ fn tape_device_path(
|
|||||||
|
|
||||||
pub struct DeviceLockGuard(std::fs::File);
|
pub struct DeviceLockGuard(std::fs::File);
|
||||||
|
|
||||||
// Aquires an exclusive lock on `device_path`
|
// Acquires an exclusive lock on `device_path`
|
||||||
//
|
//
|
||||||
// Uses systemd escape_unit to compute a file name from `device_path`, the try
|
// Uses systemd escape_unit to compute a file name from `device_path`, the try
|
||||||
// to lock `/var/lock/<name>`.
|
// to lock `/var/lock/<name>`.
|
||||||
|
@ -429,7 +429,7 @@ impl MediaChange for VirtualTapeHandle {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> {
|
fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> {
|
||||||
bail!("media tranfer is not implemented!");
|
bail!("media transfer is not implemented!");
|
||||||
}
|
}
|
||||||
|
|
||||||
fn export_media(&mut self, _label_text: &str) -> Result<Option<u64>, Error> {
|
fn export_media(&mut self, _label_text: &str) -> Result<Option<u64>, Error> {
|
||||||
|
@@ -27,11 +27,8 @@ pub fn read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Lp17VolumeSta
 
 fn sg_read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
 
-    let buffer_size = 8192;
-    let mut sg_raw = SgRaw::new(file, buffer_size)?;
+    let alloc_len: u16 = 8192;
+    let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;
 
-    // Note: We cannjot use LP 2Eh TapeAlerts, because that clears flags on read.
-    // Instead, we use LP 12h TapeAlert Response. which does not clear the flags.
-
     let mut cmd = Vec::new();
     cmd.push(0x4D); // LOG SENSE
@@ -41,7 +38,7 @@ fn sg_read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error>
     cmd.push(0);
     cmd.push(0);
     cmd.push(0);
-    cmd.push((buffer_size >> 8) as u8); cmd.push(0); // alloc len
+    cmd.extend(&alloc_len.to_be_bytes()); // alloc len
     cmd.push(0u8); // control byte
 
     sg_raw.do_command(&cmd)
@@ -145,8 +142,13 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>
 
     let page_len: u16 = unsafe { reader.read_be_value()? };
 
-    if (page_len as usize + 4) != data.len() {
+    let page_len = page_len as usize;
+
+    if (page_len + 4) > data.len() {
         bail!("invalid page length");
+    } else {
+        // Note: Quantum hh7 returns the allocation_length instead of real data_len
+        reader = &data[4..page_len+4];
     }
 
     let mut stat = Lp17VolumeStatistics::default();
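Both the MAM decoder and this volume-statistics decoder now tolerate buffers that are longer than the announced page length, because some drives (the comments name the Quantum hh7) report the allocation length instead of the real data length. A generic restatement of that clamping pattern, as a standalone sketch rather than the actual helpers used above:

```rust
use anyhow::{bail, Error};

// Reject buffers that are too short, but silently clamp over-long ones to the
// announced payload length (4 byte page header precedes the payload).
fn clamp_payload(data: &[u8], announced_len: usize) -> Result<&[u8], Error> {
    if data.len() < announced_len + 4 {
        bail!("got unexpected data len ({} < {})", data.len(), announced_len + 4);
    }
    // drives that return the allocation length produce trailing bytes; ignore them
    Ok(&data[4..announced_len + 4])
}
```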
@@ -77,7 +77,7 @@ impl <R: Read> BlockedReader<R> {
 
     if seq_nr != buffer.seq_nr() {
         proxmox::io_bail!(
-            "detected tape block with wrong seqence number ({} != {})",
+            "detected tape block with wrong sequence number ({} != {})",
             seq_nr, buffer.seq_nr())
     }
 
@@ -14,9 +14,10 @@ use crate::tape::{
     TapeWrite,
     file_formats::{
         PROXMOX_TAPE_BLOCK_SIZE,
-        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
         PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0,
         MediaContentHeader,
+        ChunkArchiveHeader,
         ChunkArchiveEntryHeader,
     },
 };
@@ -25,7 +26,7 @@ use crate::tape::{
 ///
 /// A chunk archive consists of a `MediaContentHeader` followed by a
 /// list of chunks entries. Each chunk entry consists of a
-/// `ChunkArchiveEntryHeader` folowed by the chunk data (`DataBlob`).
+/// `ChunkArchiveEntryHeader` followed by the chunk data (`DataBlob`).
 ///
 /// `| MediaContentHeader | ( ChunkArchiveEntryHeader | DataBlob )* |`
 pub struct ChunkArchiveWriter<'a> {
@@ -36,13 +37,20 @@ pub struct ChunkArchiveWriter<'a> {
 
 impl <'a> ChunkArchiveWriter<'a> {
 
-    pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0;
+    pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1;
 
     /// Creates a new instance
-    pub fn new(mut writer: Box<dyn TapeWrite + 'a>, close_on_leom: bool) -> Result<(Self,Uuid), Error> {
+    pub fn new(
+        mut writer: Box<dyn TapeWrite + 'a>,
+        store: &str,
+        close_on_leom: bool,
+    ) -> Result<(Self,Uuid), Error> {
 
-        let header = MediaContentHeader::new(Self::MAGIC, 0);
-        writer.write_header(&header, &[])?;
+        let archive_header = ChunkArchiveHeader { store: store.to_string() };
+        let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
+
+        let header = MediaContentHeader::new(Self::MAGIC, header_data.len() as u32);
+        writer.write_header(&header, &header_data)?;
 
         let me = Self {
             writer: Some(writer),
@@ -153,7 +161,7 @@ impl <R: Read> ChunkArchiveDecoder<R> {
         Self { reader }
     }
 
-    /// Allow access to the underyling reader
+    /// Allow access to the underlying reader
     pub fn reader(&self) -> &R {
         &self.reader
    }
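With the v1.1 magic the content header is no longer empty: it carries a JSON-encoded `ChunkArchiveHeader` naming the datastore, which is what later allows a chunk archive on tape to be mapped back to the datastore it came from. A self-contained sketch of just that serialization step (the struct is re-declared here; `"store1"` is only an example value):

```rust
use serde::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
struct ChunkArchiveHeader {
    store: String,
}

fn main() -> Result<(), serde_json::Error> {
    // the datastore name travels as pretty-printed JSON right after the MediaContentHeader
    let header = ChunkArchiveHeader { store: "store1".to_string() };
    let header_data = serde_json::to_string_pretty(&header)?.into_bytes();
    println!("{} header bytes: {}", header_data.len(), String::from_utf8_lossy(&header_data));
    Ok(())
}
```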
@@ -44,12 +44,22 @@ pub const PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0: [u8; 8] = [42, 5, 191, 60, 176,
 pub const PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0: [u8; 8] = [8, 96, 99, 249, 47, 151, 83, 216];
 
 // openssl::sha::sha256(b"Proxmox Backup Chunk Archive v1.0")[0..8]
+// only used in unreleased version - no longer supported
 pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0: [u8; 8] = [62, 173, 167, 95, 49, 76, 6, 110];
+// openssl::sha::sha256(b"Proxmox Backup Chunk Archive v1.1")[0..8]
+pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1: [u8; 8] = [109, 49, 99, 109, 215, 2, 131, 191];
 
 // openssl::sha::sha256(b"Proxmox Backup Chunk Archive Entry v1.0")[0..8]
 pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = [72, 87, 109, 242, 222, 66, 143, 220];
 
 // openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.0")[0..8];
+// only used in unreleased version - no longer supported
 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 125, 232, 114, 133];
+// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.1")[0..8];
+pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
+
+// openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.0")[0..8];
+pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] = [183, 207, 199, 37, 158, 153, 30, 115];
 
 lazy_static::lazy_static!{
     // Map content magic numbers to human readable names.
@@ -58,7 +68,10 @@ lazy_static::lazy_static!{
     map.insert(&PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, "Proxmox Backup Tape Label v1.0");
     map.insert(&PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, "Proxmox Backup MediaSet Label v1.0");
     map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, "Proxmox Backup Chunk Archive v1.0");
+    map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, "Proxmox Backup Chunk Archive v1.1");
     map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, "Proxmox Backup Snapshot Archive v1.0");
+    map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, "Proxmox Backup Snapshot Archive v1.1");
+    map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, "Proxmox Backup Catalog Archive v1.0");
     map
     };
 }
@@ -172,6 +185,13 @@ impl MediaContentHeader {
     }
 }
 
+#[derive(Deserialize, Serialize)]
+/// Header for chunk archives
+pub struct ChunkArchiveHeader {
+    // Datastore name
+    pub store: String,
+}
+
 #[derive(Endian)]
 #[repr(C,packed)]
 /// Header for data blobs inside a chunk archive
@@ -184,6 +204,26 @@ pub struct ChunkArchiveEntryHeader {
     pub size: u64,
 }
 
+#[derive(Deserialize, Serialize)]
+/// Header for snapshot archives
+pub struct SnapshotArchiveHeader {
+    /// Snapshot name
+    pub snapshot: String,
+    /// Datastore name
+    pub store: String,
+}
+
+#[derive(Deserialize, Serialize)]
+/// Header for Catalog archives
+pub struct CatalogArchiveHeader {
+    /// The uuid of the media the catalog is for
+    pub uuid: Uuid,
+    /// The media set uuid the catalog is for
+    pub media_set_uuid: Uuid,
+    /// Media sequence number
+    pub seq_nr: u64,
+}
+
 #[derive(Serialize,Deserialize,Clone,Debug)]
 /// Media Label
 ///
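As the comments state, every magic constant is simply the first eight bytes of the SHA-256 of a version string, so bumping a format only requires hashing a new string. A small sketch reproducing that derivation with the openssl crate:

```rust
// Reproduces the derivation described in the comments above (sketch only).
fn main() {
    let digest = openssl::sha::sha256(b"Proxmox Backup Chunk Archive v1.1");
    let mut magic = [0u8; 8];
    magic.copy_from_slice(&digest[0..8]);
    // expected to match PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1
    println!("{:?}", magic);
}
```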
@ -12,16 +12,18 @@ use crate::tape::{
|
|||||||
SnapshotReader,
|
SnapshotReader,
|
||||||
file_formats::{
|
file_formats::{
|
||||||
PROXMOX_TAPE_BLOCK_SIZE,
|
PROXMOX_TAPE_BLOCK_SIZE,
|
||||||
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
|
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
|
||||||
MediaContentHeader,
|
MediaContentHeader,
|
||||||
|
SnapshotArchiveHeader,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/// Write a set of files as `pxar` archive to the tape
|
/// Write a set of files as `pxar` archive to the tape
|
||||||
///
|
///
|
||||||
/// This ignores file attributes like ACLs and xattrs.
|
/// This ignores file attributes like ACLs and xattrs.
|
||||||
///
|
///
|
||||||
/// Returns `Ok(Some(content_uuid))` on succees, and `Ok(None)` if
|
/// Returns `Ok(Some(content_uuid))` on success, and `Ok(None)` if
|
||||||
/// `LEOM` was detected before all data was written. The stream is
|
/// `LEOM` was detected before all data was written. The stream is
|
||||||
/// marked inclomplete in that case and does not contain all data (The
|
/// marked inclomplete in that case and does not contain all data (The
|
||||||
/// backup task must rewrite the whole file on the next media).
|
/// backup task must rewrite the whole file on the next media).
|
||||||
@ -31,12 +33,15 @@ pub fn tape_write_snapshot_archive<'a>(
|
|||||||
) -> Result<Option<Uuid>, std::io::Error> {
|
) -> Result<Option<Uuid>, std::io::Error> {
|
||||||
|
|
||||||
let snapshot = snapshot_reader.snapshot().to_string();
|
let snapshot = snapshot_reader.snapshot().to_string();
|
||||||
|
let store = snapshot_reader.datastore_name().to_string();
|
||||||
let file_list = snapshot_reader.file_list();
|
let file_list = snapshot_reader.file_list();
|
||||||
|
|
||||||
let header_data = snapshot.as_bytes().to_vec();
|
let archive_header = SnapshotArchiveHeader { snapshot, store };
|
||||||
|
|
||||||
|
let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
|
||||||
|
|
||||||
let header = MediaContentHeader::new(
|
let header = MediaContentHeader::new(
|
||||||
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, header_data.len() as u32);
|
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, header_data.len() as u32);
|
||||||
let content_uuid = header.uuid.into();
|
let content_uuid = header.uuid.into();
|
||||||
|
|
||||||
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
|
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
|
||||||
|
@ -26,6 +26,7 @@ use crate::{
|
|||||||
/// This make it easy to iterate over all used chunks and files.
|
/// This make it easy to iterate over all used chunks and files.
|
||||||
pub struct SnapshotReader {
|
pub struct SnapshotReader {
|
||||||
snapshot: BackupDir,
|
snapshot: BackupDir,
|
||||||
|
datastore_name: String,
|
||||||
file_list: Vec<String>,
|
file_list: Vec<String>,
|
||||||
locked_dir: Dir,
|
locked_dir: Dir,
|
||||||
}
|
}
|
||||||
@ -42,11 +43,13 @@ impl SnapshotReader {
|
|||||||
"snapshot",
|
"snapshot",
|
||||||
"locked by another operation")?;
|
"locked by another operation")?;
|
||||||
|
|
||||||
|
let datastore_name = datastore.name().to_string();
|
||||||
|
|
||||||
let manifest = match datastore.load_manifest(&snapshot) {
|
let manifest = match datastore.load_manifest(&snapshot) {
|
||||||
Ok((manifest, _)) => manifest,
|
Ok((manifest, _)) => manifest,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
bail!("manifest load error on datastore '{}' snapshot '{}' - {}",
|
bail!("manifest load error on datastore '{}' snapshot '{}' - {}",
|
||||||
datastore.name(), snapshot, err);
|
datastore_name, snapshot, err);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -60,7 +63,7 @@ impl SnapshotReader {
|
|||||||
file_list.push(CLIENT_LOG_BLOB_NAME.to_string());
|
file_list.push(CLIENT_LOG_BLOB_NAME.to_string());
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Self { snapshot, file_list, locked_dir })
|
Ok(Self { snapshot, datastore_name, file_list, locked_dir })
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the snapshot directory
|
/// Return the snapshot directory
|
||||||
@ -68,6 +71,11 @@ impl SnapshotReader {
|
|||||||
&self.snapshot
|
&self.snapshot
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Return the datastore name
|
||||||
|
pub fn datastore_name(&self) -> &str {
|
||||||
|
&self.datastore_name
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the list of files the snapshot refers to.
|
/// Returns the list of files the snapshot refers to.
|
||||||
pub fn file_list(&self) -> &Vec<String> {
|
pub fn file_list(&self) -> &Vec<String> {
|
||||||
&self.file_list
|
&self.file_list
|
||||||
@ -85,7 +93,7 @@ impl SnapshotReader {
|
|||||||
Ok(file)
|
Ok(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Retunrs an iterator for all used chunks.
|
/// Returns an iterator for all used chunks.
|
||||||
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
|
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
|
||||||
SnapshotChunkIterator::new(&self)
|
SnapshotChunkIterator::new(&self)
|
||||||
}
|
}
|
||||||
@ -96,7 +104,6 @@ impl SnapshotReader {
|
|||||||
/// Note: The iterator returns a `Result`, and the iterator state is
|
/// Note: The iterator returns a `Result`, and the iterator state is
|
||||||
/// undefined after the first error. So it make no sense to continue
|
/// undefined after the first error. So it make no sense to continue
|
||||||
/// iteration after the first error.
|
/// iteration after the first error.
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct SnapshotChunkIterator<'a> {
|
pub struct SnapshotChunkIterator<'a> {
|
||||||
snapshot_reader: &'a SnapshotReader,
|
snapshot_reader: &'a SnapshotReader,
|
||||||
todo_list: Vec<String>,
|
todo_list: Vec<String>,
|
||||||
|
@ -276,7 +276,7 @@ impl Inventory {
|
|||||||
continue; // belong to another pool
|
continue; // belong to another pool
|
||||||
}
|
}
|
||||||
|
|
||||||
if set.uuid.as_ref() == [0u8;16] { // should we do this??
|
if set.uuid.as_ref() == [0u8;16] {
|
||||||
list.push(MediaId {
|
list.push(MediaId {
|
||||||
label: entry.id.label.clone(),
|
label: entry.id.label.clone(),
|
||||||
media_set_label: None,
|
media_set_label: None,
|
||||||
@ -561,7 +561,7 @@ impl Inventory {
|
|||||||
|
|
||||||
// Helpers to simplify testing
|
// Helpers to simplify testing
|
||||||
|
|
||||||
/// Genreate and insert a new free tape (test helper)
|
/// Generate and insert a new free tape (test helper)
|
||||||
pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
|
pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
|
||||||
|
|
||||||
let label = MediaLabel {
|
let label = MediaLabel {
|
||||||
@ -576,7 +576,7 @@ impl Inventory {
|
|||||||
uuid
|
uuid
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Genreate and insert a new tape assigned to a specific pool
|
/// Generate and insert a new tape assigned to a specific pool
|
||||||
/// (test helper)
|
/// (test helper)
|
||||||
pub fn generate_assigned_tape(
|
pub fn generate_assigned_tape(
|
||||||
&mut self,
|
&mut self,
|
||||||
@ -600,7 +600,7 @@ impl Inventory {
|
|||||||
uuid
|
uuid
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Genreate and insert a used tape (test helper)
|
/// Generate and insert a used tape (test helper)
|
||||||
pub fn generate_used_tape(
|
pub fn generate_used_tape(
|
||||||
&mut self,
|
&mut self,
|
||||||
label_text: &str,
|
label_text: &str,
|
||||||
|
@ -26,9 +26,24 @@ use crate::{
|
|||||||
backup::BackupDir,
|
backup::BackupDir,
|
||||||
tape::{
|
tape::{
|
||||||
MediaId,
|
MediaId,
|
||||||
|
file_formats::MediaSetLabel,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub struct DatastoreContent {
|
||||||
|
pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
|
||||||
|
pub chunk_index: HashMap<[u8;32], u64>, // chunk => file_nr
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DatastoreContent {
|
||||||
|
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
chunk_index: HashMap::new(),
|
||||||
|
snapshot_index: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// The Media Catalog
|
/// The Media Catalog
|
||||||
///
|
///
|
||||||
@ -44,13 +59,11 @@ pub struct MediaCatalog {
|
|||||||
|
|
||||||
log_to_stdout: bool,
|
log_to_stdout: bool,
|
||||||
|
|
||||||
current_archive: Option<(Uuid, u64)>,
|
current_archive: Option<(Uuid, u64, String)>, // (uuid, file_nr, store)
|
||||||
|
|
||||||
last_entry: Option<(Uuid, u64)>,
|
last_entry: Option<(Uuid, u64)>,
|
||||||
|
|
||||||
chunk_index: HashMap<[u8;32], u64>,
|
content: HashMap<String, DatastoreContent>,
|
||||||
|
|
||||||
snapshot_index: HashMap<String, u64>,
|
|
||||||
|
|
||||||
pending: Vec<u8>,
|
pending: Vec<u8>,
|
||||||
}
|
}
|
||||||
@ -59,8 +72,12 @@ impl MediaCatalog {
|
|||||||
|
|
||||||
/// Magic number for media catalog files.
|
/// Magic number for media catalog files.
|
||||||
// openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
|
// openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
|
||||||
|
// Note: this version did not store datastore names (not supported anymore)
|
||||||
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];
|
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];
|
||||||
|
|
||||||
|
// openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8]
|
||||||
|
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113];
|
||||||
|
|
||||||
/// List media with catalogs
|
/// List media with catalogs
|
||||||
pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
|
pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
|
||||||
let mut catalogs = HashSet::new();
|
let mut catalogs = HashSet::new();
|
||||||
@ -120,11 +137,13 @@ impl MediaCatalog {
|
|||||||
/// Open a catalog database, load into memory
|
/// Open a catalog database, load into memory
|
||||||
pub fn open(
|
pub fn open(
|
||||||
base_path: &Path,
|
base_path: &Path,
|
||||||
uuid: &Uuid,
|
media_id: &MediaId,
|
||||||
write: bool,
|
write: bool,
|
||||||
create: bool,
|
create: bool,
|
||||||
) -> Result<Self, Error> {
|
) -> Result<Self, Error> {
|
||||||
|
|
||||||
|
let uuid = &media_id.label.uuid;
|
||||||
|
|
||||||
let mut path = base_path.to_owned();
|
let mut path = base_path.to_owned();
|
||||||
path.push(uuid.to_string());
|
path.push(uuid.to_string());
|
||||||
path.set_extension("log");
|
path.set_extension("log");
|
||||||
@ -149,15 +168,14 @@ impl MediaCatalog {
|
|||||||
log_to_stdout: false,
|
log_to_stdout: false,
|
||||||
current_archive: None,
|
current_archive: None,
|
||||||
last_entry: None,
|
last_entry: None,
|
||||||
chunk_index: HashMap::new(),
|
content: HashMap::new(),
|
||||||
snapshot_index: HashMap::new(),
|
|
||||||
pending: Vec::new(),
|
pending: Vec::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let found_magic_number = me.load_catalog(&mut file)?;
|
let found_magic_number = me.load_catalog(&mut file, media_id.media_set_label.as_ref())?;
|
||||||
|
|
||||||
if !found_magic_number {
|
if !found_magic_number {
|
||||||
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0);
|
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if write {
|
if write {
|
||||||
@ -207,19 +225,18 @@ impl MediaCatalog {
|
|||||||
log_to_stdout: false,
|
log_to_stdout: false,
|
||||||
current_archive: None,
|
current_archive: None,
|
||||||
last_entry: None,
|
last_entry: None,
|
||||||
chunk_index: HashMap::new(),
|
content: HashMap::new(),
|
||||||
snapshot_index: HashMap::new(),
|
|
||||||
pending: Vec::new(),
|
pending: Vec::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
me.log_to_stdout = log_to_stdout;
|
me.log_to_stdout = log_to_stdout;
|
||||||
|
|
||||||
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0);
|
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
|
||||||
|
|
||||||
me.register_label(&media_id.label.uuid, 0)?;
|
me.register_label(&media_id.label.uuid, 0, 0)?;
|
||||||
|
|
||||||
if let Some(ref set) = media_id.media_set_label {
|
if let Some(ref set) = media_id.media_set_label {
|
||||||
me.register_label(&set.uuid, 1)?;
|
me.register_label(&set.uuid, set.seq_nr, 1)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
me.commit()?;
|
me.commit()?;
|
||||||
@ -265,8 +282,8 @@ impl MediaCatalog {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to content list
|
/// Accessor to content list
|
||||||
pub fn snapshot_index(&self) -> &HashMap<String, u64> {
|
pub fn content(&self) -> &HashMap<String, DatastoreContent> {
|
||||||
&self.snapshot_index
|
&self.content
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Commit pending changes
|
/// Commit pending changes
|
||||||
@@ -319,31 +336,47 @@ impl MediaCatalog {
     }
 
     /// Test if the catalog already contain a snapshot
-    pub fn contains_snapshot(&self, snapshot: &str) -> bool {
-        self.snapshot_index.contains_key(snapshot)
+    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
+        match self.content.get(store) {
+            None => false,
+            Some(content) => content.snapshot_index.contains_key(snapshot),
+        }
     }
 
-    /// Returns the chunk archive file number
-    pub fn lookup_snapshot(&self, snapshot: &str) -> Option<u64> {
-        self.snapshot_index.get(snapshot).copied()
+    /// Returns the snapshot archive file number
+    pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<u64> {
+        match self.content.get(store) {
+            None => None,
+            Some(content) => content.snapshot_index.get(snapshot).copied(),
+        }
     }
 
     /// Test if the catalog already contain a chunk
-    pub fn contains_chunk(&self, digest: &[u8;32]) -> bool {
-        self.chunk_index.contains_key(digest)
+    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
+        match self.content.get(store) {
+            None => false,
+            Some(content) => content.chunk_index.contains_key(digest),
+        }
     }
 
     /// Returns the chunk archive file number
-    pub fn lookup_chunk(&self, digest: &[u8;32]) -> Option<u64> {
-        self.chunk_index.get(digest).copied()
+    pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<u64> {
+        match self.content.get(store) {
+            None => None,
+            Some(content) => content.chunk_index.get(digest).copied(),
+        }
     }
 
-    fn check_register_label(&self, file_number: u64) -> Result<(), Error> {
+    fn check_register_label(&self, file_number: u64, uuid: &Uuid) -> Result<(), Error> {
 
         if file_number >= 2 {
             bail!("register label failed: got wrong file number ({} >= 2)", file_number);
         }
 
+        if file_number == 0 && uuid != &self.uuid {
+            bail!("register label failed: uuid does not match");
+        }
+
         if self.current_archive.is_some() {
             bail!("register label failed: inside chunk archive");
         }
@ -363,15 +396,21 @@ impl MediaCatalog {
|
|||||||
/// Register media labels (file 0 and 1)
|
/// Register media labels (file 0 and 1)
|
||||||
pub fn register_label(
|
pub fn register_label(
|
||||||
&mut self,
|
&mut self,
|
||||||
uuid: &Uuid, // Uuid form MediaContentHeader
|
uuid: &Uuid, // Media/MediaSet Uuid
|
||||||
|
seq_nr: u64, // onyl used for media set labels
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
self.check_register_label(file_number)?;
|
self.check_register_label(file_number, uuid)?;
|
||||||
|
|
||||||
|
if file_number == 0 && seq_nr != 0 {
|
||||||
|
bail!("register_label failed - seq_nr should be 0 - iternal error");
|
||||||
|
}
|
||||||
|
|
||||||
let entry = LabelEntry {
|
let entry = LabelEntry {
|
||||||
file_number,
|
file_number,
|
||||||
uuid: *uuid.as_bytes(),
|
uuid: *uuid.as_bytes(),
|
||||||
|
seq_nr,
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
@ -395,9 +434,9 @@ impl MediaCatalog {
|
|||||||
digest: &[u8;32],
|
digest: &[u8;32],
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let file_number = match self.current_archive {
|
let (file_number, store) = match self.current_archive {
|
||||||
None => bail!("register_chunk failed: no archive started"),
|
None => bail!("register_chunk failed: no archive started"),
|
||||||
Some((_, file_number)) => file_number,
|
Some((_, file_number, ref store)) => (file_number, store),
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
@ -407,7 +446,12 @@ impl MediaCatalog {
|
|||||||
self.pending.push(b'C');
|
self.pending.push(b'C');
|
||||||
self.pending.extend(digest);
|
self.pending.extend(digest);
|
||||||
|
|
||||||
self.chunk_index.insert(*digest, file_number);
|
match self.content.get_mut(store) {
|
||||||
|
None => bail!("storage {} not registered - internal error", store),
|
||||||
|
Some(content) => {
|
||||||
|
content.chunk_index.insert(*digest, file_number);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -440,6 +484,7 @@ impl MediaCatalog {
|
|||||||
&mut self,
|
&mut self,
|
||||||
uuid: Uuid, // Uuid form MediaContentHeader
|
uuid: Uuid, // Uuid form MediaContentHeader
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
|
store: &str,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
self.check_start_chunk_archive(file_number)?;
|
self.check_start_chunk_archive(file_number)?;
|
||||||
@ -447,17 +492,21 @@ impl MediaCatalog {
|
|||||||
let entry = ChunkArchiveStart {
|
let entry = ChunkArchiveStart {
|
||||||
file_number,
|
file_number,
|
||||||
uuid: *uuid.as_bytes(),
|
uuid: *uuid.as_bytes(),
|
||||||
|
store_name_len: u8::try_from(store.len())?,
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
println!("A|{}|{}", file_number, uuid.to_string());
|
println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.pending.push(b'A');
|
self.pending.push(b'A');
|
||||||
|
|
||||||
unsafe { self.pending.write_le_value(entry)?; }
|
unsafe { self.pending.write_le_value(entry)?; }
|
||||||
|
self.pending.extend(store.as_bytes());
|
||||||
|
|
||||||
self.current_archive = Some((uuid, file_number));
|
self.content.entry(store.to_string()).or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
self.current_archive = Some((uuid, file_number, store.to_string()));
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -466,7 +515,7 @@ impl MediaCatalog {
|
|||||||
|
|
||||||
match self.current_archive {
|
match self.current_archive {
|
||||||
None => bail!("end_chunk archive failed: not started"),
|
None => bail!("end_chunk archive failed: not started"),
|
||||||
Some((ref expected_uuid, expected_file_number)) => {
|
Some((ref expected_uuid, expected_file_number, ..)) => {
|
||||||
if uuid != expected_uuid {
|
if uuid != expected_uuid {
|
||||||
bail!("end_chunk_archive failed: got unexpected uuid");
|
bail!("end_chunk_archive failed: got unexpected uuid");
|
||||||
}
|
}
|
||||||
@ -476,7 +525,6 @@ impl MediaCatalog {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -485,7 +533,7 @@ impl MediaCatalog {
|
|||||||
|
|
||||||
match self.current_archive.take() {
|
match self.current_archive.take() {
|
||||||
None => bail!("end_chunk_archive failed: not started"),
|
None => bail!("end_chunk_archive failed: not started"),
|
||||||
Some((uuid, file_number)) => {
|
Some((uuid, file_number, ..)) => {
|
||||||
|
|
||||||
let entry = ChunkArchiveEnd {
|
let entry = ChunkArchiveEnd {
|
||||||
file_number,
|
file_number,
|
||||||
@ -539,6 +587,7 @@ impl MediaCatalog {
|
|||||||
&mut self,
|
&mut self,
|
||||||
uuid: Uuid, // Uuid form MediaContentHeader
|
uuid: Uuid, // Uuid form MediaContentHeader
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
|
store: &str,
|
||||||
snapshot: &str,
|
snapshot: &str,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
@ -547,26 +596,36 @@ impl MediaCatalog {
|
|||||||
let entry = SnapshotEntry {
|
let entry = SnapshotEntry {
|
||||||
file_number,
|
file_number,
|
||||||
uuid: *uuid.as_bytes(),
|
uuid: *uuid.as_bytes(),
|
||||||
|
store_name_len: u8::try_from(store.len())?,
|
||||||
name_len: u16::try_from(snapshot.len())?,
|
name_len: u16::try_from(snapshot.len())?,
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
println!("S|{}|{}|{}", file_number, uuid.to_string(), snapshot);
|
println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.pending.push(b'S');
|
self.pending.push(b'S');
|
||||||
|
|
||||||
unsafe { self.pending.write_le_value(entry)?; }
|
unsafe { self.pending.write_le_value(entry)?; }
|
||||||
|
self.pending.extend(store.as_bytes());
|
||||||
|
self.pending.push(b':');
|
||||||
self.pending.extend(snapshot.as_bytes());
|
self.pending.extend(snapshot.as_bytes());
|
||||||
|
|
||||||
self.snapshot_index.insert(snapshot.to_string(), file_number);
|
let content = self.content.entry(store.to_string())
|
||||||
|
.or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
content.snapshot_index.insert(snapshot.to_string(), file_number);
|
||||||
|
|
||||||
self.last_entry = Some((uuid, file_number));
|
self.last_entry = Some((uuid, file_number));
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_catalog(&mut self, file: &mut File) -> Result<bool, Error> {
|
fn load_catalog(
|
||||||
|
&mut self,
|
||||||
|
file: &mut File,
|
||||||
|
media_set_label: Option<&MediaSetLabel>,
|
||||||
|
) -> Result<bool, Error> {
|
||||||
|
|
||||||
let mut file = BufReader::new(file);
|
let mut file = BufReader::new(file);
|
||||||
let mut found_magic_number = false;
|
let mut found_magic_number = false;
|
||||||
@ -581,7 +640,11 @@ impl MediaCatalog {
|
|||||||
Ok(true) => { /* OK */ }
|
Ok(true) => { /* OK */ }
|
||||||
Err(err) => bail!("read failed - {}", err),
|
Err(err) => bail!("read failed - {}", err),
|
||||||
}
|
}
|
||||||
if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
|
if magic == Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
|
||||||
|
// only use in unreleased versions
|
||||||
|
bail!("old catalog format (v1.0) is no longer supported");
|
||||||
|
}
|
||||||
|
if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 {
|
||||||
bail!("wrong magic number");
|
bail!("wrong magic number");
|
||||||
}
|
}
|
||||||
found_magic_number = true;
|
found_magic_number = true;
|
||||||
@ -597,22 +660,34 @@ impl MediaCatalog {
|
|||||||
|
|
||||||
match entry_type[0] {
|
match entry_type[0] {
|
||||||
b'C' => {
|
b'C' => {
|
||||||
let file_number = match self.current_archive {
|
let (file_number, store) = match self.current_archive {
|
||||||
None => bail!("register_chunk failed: no archive started"),
|
None => bail!("register_chunk failed: no archive started"),
|
||||||
Some((_, file_number)) => file_number,
|
Some((_, file_number, ref store)) => (file_number, store),
|
||||||
};
|
};
|
||||||
let mut digest = [0u8; 32];
|
let mut digest = [0u8; 32];
|
||||||
file.read_exact(&mut digest)?;
|
file.read_exact(&mut digest)?;
|
||||||
self.chunk_index.insert(digest, file_number);
|
match self.content.get_mut(store) {
|
||||||
|
None => bail!("storage {} not registered - internal error", store),
|
||||||
|
Some(content) => {
|
||||||
|
content.chunk_index.insert(digest, file_number);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
b'A' => {
|
b'A' => {
|
||||||
let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
|
let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
|
||||||
let file_number = entry.file_number;
|
let file_number = entry.file_number;
|
||||||
let uuid = Uuid::from(entry.uuid);
|
let uuid = Uuid::from(entry.uuid);
|
||||||
|
let store_name_len = entry.store_name_len as usize;
|
||||||
|
|
||||||
|
let store = file.read_exact_allocated(store_name_len)?;
|
||||||
|
let store = std::str::from_utf8(&store)?;
|
||||||
|
|
||||||
self.check_start_chunk_archive(file_number)?;
|
self.check_start_chunk_archive(file_number)?;
|
||||||
|
|
||||||
self.current_archive = Some((uuid, file_number));
|
self.content.entry(store.to_string())
|
||||||
|
.or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
self.current_archive = Some((uuid, file_number, store.to_string()));
|
||||||
}
|
}
|
||||||
b'E' => {
|
b'E' => {
|
||||||
let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
|
let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
|
||||||
@ -627,15 +702,26 @@ impl MediaCatalog {
|
|||||||
b'S' => {
|
b'S' => {
|
||||||
let entry: SnapshotEntry = unsafe { file.read_le_value()? };
|
let entry: SnapshotEntry = unsafe { file.read_le_value()? };
|
||||||
let file_number = entry.file_number;
|
let file_number = entry.file_number;
|
||||||
let name_len = entry.name_len;
|
let store_name_len = entry.store_name_len as usize;
|
||||||
|
let name_len = entry.name_len as usize;
|
||||||
let uuid = Uuid::from(entry.uuid);
|
let uuid = Uuid::from(entry.uuid);
|
||||||
|
|
||||||
let snapshot = file.read_exact_allocated(name_len.into())?;
|
let store = file.read_exact_allocated(store_name_len + 1)?;
|
||||||
|
if store[store_name_len] != b':' {
|
||||||
|
bail!("parse-error: missing separator in SnapshotEntry");
|
||||||
|
}
|
||||||
|
|
||||||
|
let store = std::str::from_utf8(&store[..store_name_len])?;
|
||||||
|
|
||||||
|
let snapshot = file.read_exact_allocated(name_len)?;
|
||||||
let snapshot = std::str::from_utf8(&snapshot)?;
|
let snapshot = std::str::from_utf8(&snapshot)?;
|
||||||
|
|
||||||
self.check_register_snapshot(file_number, snapshot)?;
|
self.check_register_snapshot(file_number, snapshot)?;
|
||||||
|
|
||||||
self.snapshot_index.insert(snapshot.to_string(), file_number);
|
let content = self.content.entry(store.to_string())
|
||||||
|
.or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
content.snapshot_index.insert(snapshot.to_string(), file_number);
|
||||||
|
|
||||||
self.last_entry = Some((uuid, file_number));
|
self.last_entry = Some((uuid, file_number));
|
||||||
}
|
}
|
||||||
@ -644,7 +730,18 @@ impl MediaCatalog {
|
|||||||
let file_number = entry.file_number;
|
let file_number = entry.file_number;
|
||||||
let uuid = Uuid::from(entry.uuid);
|
let uuid = Uuid::from(entry.uuid);
|
||||||
|
|
||||||
self.check_register_label(file_number)?;
|
self.check_register_label(file_number, &uuid)?;
|
||||||
|
|
||||||
|
if file_number == 1 {
|
||||||
|
if let Some(set) = media_set_label {
|
||||||
|
if set.uuid != uuid {
|
||||||
|
bail!("got unexpected media set uuid");
|
||||||
|
}
|
||||||
|
if set.seq_nr != entry.seq_nr {
|
||||||
|
bail!("got unexpected media set sequence number");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
self.last_entry = Some((uuid, file_number));
|
self.last_entry = Some((uuid, file_number));
|
||||||
}
|
}
|
||||||
@ -693,9 +790,9 @@ impl MediaSetCatalog {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Test if the catalog already contain a snapshot
|
/// Test if the catalog already contain a snapshot
|
||||||
pub fn contains_snapshot(&self, snapshot: &str) -> bool {
|
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
|
||||||
for catalog in self.catalog_list.values() {
|
for catalog in self.catalog_list.values() {
|
||||||
if catalog.contains_snapshot(snapshot) {
|
if catalog.contains_snapshot(store, snapshot) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -703,9 +800,9 @@ impl MediaSetCatalog {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Test if the catalog already contain a chunk
|
/// Test if the catalog already contain a chunk
|
||||||
pub fn contains_chunk(&self, digest: &[u8;32]) -> bool {
|
pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
|
||||||
for catalog in self.catalog_list.values() {
|
for catalog in self.catalog_list.values() {
|
||||||
if catalog.contains_chunk(digest) {
|
if catalog.contains_chunk(store, digest) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -720,6 +817,7 @@ impl MediaSetCatalog {
 struct LabelEntry {
     file_number: u64,
     uuid: [u8;16],
+    seq_nr: u64, // only used for media set labels
 }
 
 #[derive(Endian)]
@@ -727,6 +825,8 @@ struct LabelEntry {
 struct ChunkArchiveStart {
     file_number: u64,
     uuid: [u8;16],
+    store_name_len: u8,
+    /* datastore name follows */
 }
 
 #[derive(Endian)]
@@ -741,6 +841,7 @@ struct ChunkArchiveEnd{
 struct SnapshotEntry{
     file_number: u64,
     uuid: [u8;16],
+    store_name_len: u8,
     name_len: u16,
-    /* snapshot name follows */
+    /* datastore name, ':', snapshot name follows */
 }
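On disk a snapshot record is therefore the fixed-size, little-endian `SnapshotEntry` immediately followed by `<store>:<snapshot>`, with both lengths taken from the header fields; the parser added to `load_catalog` above relies on exactly that layout. A self-contained sketch of just the variable-length tail (not the actual catalog code):

```rust
// Encode the tail that follows a SnapshotEntry record: "<store>:<snapshot>".
fn encode_tail(store: &str, snapshot: &str) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(store.as_bytes());
    out.push(b':');
    out.extend_from_slice(snapshot.as_bytes());
    out
}

// Decode it again, using the lengths stored in the fixed header.
fn decode_tail(data: &[u8], store_name_len: usize, name_len: usize) -> Option<(&str, &str)> {
    if data.len() < store_name_len + 1 + name_len || data[store_name_len] != b':' {
        return None; // truncated record or missing separator
    }
    let store = std::str::from_utf8(&data[..store_name_len]).ok()?;
    let snapshot = std::str::from_utf8(&data[store_name_len + 1..store_name_len + 1 + name_len]).ok()?;
    Some((store, snapshot))
}

fn main() {
    let tail = encode_tail("store1", "vm/100/2021-03-18T12:00:00Z");
    assert_eq!(decode_tail(&tail, 6, tail.len() - 7), Some(("store1", "vm/100/2021-03-18T12:00:00Z")));
}
```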
@ -3,11 +3,11 @@
|
|||||||
//! A set of backup medias.
|
//! A set of backup medias.
|
||||||
//!
|
//!
|
||||||
//! This struct manages backup media state during backup. The main
|
//! This struct manages backup media state during backup. The main
|
||||||
//! purpose is to allocate media sets and assing new tapes to it.
|
//! purpose is to allocate media sets and assign new tapes to it.
|
||||||
//!
|
//!
|
||||||
//!
|
//!
|
||||||
|
|
||||||
use std::path::Path;
|
use std::path::{PathBuf, Path};
|
||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
@ -41,6 +41,7 @@ pub struct MediaPoolLockGuard(std::fs::File);
|
|||||||
pub struct MediaPool {
|
pub struct MediaPool {
|
||||||
|
|
||||||
name: String,
|
name: String,
|
||||||
|
state_path: PathBuf,
|
||||||
|
|
||||||
media_set_policy: MediaSetPolicy,
|
media_set_policy: MediaSetPolicy,
|
||||||
retention: RetentionPolicy,
|
retention: RetentionPolicy,
|
||||||
@ -82,6 +83,7 @@ impl MediaPool {
|
|||||||
|
|
||||||
Ok(MediaPool {
|
Ok(MediaPool {
|
||||||
name: String::from(name),
|
name: String::from(name),
|
||||||
|
state_path: state_path.to_owned(),
|
||||||
media_set_policy,
|
media_set_policy,
|
||||||
retention,
|
retention,
|
||||||
changer_name,
|
changer_name,
|
||||||
@ -135,7 +137,7 @@ impl MediaPool {
|
|||||||
&self.name
|
&self.name
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Retruns encryption settings
|
/// Returns encryption settings
|
||||||
pub fn encrypt_fingerprint(&self) -> Option<Fingerprint> {
|
pub fn encrypt_fingerprint(&self) -> Option<Fingerprint> {
|
||||||
self.encrypt_fingerprint.clone()
|
self.encrypt_fingerprint.clone()
|
||||||
}
|
}
|
||||||
@ -284,7 +286,7 @@ impl MediaPool {
|
|||||||
Ok(list)
|
Ok(list)
|
||||||
}
|
}
|
||||||
|
|
||||||
// tests if the media data is considered as expired at sepcified time
|
// tests if the media data is considered as expired at specified time
|
||||||
pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool {
|
pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool {
|
||||||
if media.status() != &MediaStatus::Full {
|
if media.status() != &MediaStatus::Full {
|
||||||
return false;
|
return false;
|
||||||
@ -386,7 +388,13 @@ impl MediaPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// sort empty_media, newest first -> oldest last
|
// sort empty_media, newest first -> oldest last
|
||||||
empty_media.sort_unstable_by(|a, b| b.label().ctime.cmp(&a.label().ctime));
|
empty_media.sort_unstable_by(|a, b| {
|
||||||
|
let mut res = b.label().ctime.cmp(&a.label().ctime);
|
||||||
|
if res == std::cmp::Ordering::Equal {
|
||||||
|
res = b.label().label_text.cmp(&a.label().label_text);
|
||||||
|
}
|
||||||
|
res
|
||||||
|
});
|
||||||
|
|
||||||
if let Some(media) = empty_media.pop() {
|
if let Some(media) = empty_media.pop() {
|
||||||
// found empty media, add to media set an use it
|
// found empty media, add to media set an use it
|
||||||
@ -416,7 +424,11 @@ impl MediaPool {
|
|||||||
|
|
||||||
// sort expired_media, newest first -> oldest last
|
// sort expired_media, newest first -> oldest last
|
||||||
expired_media.sort_unstable_by(|a, b| {
|
expired_media.sort_unstable_by(|a, b| {
|
||||||
b.media_set_label().unwrap().ctime.cmp(&a.media_set_label().unwrap().ctime)
|
let mut res = b.media_set_label().unwrap().ctime.cmp(&a.media_set_label().unwrap().ctime);
|
||||||
|
if res == std::cmp::Ordering::Equal {
|
||||||
|
res = b.label().label_text.cmp(&a.label().label_text);
|
||||||
|
}
|
||||||
|
res
|
||||||
});
|
});
|
||||||
|
|
||||||
if let Some(media) = expired_media.pop() {
|
if let Some(media) = expired_media.pop() {
|
||||||
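Both closures implement the same ordering: newest creation time first, with the label text as a deterministic tie-breaker, so `pop()` always takes the oldest (and, among ties, alphabetically first) tape. The same comparator can be written with `Ordering::then_with`; this standalone sketch is an equivalent formulation, not the pool code itself:

```rust
use std::cmp::Ordering;

struct Label { ctime: i64, label_text: String }

// newest first -> oldest last, label text as a stable tie-breaker
fn newest_first(a: &Label, b: &Label) -> Ordering {
    b.ctime.cmp(&a.ctime).then_with(|| b.label_text.cmp(&a.label_text))
}

fn main() {
    let mut media = vec![
        Label { ctime: 100, label_text: "tape-b".into() },
        Label { ctime: 100, label_text: "tape-a".into() },
        Label { ctime: 200, label_text: "tape-c".into() },
    ];
    media.sort_unstable_by(newest_first);
    // the oldest tape (alphabetically first among ties) ends up last, ready for pop()
    assert_eq!(media.last().unwrap().label_text, "tape-a");
}
```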
@ -429,7 +441,12 @@ impl MediaPool {
|
|||||||
println!("no expired media in pool, try to find unassigned/free media");
|
println!("no expired media in pool, try to find unassigned/free media");
|
||||||
|
|
||||||
// try unassigned media
|
// try unassigned media
|
||||||
// fixme: lock free media pool to avoid races
|
|
||||||
|
// lock artificial "__UNASSIGNED__" pool to avoid races
|
||||||
|
let _lock = MediaPool::lock(&self.state_path, "__UNASSIGNED__")?;
|
||||||
|
|
||||||
|
self.inventory.reload()?;
|
||||||
|
|
||||||
let mut free_media = Vec::new();
|
let mut free_media = Vec::new();
|
||||||
|
|
||||||
for media_id in self.inventory.list_unassigned_media() {
|
for media_id in self.inventory.list_unassigned_media() {
|
||||||
@ -447,6 +464,15 @@ impl MediaPool {
|
|||||||
free_media.push(media_id);
|
free_media.push(media_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sort free_media, newest first -> oldest last
|
||||||
|
free_media.sort_unstable_by(|a, b| {
|
||||||
|
let mut res = b.label.ctime.cmp(&a.label.ctime);
|
||||||
|
if res == std::cmp::Ordering::Equal {
|
||||||
|
res = b.label.label_text.cmp(&a.label.label_text);
|
||||||
|
}
|
||||||
|
res
|
||||||
|
});
|
||||||
|
|
||||||
if let Some(media_id) = free_media.pop() {
|
if let Some(media_id) = free_media.pop() {
|
||||||
println!("use free media '{}'", media_id.label.label_text);
|
println!("use free media '{}'", media_id.label.label_text);
|
||||||
let uuid = media_id.label.uuid.clone();
|
let uuid = media_id.label.uuid.clone();
|
||||||
@@ -48,7 +48,7 @@ impl MediaSet {
     let seq_nr = seq_nr as usize;
     if self.media_list.len() > seq_nr {
         if self.media_list[seq_nr].is_some() {
-            bail!("found duplicate squence number in media set '{}/{}'",
+            bail!("found duplicate sequence number in media set '{}/{}'",
                 self.uuid.to_string(), seq_nr);
         }
     } else {
@@ -1,8 +1,9 @@
 use std::collections::HashSet;
 use std::path::Path;
 use std::time::SystemTime;
+use std::sync::{Arc, Mutex};

-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};

 use proxmox::tools::Uuid;

@@ -10,6 +11,7 @@ use crate::{
     task_log,
     backup::{
         DataStore,
+        DataBlob,
     },
     server::WorkerTask,
     tape::{

@@ -18,7 +20,6 @@ use crate::{
         COMMIT_BLOCK_SIZE,
         TapeWrite,
         SnapshotReader,
-        SnapshotChunkIterator,
         MediaPool,
         MediaId,
         MediaCatalog,

@@ -38,32 +39,196 @@ use crate::{
     config::tape_encryption_keys::load_key_configs,
 };

+/// Helper to build and query sets of catalogs
+pub struct CatalogBuilder {
+    // read only part
+    media_set_catalog: MediaSetCatalog,
+    // catalog to modify (latest in set)
+    catalog: Option<MediaCatalog>,
+}
+
+impl CatalogBuilder {
+
+    /// Test if the catalog already contains a snapshot
+    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
+        if let Some(ref catalog) = self.catalog {
+            if catalog.contains_snapshot(store, snapshot) {
+                return true;
+            }
+        }
+        self.media_set_catalog.contains_snapshot(store, snapshot)
+    }
+
+    /// Test if the catalog already contains a chunk
+    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
+        if let Some(ref catalog) = self.catalog {
+            if catalog.contains_chunk(store, digest) {
+                return true;
+            }
+        }
+        self.media_set_catalog.contains_chunk(store, digest)
+    }
+
+    /// Add a new catalog, move the old on to the read-only set
+    pub fn append_catalog(&mut self, new_catalog: MediaCatalog) -> Result<(), Error> {
+
+        // append current catalog to read-only set
+        if let Some(catalog) = self.catalog.take() {
+            self.media_set_catalog.append_catalog(catalog)?;
+        }
+
+        // remove read-only version from set (in case it is there)
+        self.media_set_catalog.remove_catalog(&new_catalog.uuid());
+
+        self.catalog = Some(new_catalog);
+
+        Ok(())
+    }
+
+    /// Register a snapshot
+    pub fn register_snapshot(
+        &mut self,
+        uuid: Uuid, // Uuid form MediaContentHeader
+        file_number: u64,
+        store: &str,
+        snapshot: &str,
+    ) -> Result<(), Error> {
+        match self.catalog {
+            Some(ref mut catalog) => {
+                catalog.register_snapshot(uuid, file_number, store, snapshot)?;
+            }
+            None => bail!("no catalog loaded - internal error"),
+        }
+        Ok(())
+    }
+
+    /// Register a chunk archive
+    pub fn register_chunk_archive(
+        &mut self,
+        uuid: Uuid, // Uuid form MediaContentHeader
+        file_number: u64,
+        store: &str,
+        chunk_list: &[[u8; 32]],
+    ) -> Result<(), Error> {
+        match self.catalog {
+            Some(ref mut catalog) => {
+                catalog.start_chunk_archive(uuid, file_number, store)?;
+                for digest in chunk_list {
+                    catalog.register_chunk(digest)?;
+                }
+                catalog.end_chunk_archive()?;
+            }
+            None => bail!("no catalog loaded - internal error"),
+        }
+        Ok(())
+    }
+
+    /// Commit the catalog changes
+    pub fn commit(&mut self) -> Result<(), Error> {
+        if let Some(ref mut catalog) = self.catalog {
+            catalog.commit()?;
+        }
+        Ok(())
+    }
+}
+
+/// Chunk iterator which use a separate thread to read chunks
+///
+/// The iterator skips duplicate chunks and chunks already in the
+/// catalog.
+pub struct NewChunksIterator {
+    rx: std::sync::mpsc::Receiver<Result<Option<([u8; 32], DataBlob)>, Error>>,
+}
+
+impl NewChunksIterator {
+
+    /// Creates the iterator, spawning a new thread
+    ///
+    /// Make sure to join() the returnd thread handle.
+    pub fn spawn(
+        datastore: Arc<DataStore>,
+        snapshot_reader: Arc<Mutex<SnapshotReader>>,
+        catalog_builder: Arc<Mutex<CatalogBuilder>>,
+    ) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
+
+        let (tx, rx) = std::sync::mpsc::sync_channel(3);
+
+        let reader_thread = std::thread::spawn(move || {
+
+            let snapshot_reader = snapshot_reader.lock().unwrap();
+
+            let mut chunk_index: HashSet<[u8;32]> = HashSet::new();
+
+            let datastore_name = snapshot_reader.datastore_name();
+
+            let result: Result<(), Error> = proxmox::try_block!({
+
+                let mut chunk_iter = snapshot_reader.chunk_iterator()?;
+
+                loop {
+                    let digest = match chunk_iter.next() {
+                        None => {
+                            tx.send(Ok(None)).unwrap();
+                            break;
+                        }
+                        Some(digest) => digest?,
+                    };
+
+                    if chunk_index.contains(&digest) {
+                        continue;
+                    }
+
+                    if catalog_builder.lock().unwrap().contains_chunk(&datastore_name, &digest) {
+                        continue;
+                    };
+
+                    let blob = datastore.load_chunk(&digest)?;
+                    //println!("LOAD CHUNK {}", proxmox::tools::digest_to_hex(&digest));
+                    tx.send(Ok(Some((digest, blob)))).unwrap();
+
+                    chunk_index.insert(digest);
+                }
+
+                Ok(())
+            });
+            if let Err(err) = result {
+                tx.send(Err(err)).unwrap();
+            }
+        });
+
+        Ok((reader_thread, Self { rx }))
+    }
+}
+
+// We do not use Receiver::into_iter(). The manual implementation
+// returns a simpler type.
+impl Iterator for NewChunksIterator {
+    type Item = Result<([u8; 32], DataBlob), Error>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.rx.recv() {
+            Ok(Ok(None)) => None,
+            Ok(Ok(Some((digest, blob)))) => Some(Ok((digest, blob))),
+            Ok(Err(err)) => Some(Err(err)),
+            Err(_) => Some(Err(format_err!("reader thread failed"))),
+        }
+    }
+}
+
 struct PoolWriterState {
     drive: Box<dyn TapeDriver>,
-    catalog: MediaCatalog,
     // tell if we already moved to EOM
     at_eom: bool,
     // bytes written after the last tape fush/sync
     bytes_written: usize,
 }

-impl PoolWriterState {
-
-    fn commit(&mut self) -> Result<(), Error> {
-        self.drive.sync()?; // sync all data to the tape
-        self.catalog.commit()?; // then commit the catalog
-        self.bytes_written = 0;
-        Ok(())
-    }
-}
-
 /// Helper to manage a backup job, writing several tapes of a pool
 pub struct PoolWriter {
     pool: MediaPool,
     drive_name: String,
     status: Option<PoolWriterState>,
-    media_set_catalog: MediaSetCatalog,
+    catalog_builder: Arc<Mutex<CatalogBuilder>>,
     notify_email: Option<String>,
 }

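Note: the added `NewChunksIterator` is the core of the "read/write in parallel" speedup from the changelog — a dedicated reader thread loads and de-duplicates chunks and hands them over a small bounded channel while the main thread keeps the tape drive streaming. A minimal, self-contained sketch of that producer/consumer shape, using a placeholder digest type and error strings rather than the crate's real `DataBlob`/`SnapshotReader` API:

```rust
use std::collections::HashSet;
use std::sync::mpsc::{sync_channel, Receiver};
use std::thread::{self, JoinHandle};

type Digest = [u8; 32];

// Iterator fed by a background reader thread; Ok(None) marks end of stream.
struct ChunkStream {
    rx: Receiver<Result<Option<Digest>, String>>,
}

impl ChunkStream {
    fn spawn(digests: Vec<Digest>) -> (JoinHandle<()>, Self) {
        // A small bound keeps the reader only slightly ahead of the consumer.
        let (tx, rx) = sync_channel(3);
        let handle = thread::spawn(move || {
            let mut seen = HashSet::new();
            for digest in digests {
                if !seen.insert(digest) {
                    continue; // skip duplicates, like the real iterator does
                }
                if tx.send(Ok(Some(digest))).is_err() {
                    return; // consumer dropped
                }
            }
            let _ = tx.send(Ok(None));
        });
        (handle, Self { rx })
    }
}

impl Iterator for ChunkStream {
    type Item = Result<Digest, String>;
    fn next(&mut self) -> Option<Self::Item> {
        match self.rx.recv() {
            Ok(Ok(None)) => None,
            Ok(Ok(Some(d))) => Some(Ok(d)),
            Ok(Err(e)) => Some(Err(e)),
            Err(_) => Some(Err("reader thread failed".to_string())),
        }
    }
}

fn main() {
    let (handle, stream) = ChunkStream::spawn(vec![[0u8; 32], [0u8; 32], [1u8; 32]]);
    assert_eq!(stream.count(), 2); // the duplicate chunk was skipped
    handle.join().unwrap();
}
```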
@@ -88,20 +253,23 @@ impl PoolWriter {

         // load all catalogs read-only at start
         for media_uuid in pool.current_media_list()? {
+            let media_info = pool.lookup_media(media_uuid).unwrap();
             let media_catalog = MediaCatalog::open(
                 Path::new(TAPE_STATUS_DIR),
-                &media_uuid,
+                media_info.id(),
                 false,
                 false,
             )?;
             media_set_catalog.append_catalog(media_catalog)?;
         }

+        let catalog_builder = CatalogBuilder { media_set_catalog, catalog: None };
+
         Ok(Self {
             pool,
             drive_name: drive_name.to_string(),
             status: None,
-            media_set_catalog,
+            catalog_builder: Arc::new(Mutex::new(catalog_builder)),
             notify_email,
         })
     }

@@ -116,13 +284,8 @@ impl PoolWriter {
         Ok(())
     }

-    pub fn contains_snapshot(&self, snapshot: &str) -> bool {
-        if let Some(PoolWriterState { ref catalog, .. }) = self.status {
-            if catalog.contains_snapshot(snapshot) {
-                return true;
-            }
-        }
-        self.media_set_catalog.contains_snapshot(snapshot)
+    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
+        self.catalog_builder.lock().unwrap().contains_snapshot(store, snapshot)
     }

     /// Eject media and drop PoolWriterState (close drive)

@@ -188,16 +351,17 @@ impl PoolWriter {
     /// This is done automatically during a backupsession, but needs to
     /// be called explicitly before dropping the PoolWriter
     pub fn commit(&mut self) -> Result<(), Error> {
-        if let Some(ref mut status) = self.status {
-            status.commit()?;
+        if let Some(PoolWriterState {ref mut drive, .. }) = self.status {
+            drive.sync()?; // sync all data to the tape
         }
+        self.catalog_builder.lock().unwrap().commit()?; // then commit the catalog
         Ok(())
     }

     /// Load a writable media into the drive
     pub fn load_writable_media(&mut self, worker: &WorkerTask) -> Result<Uuid, Error> {
-        let last_media_uuid = match self.status {
-            Some(PoolWriterState { ref catalog, .. }) => Some(catalog.uuid().clone()),
+        let last_media_uuid = match self.catalog_builder.lock().unwrap().catalog {
+            Some(ref catalog) => Some(catalog.uuid().clone()),
             None => None,
         };

@@ -217,14 +381,12 @@ impl PoolWriter {

         task_log!(worker, "allocated new writable media '{}'", media.label_text());

-        // remove read-only catalog (we store a writable version in status)
-        self.media_set_catalog.remove_catalog(&media_uuid);
-
-        if let Some(PoolWriterState {mut drive, catalog, .. }) = self.status.take() {
-            self.media_set_catalog.append_catalog(catalog)?;
-            task_log!(worker, "eject current media");
-            drive.eject_media()?;
+        if let Some(PoolWriterState {mut drive, .. }) = self.status.take() {
+            if last_media_uuid.is_some() {
+                task_log!(worker, "eject current media");
+                drive.eject_media()?;
+            }
         }

         let (drive_config, _digest) = crate::config::drive::config()?;

@@ -249,6 +411,8 @@ impl PoolWriter {
             media.id(),
         )?;

+        self.catalog_builder.lock().unwrap().append_catalog(catalog)?;
+
         let media_set = media.media_set_label().clone().unwrap();

         let encrypt_fingerprint = media_set

@@ -258,20 +422,12 @@ impl PoolWriter {

         drive.set_encryption(encrypt_fingerprint)?;

-        self.status = Some(PoolWriterState { drive, catalog, at_eom: false, bytes_written: 0 });
+        self.status = Some(PoolWriterState { drive, at_eom: false, bytes_written: 0 });

         Ok(media_uuid)
     }

-    /// uuid of currently loaded BackupMedia
-    pub fn current_media_uuid(&self) -> Result<&Uuid, Error> {
-        match self.status {
-            Some(PoolWriterState { ref catalog, ..}) => Ok(catalog.uuid()),
-            None => bail!("PoolWriter - no media loaded"),
-        }
-    }
-
-    /// Move to EOM (if not aleady there), then creates a new snapshot
+    /// Move to EOM (if not already there), then creates a new snapshot
     /// archive writing specified files (as .pxar) into it. On
     /// success, this return 'Ok(true)' and the media catalog gets
     /// updated.

@@ -308,9 +464,10 @@ impl PoolWriter {

         match tape_write_snapshot_archive(writer.as_mut(), snapshot_reader)? {
             Some(content_uuid) => {
-                status.catalog.register_snapshot(
+                self.catalog_builder.lock().unwrap().register_snapshot(
                     content_uuid,
                     current_file_number,
+                    &snapshot_reader.datastore_name().to_string(),
                     &snapshot_reader.snapshot().to_string(),
                 )?;
                 (true, writer.bytes_written())

@@ -324,21 +481,21 @@ impl PoolWriter {
         let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

         if !done || request_sync {
-            status.commit()?;
+            self.commit()?;
         }

         Ok((done, bytes_written))
     }

-    /// Move to EOM (if not aleady there), then creates a new chunk
+    /// Move to EOM (if not already there), then creates a new chunk
     /// archive and writes chunks from 'chunk_iter'. This stops when
     /// it detect LEOM or when we reach max archive size
     /// (4GB). Written chunks are registered in the media catalog.
     pub fn append_chunk_archive(
         &mut self,
         worker: &WorkerTask,
-        datastore: &DataStore,
-        chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
+        chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
+        store: &str,
     ) -> Result<(bool, usize), Error> {

         let status = match self.status {

@@ -363,10 +520,8 @@ impl PoolWriter {
         let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
             worker,
             writer,
-            datastore,
             chunk_iter,
-            &self.media_set_catalog,
-            &status.catalog,
+            store,
             MAX_CHUNK_ARCHIVE_SIZE,
         )?;

@@ -374,42 +529,48 @@ impl PoolWriter {

         let elapsed = start_time.elapsed()?.as_secs_f64();
         worker.log(format!(
-            "wrote {:.2} MB ({} MB/s)",
-            bytes_written as f64 / (1024.0*1024.0),
-            (bytes_written as f64)/(1024.0*1024.0*elapsed),
+            "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
+            saved_chunks.len(),
+            bytes_written as f64 /1_000_000.0,
+            (bytes_written as f64)/(1_000_000.0*elapsed),
         ));

         let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

         // register chunks in media_catalog
-        status.catalog.start_chunk_archive(content_uuid, current_file_number)?;
-        for digest in saved_chunks {
-            status.catalog.register_chunk(&digest)?;
-        }
-        status.catalog.end_chunk_archive()?;
+        self.catalog_builder.lock().unwrap()
+            .register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;

         if leom || request_sync {
-            status.commit()?;
+            self.commit()?;
         }

         Ok((leom, bytes_written))
     }

+    pub fn spawn_chunk_reader_thread(
+        &self,
+        datastore: Arc<DataStore>,
+        snapshot_reader: Arc<Mutex<SnapshotReader>>,
+    ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
+        NewChunksIterator::spawn(
+            datastore,
+            snapshot_reader,
+            Arc::clone(&self.catalog_builder),
+        )
+    }
 }

 /// write up to <max_size> of chunks
 fn write_chunk_archive<'a>(
-    worker: &WorkerTask,
+    _worker: &WorkerTask,
     writer: Box<dyn 'a + TapeWrite>,
-    datastore: &DataStore,
-    chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
-    media_set_catalog: &MediaSetCatalog,
-    media_catalog: &MediaCatalog,
+    chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
+    store: &str,
     max_size: usize,
 ) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> {

-    let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, true)?;
-
-    let mut chunk_index: HashSet<[u8;32]> = HashSet::new();
+    let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?;

     // we want to get the chunk list in correct order
     let mut chunk_list: Vec<[u8;32]> = Vec::new();

@@ -417,26 +578,21 @@ fn write_chunk_archive<'a>(
     let mut leom = false;

     loop {
-        let digest = match chunk_iter.next() {
+        let (digest, blob) = match chunk_iter.peek() {
             None => break,
-            Some(digest) => digest?,
+            Some(Ok((digest, blob))) => (digest, blob),
+            Some(Err(err)) => bail!("{}", err),
         };
-        if media_catalog.contains_chunk(&digest)
-            || chunk_index.contains(&digest)
-            || media_set_catalog.contains_chunk(&digest)
-        {
-            continue;
-        }

-        let blob = datastore.load_chunk(&digest)?;
-        //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(&digest), blob.raw_size());
+        //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(digest), blob.raw_size());

         match writer.try_write_chunk(&digest, &blob) {
             Ok(true) => {
-                chunk_index.insert(digest);
-                chunk_list.push(digest);
+                chunk_list.push(*digest);
+                chunk_iter.next(); // consume
             }
             Ok(false) => {
+                // Note; we do not consume the chunk (no chunk_iter.next())
                 leom = true;
                 break;
             }

@@ -444,7 +600,7 @@ fn write_chunk_archive<'a>(
         }

         if writer.bytes_written() > max_size {
-            worker.log("Chunk Archive max size reached, closing archive".to_string());
+            //worker.log("Chunk Archive max size reached, closing archive".to_string());
             break;
         }
     }
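
Note: the switch from `next()` to `peek()` above is what keeps a chunk from being lost when `try_write_chunk` reports that the tape is full — the chunk is only consumed from the iterator after it was actually written, so the next tape can pick it up. A minimal sketch of that peek-then-consume loop; the `BoundedWriter` type here is a hypothetical stand-in for the real `ChunkArchiveWriter`:

```rust
use std::iter::Peekable;

// Stand-in for a writer that can refuse an item once the medium is full.
struct BoundedWriter {
    capacity: usize,
    written: Vec<u32>,
}

impl BoundedWriter {
    // Returns Ok(true) if written, Ok(false) if there is no room left.
    fn try_write(&mut self, item: u32) -> Result<bool, String> {
        if self.written.len() >= self.capacity {
            return Ok(false);
        }
        self.written.push(item);
        Ok(true)
    }
}

// Drain items into the writer; items that do not fit stay in the iterator
// so a later call (on the next medium) can pick them up.
fn drain<I: Iterator<Item = u32>>(
    iter: &mut Peekable<I>,
    writer: &mut BoundedWriter,
) -> Result<bool, String> {
    loop {
        let item = match iter.peek() {
            None => return Ok(true), // everything written
            Some(item) => *item,
        };
        match writer.try_write(item)? {
            true => {
                iter.next(); // consume only after a successful write
            }
            false => return Ok(false), // medium full, item stays queued
        }
    }
}

fn main() {
    let mut iter = (0u32..5).peekable();
    let mut writer = BoundedWriter { capacity: 3, written: Vec::new() };
    assert_eq!(drain(&mut iter, &mut writer), Ok(false));
    assert_eq!(iter.next(), Some(3)); // item 3 was not lost
}
```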
@@ -500,7 +656,7 @@ fn update_media_set_label(
         if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
             bail!("detected changed encryption fingerprint - internal error");
         }
-        media_catalog = MediaCatalog::open(status_path, &media_id.label.uuid, true, false)?;
+        media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?;
     } else {
         worker.log(
             format!("wrinting new media set label (overwrite '{}/{}')",

@@ -514,7 +670,6 @@ fn update_media_set_label(
     }

     // todo: verify last content/media_catalog somehow?
-    drive.move_to_eom()?; // just to be sure

     Ok(media_catalog)
 }
@@ -67,7 +67,7 @@ pub trait TapeWrite {
 ///
 /// See: https://github.com/torvalds/linux/blob/master/Documentation/scsi/st.rst
 ///
-/// On sucess, this returns if we en countered a EOM condition.
+/// On success, this returns if we en countered a EOM condition.
 pub fn tape_device_write_block<W: Write>(
     writer: &mut W,
     data: &[u8],
@@ -173,7 +173,7 @@ fn test_alloc_writable_media_4() -> Result<(), Error> {
     // next call fail because there is no free media
     assert!(pool.alloc_writable_media(start_time + 5).is_err());

-    // Create new nedia set, so that previous set can expire
+    // Create new media set, so that previous set can expire
     pool.start_write_session(start_time + 10)?;

     assert!(pool.alloc_writable_media(start_time + 10).is_err());
@@ -302,7 +302,7 @@ impl<K, V> LinkedList<K, V> {
         }
     }

-    /// Remove the node referenced by `node_ptr` from the linke list and return it.
+    /// Remove the node referenced by `node_ptr` from the linked list and return it.
     fn remove(&mut self, node_ptr: *mut CacheNode<K, V>) -> Box<CacheNode<K, V>> {
         let node = unsafe { Box::from_raw(node_ptr) };

@@ -138,10 +138,10 @@ impl<I: Send + 'static> ParallelHandler<I> {
             if let Err(panic) = handle.join() {
                 match panic.downcast::<&str>() {
                     Ok(panic_msg) => msg_list.push(
-                        format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)
+                        format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)
                     ),
                     Err(_) => msg_list.push(
-                        format!("thread {} ({}) paniced", self.name, i)
+                        format!("thread {} ({}) panicked", self.name, i)
                     ),
                 }
             }
@@ -4,7 +4,7 @@
 //!
 //! See: `/usr/include/scsi/sg_pt.h`
 //!
-//! The SCSI Commands Reference Manual also contains some usefull information.
+//! The SCSI Commands Reference Manual also contains some useful information.

 use std::os::unix::io::AsRawFd;
 use std::ptr::NonNull;
@@ -210,7 +210,7 @@ fn test_parse_register_response() -> Result<(), Error> {
     Ok(())
 }

-/// querys the up to date subscription status and parses the response
+/// queries the up to date subscription status and parses the response
 pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> {

     let now = proxmox::tools::time::epoch_i64();
@@ -299,7 +299,7 @@ pub fn delete_subscription() -> Result<(), Error> {
     Ok(())
 }

-/// updates apt authenification for repo access
+/// updates apt authentication for repo access
 pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<(), Error> {
     let auth_conf = std::path::Path::new(APT_AUTH_FN);
     match (key, password) {
@@ -318,8 +318,11 @@ pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<
             replace_file(auth_conf, conf.as_bytes(), file_opts)
                 .map_err(|e| format_err!("Error saving apt auth config - {}", e))?;
         }
-        _ => nix::unistd::unlink(auth_conf)
-            .map_err(|e| format_err!("Error clearing apt auth config - {}", e))?,
+        _ => match nix::unistd::unlink(auth_conf) {
+            Ok(()) => Ok(()),
+            Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => Ok(()), // ignore not existing
+            Err(err) => Err(err),
+        }.map_err(|e| format_err!("Error clearing apt auth config - {}", e))?,
     }
     Ok(())
 }
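
Note: with the change above, clearing the apt auth config becomes idempotent — a file that is already gone counts as success instead of an error. The same "ignore ENOENT on unlink" pattern, sketched with the standard library instead of the nix crate:

```rust
use std::io;
use std::path::Path;

// Remove a file, treating "already gone" as success.
fn remove_if_exists(path: &Path) -> io::Result<()> {
    match std::fs::remove_file(path) {
        Ok(()) => Ok(()),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(()),
        Err(err) => Err(err),
    }
}

fn main() -> io::Result<()> {
    // Calling this twice is fine; the second call hits the NotFound arm.
    remove_if_exists(Path::new("/tmp/pbs-apt-auth-example.conf"))?;
    remove_if_exists(Path::new("/tmp/pbs-apt-auth-example.conf"))?;
    Ok(())
}
```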
@@ -80,6 +80,7 @@ struct Zip64FieldWithOffset {
     uncompressed_size: u64,
     compressed_size: u64,
     offset: u64,
+    start_disk: u32,
 }

 #[derive(Endian)]
@@ -300,10 +301,26 @@ impl ZipEntry {
         let filename_len = filename.len();
         let header_size = size_of::<CentralDirectoryFileHeader>();
         let zip_field_size = size_of::<Zip64FieldWithOffset>();
-        let size: usize = header_size + filename_len + zip_field_size;
+        let mut size: usize = header_size + filename_len;

         let (date, time) = epoch_to_dos(self.mtime);

+        let (compressed_size, uncompressed_size, offset, need_zip64) = if self.compressed_size
+            >= (u32::MAX as u64)
+            || self.uncompressed_size >= (u32::MAX as u64)
+            || self.offset >= (u32::MAX as u64)
+        {
+            size += zip_field_size;
+            (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
+        } else {
+            (
+                self.compressed_size as u32,
+                self.uncompressed_size as u32,
+                self.offset as u32,
+                false,
+            )
+        };
+
         write_struct(
             &mut buf,
             CentralDirectoryFileHeader {
@@ -315,32 +332,35 @@ impl ZipEntry {
                 time,
                 date,
                 crc32: self.crc32,
-                compressed_size: 0xFFFFFFFF,
-                uncompressed_size: 0xFFFFFFFF,
+                compressed_size,
+                uncompressed_size,
                 filename_len: filename_len as u16,
-                extra_field_len: zip_field_size as u16,
+                extra_field_len: if need_zip64 { zip_field_size as u16 } else { 0 },
                 comment_len: 0,
                 start_disk: 0,
                 internal_flags: 0,
                 external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
-                offset: 0xFFFFFFFF,
+                offset,
             },
         )
         .await?;

         buf.write_all(filename).await?;

+        if need_zip64 {
             write_struct(
                 &mut buf,
                 Zip64FieldWithOffset {
                     field_type: 1,
-                    field_size: 3 * 8,
+                    field_size: 3 * 8 + 4,
                     uncompressed_size: self.uncompressed_size,
                     compressed_size: self.compressed_size,
                     offset: self.offset,
+                    start_disk: 0,
                 },
             )
             .await?;
+        }

         Ok(size)
     }
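
Note: the two hunks above only emit the zip64 extra field (now including the `start_disk` member mentioned in the changelog) when one of the 32-bit central directory fields would overflow; small entries keep their real values and no extra field, which is what improves compatibility with strict archive tools. A condensed sketch of that decision, with illustrative names:

```rust
// 0xFFFFFFFF in the classic header means "look at the zip64 extra field".
const ZIP32_MAX: u64 = u32::MAX as u64;

struct EntrySizes {
    compressed: u64,
    uncompressed: u64,
    offset: u64,
}

// Returns the three 32-bit header fields plus whether a zip64 field is needed.
fn central_dir_fields(e: &EntrySizes) -> (u32, u32, u32, bool) {
    if e.compressed >= ZIP32_MAX || e.uncompressed >= ZIP32_MAX || e.offset >= ZIP32_MAX {
        (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
    } else {
        (e.compressed as u32, e.uncompressed as u32, e.offset as u32, false)
    }
}

fn main() {
    let small = EntrySizes { compressed: 100, uncompressed: 200, offset: 0 };
    assert_eq!(central_dir_fields(&small), (100, 200, 0, false));

    let big = EntrySizes { compressed: 5 * 1024 * 1024 * 1024, uncompressed: 6 * 1024 * 1024 * 1024, offset: 0 };
    assert_eq!(central_dir_fields(&big), (0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true));
}
```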
@@ -122,7 +122,7 @@ Ext.define('PBS.view.main.NavigationTree', {
         if (view.tapestore === undefined) {
             view.tapestore = Ext.create('Proxmox.data.UpdateStore', {
                 autoStart: true,
-                interval: 2 * 1000,
+                interval: 60 * 1000,
                 storeid: 'pbs-tape-drive-list',
                 model: 'pbs-tape-drive-list',
             });

@@ -188,11 +188,13 @@ Ext.define('PBS.view.main.NavigationTree', {
                 }
             }

+            let toremove = [];
             list.eachChild((child) => {
                 if (!newSet[child.data.path]) {
-                    list.removeChild(child, true);
+                    toremove.push(child);
                 }
             });
+            toremove.forEach((child) => list.removeChild(child, true));

             if (view.pathToSelect !== undefined) {
                 let path = view.pathToSelect;

@@ -267,6 +269,15 @@ Ext.define('PBS.view.main.NavigationTree', {
         },
     },

+    reloadTapeStore: function() {
+        let me = this;
+        if (!PBS.enableTapeUI) {
+            return;
+        }
+
+        me.tapestore.load();
+    },
+
     select: function(path, silent) {
         var me = this;
         if (me.rstore.isLoaded() && (!PBS.enableTapeUI || me.tapestore.isLoaded())) {
@@ -47,10 +47,18 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/package-repositories.html#sysadmin-package-repositories",
     "title": "Debian Package Repositories"
   },
+  "sysadmin-package-repos-enterprise": {
+    "link": "/docs/package-repositories.html#sysadmin-package-repos-enterprise",
+    "title": "`Proxmox Backup`_ Enterprise Repository"
+  },
   "get-help": {
     "link": "/docs/introduction.html#get-help",
     "title": "Getting Help"
   },
+  "get-help-enterprise-support": {
+    "link": "/docs/introduction.html#get-help-enterprise-support",
+    "title": "Enterprise Support"
+  },
   "chapter-zfs": {
     "link": "/docs/sysadmin.html#chapter-zfs",
     "title": "ZFS on Linux"
@@ -273,3 +273,7 @@ span.snapshot-comment-column {
     height: 20px;
     background-image:url(../images/icon-tape-drive.svg);
 }
+
+.info-pointer div.right-aligned {
+    cursor: pointer;
+}
@@ -42,7 +42,7 @@ Ext.define('PBS.Datastore.Options', {
         rows: {
             "notify": {
                 required: true,
-                header: gettext('Notfiy'),
+                header: gettext('Notify'),
                 renderer: (value) => {
                     let notify = PBS.Utils.parsePropertyString(value);
                     let res = [];

@@ -59,7 +59,7 @@ Ext.define('PBS.Datastore.Options', {
             "notify-user": {
                 required: true,
                 defaultValue: 'root@pam',
-                header: gettext('Notfiy User'),
+                header: gettext('Notify User'),
                 editor: {
                     xtype: 'pbsNotifyOptionEdit',
                 },
@@ -33,7 +33,7 @@ Ext.define('PBS.form.CalendarEvent', {
     config: {
         deleteEmpty: true,
     },
-    // overide framework function to implement deleteEmpty behaviour
+    // override framework function to implement deleteEmpty behaviour
     getSubmitData: function() {
         let me = this, data = null;
         if (!me.disabled && me.submitValue) {
@@ -127,9 +127,16 @@ Ext.define('PBS.TapeManagement.BackupOverview', {
             },
         });

-        list.result.data.sort((a, b) => a.snapshot.localeCompare(b.snapshot));
+        list.result.data.sort(function(a, b) {
+            let storeRes = a.store.localeCompare(b.store);
+            if (storeRes === 0) {
+                return a.snapshot.localeCompare(b.snapshot);
+            } else {
+                return storeRes;
+            }
+        });

-        let tapes = {};
+        let stores = {};

         for (let entry of list.result.data) {
             entry.text = entry.snapshot;

@@ -140,9 +147,19 @@ Ext.define('PBS.TapeManagement.BackupOverview', {
                 entry.iconCls = `fa ${iconCls}`;
             }

+            let store = entry.store;
             let tape = entry['label-text'];
-            if (tapes[tape] === undefined) {
-                tapes[tape] = {
+            if (stores[store] === undefined) {
+                stores[store] = {
+                    text: store,
+                    'media-set-uuid': entry['media-set-uuid'],
+                    iconCls: 'fa fa-database',
+                    tapes: {},
+                };
+            }
+
+            if (stores[store].tapes[tape] === undefined) {
+                stores[store].tapes[tape] = {
                     text: tape,
                     'media-set-uuid': entry['media-set-uuid'],
                     'seq-nr': entry['seq-nr'],

@@ -153,7 +170,7 @@ Ext.define('PBS.TapeManagement.BackupOverview', {
             }
             let [type, group, _id] = PBS.Utils.parse_snapshot_id(entry.snapshot);

-            let children = tapes[tape].children;
+            let children = stores[store].tapes[tape].children;
             let text = `${type}/${group}`;
             if (children.length < 1 || children[children.length - 1].text !== text) {
                 children.push({

@@ -167,8 +184,13 @@ Ext.define('PBS.TapeManagement.BackupOverview', {
             children[children.length - 1].children.push(entry);
         }

-        for (const tape of Object.values(tapes)) {
-            node.appendChild(tape);
+        let storeList = Object.values(stores);
+        let expand = storeList.length === 1;
+        for (const store of storeList) {
+            store.children = Object.values(store.tapes);
+            store.expanded = expand;
+            delete store.tapes;
+            node.appendChild(store);
         }

         if (list.result.data.length === 0) {
@@ -11,6 +11,11 @@ Ext.define('PBS.TapeManagement.ChangerPanel', {
     controller: {
         xclass: 'Ext.app.ViewController',

+        reloadTapeStore: function() {
+            let navtree = Ext.ComponentQuery.query('navigationtree')[0];
+            navtree.reloadTapeStore();
+        },
+
         onAdd: function() {
             let me = this;
             Ext.create('PBS.TapeManagement.ChangerEditWindow', {

@@ -40,6 +45,7 @@ Ext.define('PBS.TapeManagement.ChangerPanel', {

         reload: function() {
             this.getView().getStore().rstore.load();
+            this.reloadTapeStore();
         },

         stopStore: function() {
@@ -11,6 +11,29 @@ Ext.define('pbs-slot-model', {
     idProperty: 'entry-id',
 });

+Ext.define('PBS.TapeManagement.FreeSlotSelector', {
+    extend: 'Proxmox.form.ComboGrid',
+    alias: 'widget.pbsFreeSlotSelector',
+
+    valueField: 'id',
+    displayField: 'id',
+
+    listConfig: {
+        columns: [
+            {
+                dataIndex: 'id',
+                text: gettext('ID'),
+                flex: 1,
+            },
+            {
+                dataIndex: 'type',
+                text: gettext('Type'),
+                flex: 1,
+            },
+        ],
+    },
+});
+
 Ext.define('PBS.TapeManagement.ChangerStatus', {
     extend: 'Ext.panel.Panel',
     alias: 'widget.pbsChangerStatus',

@@ -40,9 +63,12 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
                     fieldLabel: gettext('From Slot'),
                 },
                 {
-                    xtype: 'proxmoxintegerfield',
+                    xtype: 'pbsFreeSlotSelector',
                     name: 'to',
                     fieldLabel: gettext('To Slot'),
+                    store: {
+                        data: me.free_slots,
+                    },
                 },
             ],
             listeners: {

@@ -73,9 +99,12 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
                     fieldLabel: gettext('From Slot'),
                 },
                 {
-                    xtype: 'proxmoxintegerfield',
+                    xtype: 'pbsFreeSlotSelector',
                     name: 'to',
                     fieldLabel: gettext('To Slot'),
+                    store: {
+                        data: me.free_slots.concat(me.free_ie_slots),
+                    },
                 },
             ],
             listeners: {

@@ -340,6 +369,14 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
            me.reload_full(false);
         },

+        free_slots: [],
+
+        updateFreeSlots: function(free_slots, free_ie_slots) {
+            let me = this;
+            me.free_slots = free_slots;
+            me.free_ie_slots = free_ie_slots;
+        },
+
         reload_full: async function(use_cache) {
             let me = this;
             let view = me.getView();

@@ -399,6 +436,9 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
                 drive_entries[entry['changer-drivenum'] || 0] = entry;
             }

+            let free_slots = [];
+            let free_ie_slots = [];
+
             for (let entry of status.result.data) {
                 let type = entry['entry-kind'];

@@ -414,6 +454,19 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
                     entry['is-labeled'] = false;
                 }

+                if (!entry['label-text'] && type !== 'drive') {
+                    if (type === 'slot') {
+                        free_slots.push({
+                            id: entry['entry-id'],
+                            type,
+                        });
+                    } else {
+                        free_ie_slots.push({
+                            id: entry['entry-id'],
+                            type,
+                        });
+                    }
+                }
                 data[type].push(entry);
             }

@@ -433,6 +486,8 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
             // manually fire selectionchange to update button status
             me.lookup('drives').getSelectionModel().fireEvent('selectionchange', me);

+            me.updateFreeSlots(free_slots, free_ie_slots);
+
             if (!use_cache) {
                 Proxmox.Utils.setErrorMask(view);
             }
@@ -19,6 +19,11 @@ Ext.define('PBS.TapeManagement.DrivePanel', {
     controller: {
         xclass: 'Ext.app.ViewController',

+        reloadTapeStore: function() {
+            let navtree = Ext.ComponentQuery.query('navigationtree')[0];
+            navtree.reloadTapeStore();
+        },
+
         onAdd: function() {
             let me = this;
             Ext.create('PBS.TapeManagement.DriveEditWindow', {

@@ -57,6 +62,7 @@ Ext.define('PBS.TapeManagement.DrivePanel', {

         reload: function() {
             this.getView().getStore().rstore.load();
+            this.reloadTapeStore();
         },

         stopStore: function() {
@@ -84,6 +84,24 @@ Ext.define('PBS.TapeManagement.DriveStatus', {
             }).show();
         },

+        erase: function() {
+            let me = this;
+            let view = me.getView();
+            let driveid = view.drive;
+            PBS.Utils.driveCommand(driveid, 'erase-media', {
+                waitMsgTarget: view,
+                method: 'POST',
+                success: function(response) {
+                    Ext.create('Proxmox.window.TaskProgress', {
+                        upid: response.result.data,
+                        taskDone: function() {
+                            me.reload();
+                        },
+                    }).show();
+                },
+            });
+        },
+
         ejectMedia: function() {
             let me = this;
             let view = me.getView();

@@ -193,6 +211,18 @@ Ext.define('PBS.TapeManagement.DriveStatus', {
                     disabled: '{!online}',
                 },
             },
+            {
+                text: gettext('Erase'),
+                xtype: 'proxmoxButton',
+                handler: 'erase',
+                iconCls: 'fa fa-trash-o',
+                dangerous: true,
+                confirmMsg: gettext('Are you sure you want to erase the inserted tape?'),
+                disabled: true,
+                bind: {
+                    disabled: '{!online}',
+                },
+            },
             {
                 text: gettext('Catalog'),
                 xtype: 'proxmoxButton',

@@ -400,6 +430,7 @@ Ext.define('PBS.TapeManagement.DriveInfoPanel', {
         },
         {
             xtype: 'pmxInfoWidget',
+            reference: 'statewidget',
             title: gettext('State'),
             bind: {
                 data: {

@@ -409,6 +440,23 @@ Ext.define('PBS.TapeManagement.DriveInfoPanel', {
             },
         },
     ],

+    clickState: function(e, t, eOpts) {
+        let me = this;
+        let vm = me.getViewModel();
+        let drive = vm.get('drive');
+        if (t.classList.contains('right-aligned')) {
+            let upid = drive.state;
+            if (!upid || !upid.startsWith("UPID")) {
+                return;
+            }
+
+            Ext.create('Proxmox.window.TaskViewer', {
+                autoShow: true,
+                upid,
+            });
+        }
+    },
+
     updateData: function(store) {
         let me = this;
         if (!store) {

@@ -422,6 +470,37 @@ Ext.define('PBS.TapeManagement.DriveInfoPanel', {
         let vm = me.getViewModel();
         vm.set('drive', record.data);
         vm.notify();
+        me.updatePointer();
+    },
+
+    updatePointer: function() {
+        let me = this;
+        let stateWidget = me.down('pmxInfoWidget[reference=statewidget]');
+        let stateEl = stateWidget.getEl();
+        if (!stateEl) {
+            setTimeout(function() {
+                me.updatePointer();
+            }, 100);
+            return;
+        }
+
+        let vm = me.getViewModel();
+        let drive = vm.get('drive');
+
+        if (drive.state) {
+            stateEl.addCls('info-pointer');
+        } else {
+            stateEl.removeCls('info-pointer');
+        }
+    },
+
+    listeners: {
+        afterrender: function() {
+            let me = this;
+            let stateWidget = me.down('pmxInfoWidget[reference=statewidget]');
+            let stateEl = stateWidget.getEl();
+            stateEl.on('click', me.clickState, me);
+        },
     },

     initComponent: function() {

@@ -430,12 +509,12 @@ Ext.define('PBS.TapeManagement.DriveInfoPanel', {
             throw "no drive given";
         }

+        me.callParent();
+
         let tapeStore = Ext.ComponentQuery.query('navigationtree')[0].tapestore;
         me.mon(tapeStore, 'load', me.updateData, me);
         if (tapeStore.isLoaded()) {
             me.updateData(tapeStore);
         }
-
-        me.callParent();
     },
 });
@@ -51,5 +51,15 @@ Ext.define('PBS.TapeManagement.TapeRestoreWindow', {
             skipEmptyText: true,
             renderer: Ext.String.htmlEncode,
         },
+        {
+            xtype: 'pbsUserSelector',
+            name: 'owner',
+            fieldLabel: gettext('Owner'),
+            emptyText: gettext('Current User'),
+            value: null,
+            allowBlank: true,
+            skipEmptyText: true,
+            renderer: Ext.String.htmlEncode,
+        },
     ],
 });