Compare commits

...

15 Commits

Author SHA1 Message Date
8aa4842fa8 bump version to 1.0.8-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-04 12:39:52 +01:00
efc09f63cc docs: tech overview: avoid 'we' and other small style fixes/additions
"we" should be avoided, it's never quite clear who is "we" in the
context here and it leads to some technical wrong meanings, e.g., we
(here assumed to be "we developers") do not read any backup data, the
Proxmox Backup client does.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-04 12:27:14 +01:00
3253d8a2e4 docs: tech overview: fix line length
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-04 12:05:27 +01:00
1531185dd0 docs: explain some technical details about datastores/chunks
adds explanations for:
* what datastores are
* their relation with snapshots/chunks
* basic information about chunk directory structures
* fixed-/dynamically-sized chunks
* special handling of encrypted chunks
* hash collision probability
* limitation of file-based backups

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2021-02-04 11:30:42 +01:00
baf9c3704e ui: task summary: add verification jobs to count
fixes a bug in which verification jobs were being excluded from the
verify task summary.

Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
2021-02-04 11:07:22 +01:00
cdf39e62b3 tape: MediaPool - replace use_offline_media with changer_name
This way, we can improve location_is_available, because we only
consider media from that changer as available.
2021-02-04 10:15:18 +01:00
b81e37f6ab tape: improve code reuse 2021-02-04 09:39:16 +01:00
ddebbb52fd tape: fix tests for BlockedReader 2021-02-04 08:54:54 +01:00
983e929e25 tape: add multi volume reader/writer implementations
We currently do not use it. Added anyway, to show the possibility.
2021-02-04 08:36:35 +01:00
f47e035721 tape: cleanup - move tape file readers/writers into src/tape/file_formats folder 2021-02-04 07:59:37 +01:00
a80d72f999 tape: allow to abort restore tasks 2021-02-04 07:05:43 +01:00
8de9a9917f cleanup: use task_log macro 2021-02-04 06:55:18 +01:00
fa016c1697 HttpsConnector: use hostname instead of URL again
fixes connecting to hosts with valid certificates but without a
pinned fingerprint.
This was apparently changed by accident in the tokio-1.0 update.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Fixes: 0f860f712f ("tokio 1.0: update to new tokio-openssl interface")
2021-02-03 15:18:18 +01:00
7d2c156eb1 tape: BlockedReader - always consume EOF 2021-02-03 13:25:59 +01:00
04cec92e8d update copyright years
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2021-02-03 12:04:27 +01:00
28 changed files with 563 additions and 99 deletions


@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.7"
+version = "1.0.8"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",

debian/changelog

@@ -1,3 +1,14 @@
+rust-proxmox-backup (1.0.8-1) unstable; urgency=medium
+
+  * Https Connector: use hostname instead of URL again to avoid certificate
+    verification issues.
+
+  * ui: task summary: add verification jobs to count
+
+  * docs: explain some technical details about datastores/chunks
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Feb 2021 12:39:49 +0100
+
 rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
 
   * fix #3197: skip fingerprint check when restoring key

debian/copyright

@@ -1,4 +1,4 @@
-Copyright (C) 2019 Proxmox Server Solutions GmbH
+Copyright (C) 2019 - 2021 Proxmox Server Solutions GmbH
 
 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>


@@ -74,7 +74,7 @@ rst_epilog = epilog_file.read()
 # General information about the project.
 project = 'Proxmox Backup'
-copyright = '2019-2020, Proxmox Server Solutions GmbH'
+copyright = '2019-2021, Proxmox Server Solutions GmbH'
 author = 'Proxmox Support Team'
 
 # The version info for the project you're documenting, acts as replacement for


@@ -33,6 +33,7 @@ in the section entitled "GNU Free Documentation License".
    pve-integration.rst
    pxar-tool.rst
    sysadmin.rst
+   technical-overview.rst
    faq.rst
 
 .. raw:: latex


@@ -161,7 +161,7 @@ of the issue and will send a notification once it has been solved.
 License
 -------
 
-Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
+Copyright (C) 2019-2021 Proxmox Server Solutions GmbH
 
 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

docs/technical-overview.rst (new file)

@@ -0,0 +1,166 @@
.. _technical_overview:

Technical Overview
==================

Datastores
----------
A Datastore is the logical place where :ref:`Backup Snapshots
<backup_snapshot>` and their chunks are stored. Snapshots consist of a
manifest, blobs, dynamic- and fixed-indexes (see :ref:`terminology`), and are
stored in the following directory structure:
<datastore-root>/<type>/<id>/<time>/
The deduplication of datastores is based on reusing chunks, which are
referenced by the indexes in a backup snapshot. This means that multiple
indexes can reference the same chunks, reducing the amount of space needed to
contain the data (even across backup snapshots).
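To illustrate the relation, here is a minimal sketch in Rust; this is not the
real on-disk index format, and the type names are only illustrative:

.. code-block:: rust

    use std::collections::HashSet;

    type ChunkDigest = [u8; 32]; // SHA-256 of the chunk content

    // Sketch of a fixed index: essentially an ordered list of chunk digests,
    // where chunk i covers bytes [i*chunk_size, (i+1)*chunk_size) of the image.
    struct FixedIndex {
        chunk_size: u64, // e.g. 4 MiB
        digests: Vec<ChunkDigest>,
    }

    // Chunks referenced by any index in the datastore: inserting the same
    // digest twice stores it only once, which is exactly the space saving
    // deduplication provides.
    fn referenced_chunks(indexes: &[FixedIndex]) -> HashSet<ChunkDigest> {
        indexes.iter().flat_map(|ix| ix.digests.iter().copied()).collect()
    }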
Chunks
------
A chunk is some (possibly encrypted) data with a CRC-32 checksum at the end and
a type marker at the beginning. It is identified by the SHA-256 checksum of its
content.
To generate such chunks, backup data is split either into fixed-size or
dynamically sized chunks. The same content will be hashed to the same checksum.
The chunks of a datastore are found in
<datastore-root>/.chunks/
This chunk directory is further subdivided by the first four hexadecimal
digits of the chunk's checksum, so the chunk with the checksum
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
lives in
<datastore-root>/.chunks/a342/
This is done to reduce the number of files per directory, as having many files
per directory can be bad for file system performance.
These chunk directories ('0000'-'ffff') will be preallocated when a datastore
is created.
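As a sketch, resolving a chunk digest to its on-disk location under this
layout could look like this (the helper name is illustrative, not the actual
API):

.. code-block:: rust

    use std::path::PathBuf;

    // Derive the path of a chunk from its hex checksum, following the layout
    // described above: <datastore-root>/.chunks/<first 4 hex digits>/<digest>
    fn chunk_path(datastore_root: &str, digest_hex: &str) -> PathBuf {
        let prefix = &digest_hex[..4]; // e.g. "a342"
        [datastore_root, ".chunks", prefix, digest_hex].iter().collect()
    }

    // chunk_path("/datastore", "a342e815...") -> "/datastore/.chunks/a342/a342e815..."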
Fixed-sized Chunks
^^^^^^^^^^^^^^^^^^
For block-based backups (like VMs), fixed-sized chunks are used. The content
(disk image) is split into chunks of the same length (typically 4 MiB).
This works very well for VM images, since the file system on the guest most
often tries to allocate files in contiguous pieces, so new files get new
blocks, and changing existing files changes only their own blocks.
As an optimization, VMs in `Proxmox VE`_ can make use of 'dirty bitmaps', which
can track the changed blocks of an image. Since such a bitmap is also a
representation of the image split into chunks, there is a direct relation
between the dirty blocks of the image and the chunks which need to be uploaded,
so only modified chunks of the disk have to be uploaded for a backup.
Since the image is always split into chunks of the same size, unchanged blocks
will result in identical checksums for those chunks, so such chunks do not need
to be backed up again. This way storage snapshots are not needed to find the
changed blocks.
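A minimal sketch of this idea, assuming the ``sha2`` crate (the real client
works on streams rather than an in-memory image):

.. code-block:: rust

    use sha2::{Digest, Sha256};

    const CHUNK_SIZE: usize = 4 * 1024 * 1024; // 4 MiB

    // Split an image into fixed-size pieces and hash each one. A block that
    // did not change hashes to the same digest and needs no new upload.
    fn fixed_chunk_digests(image: &[u8]) -> Vec<[u8; 32]> {
        image
            .chunks(CHUNK_SIZE) // the last chunk may be shorter
            .map(|chunk| Sha256::digest(chunk).into())
            .collect()
    }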
For consistency, `Proxmox VE`_ uses a QEMU-internal snapshot mechanism that
does not rely on storage snapshots either.
Dynamically sized Chunks
^^^^^^^^^^^^^^^^^^^^^^^^
When backing up file-based systems rather than block devices, fixed-sized
chunks are not a good idea, since every time a file changes in size, the
remaining data gets shifted around; this would result in many chunks changing,
reducing the effectiveness of deduplication.
To improve this, `Proxmox Backup`_ Server uses dynamically sized chunks
instead. Instead of splitting an image into fixed sizes, it first generates a
consistent file archive (:ref:`pxar <pxar-format>`) and uses a rolling hash
over this on-the-fly generated archive to calculate chunk boundaries.
The Proxmox Backup client uses a variant of Buzhash, a cyclic polynomial
algorithm. It works by continuously calculating a checksum while iterating
over the data, and triggers a chunk boundary when certain conditions are met.
Assuming that most files of the system that is to be backed up have not
changed, eventually the algorithm triggers the boundary on the same data as a
previous backup, resulting in chunks that can be reused.
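The following toy chunker illustrates the principle. It is a generic cyclic
polynomial hash, not the exact Buzhash variant or the parameters the Proxmox
Backup client uses; the boundary condition here (low 21 bits of the rolling
checksum are zero) gives roughly 2 MiB average chunks:

.. code-block:: rust

    const WINDOW: usize = 64;            // bytes the rolling hash looks at
    const MASK: u64 = (1 << 21) - 1;     // ~2 MiB average chunk size

    // Any fixed pseudo-random byte -> u64 table works; derive one cheaply here.
    fn table(b: u8) -> u64 {
        let mut x = b as u64 ^ 0x9e37_79b9_7f4a_7c15;
        x = x.wrapping_mul(0xbf58_476d_1ce4_e5b9);
        x ^ (x >> 31)
    }

    fn chunk_boundaries(data: &[u8]) -> Vec<usize> {
        let mut boundaries = Vec::new();
        let mut h: u64 = 0;
        for (i, &b) in data.iter().enumerate() {
            // cyclic polynomial update: rotate, mix in the new byte ...
            h = h.rotate_left(1) ^ table(b);
            if i >= WINDOW {
                // ... and drop the byte leaving the window (it has been
                // rotated WINDOW times since it was mixed in).
                h ^= table(data[i - WINDOW]).rotate_left(WINDOW as u32);
            }
            if i >= WINDOW && (h & MASK) == 0 {
                boundaries.push(i + 1); // cut after byte i
            }
        }
        boundaries
    }

Because each boundary decision depends only on the last ``WINDOW`` bytes,
unchanged regions of the archive produce the same boundaries, and therefore
the same chunks, as in the previous backup.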
Encrypted Chunks
^^^^^^^^^^^^^^^^
Encrypted chunks are a special case. Both fixed- and dynamically sized chunks
can be encrypted, and they are handled in a slightly different manner than
normal chunks.
The hashes of encrypted chunks are calculated not with the actual (encrypted)
chunk content, but with the plaintext content concatenated with the encryption
key. This way, two chunks of the same data encrypted with different keys
generate two different checksums and no collisions occur for multiple
encryption keys.
This is done to speed up the client part of the backup, since it only needs to
encrypt chunks that are actually getting uploaded. Chunks that already exist
in the previous backup do not need to be encrypted and uploaded again.
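A sketch of that construction, again assuming the ``sha2`` crate (illustration
only; the exact keyed-digest construction used by the client may differ):

.. code-block:: rust

    use sha2::{Digest, Sha256};

    // Digest of an encrypted chunk: hash the *plaintext* together with the
    // encryption key, so identical data under different keys gets different
    // chunk IDs, while known chunks can be recognized without re-encryption.
    fn encrypted_chunk_digest(plaintext: &[u8], key: &[u8; 32]) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(plaintext);
        hasher.update(key);
        hasher.finalize().into()
    }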
Caveats and Limitations
-----------------------
Notes on hash collisions
^^^^^^^^^^^^^^^^^^^^^^^^
Every hashing algorithm has a chance to produce collisions, meaning two (or
more) inputs generate the same checksum. For SHA-256, this chance is
negligible. To estimate the probability of such a collision, one can use the
ideas of the 'birthday problem' from probability theory. For big numbers, the
exact formula is infeasible to evaluate on regular computers, but there is a
good approximation:
.. math::
p(n, d) = 1 - e^{-n^2/(2d)}
where `n` is the number of tries and `d` is the number of possibilities.

For a concrete example, let's assume a large datastore of 1 PiB and an average
chunk size of 4 MiB. That means :math:`n = 268435456` (:math:`2^{28}`) tries
and :math:`d = 2^{256}` possibilities. Inserting those values into the formula
above shows that the probability of a collision in this scenario is:
.. math::
3.1115 * 10^{-61}
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`; the chance of
such a collision is therefore about the same as winning 9 such lotto games
*in a row*.

In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
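The arithmetic can be checked directly. For such a tiny exponent,
:math:`1 - e^{-x}` equals :math:`x` to within rounding, so the approximation
reduces to :math:`n^2/(2d)`:

.. code-block:: rust

    // Worked version of the numbers above (all inputs are exact powers of
    // two, so f64 represents them without rounding).
    fn collision_probability() -> f64 {
        let n = (1u64 << 28) as f64; // 1 PiB / 4 MiB = 2^28 chunks
        let d = 2f64.powi(256);      // size of the SHA-256 output space
        n * n / (2.0 * d)            // 2^-201, about 3.11e-61; 1 - e^{-x} ≈ x here
    }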
Additionally, SHA-256 is prone to length extension attacks, but since there is
an upper limit on how big chunks can get, this is not a problem: a potential
attacker cannot arbitrarily append content to the data beyond that limit.
File-based Backup
^^^^^^^^^^^^^^^^^
Since dynamically sized chunks (for file-based backups) are created over a
custom archive format (pxar) and not over the files directly, there is no
relation between the files and the chunks. This means that the Proxmox Backup
client has to read all files again for every backup; otherwise, it would not be
possible to generate a consistent, independent pxar archive where the original
chunks can be reused. Note that even so, only new or changed chunks are
actually uploaded.
Verification of encrypted chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For encrypted chunks, only the checksum of the original (plaintext) data is
available, making it impossible for the server (which does not have the
encryption key) to verify its content against it. Instead, only the CRC-32
checksum gets checked.
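Schematically, the only integrity check the server can perform on an encrypted
chunk is therefore something like the following (using the ``crc32fast`` crate
for illustration):

.. code-block:: rust

    // Sketch: payload verification without the encryption key is limited to
    // the CRC-32 that every chunk carries.
    fn verify_encrypted_chunk(payload: &[u8], stored_crc: u32) -> bool {
        crc32fast::hash(payload) == stored_crc
    }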


@@ -1,3 +1,5 @@
+.. _terminology:
+
 Terminology
 ===========
 
@@ -99,6 +101,7 @@ Backup Group
 The tuple ``<type>/<ID>`` is called a backup group. Such a group
 may contain one or more backup snapshots.
 
+.. _backup_snapshot:
 
 Backup Snapshot
 ---------------


@@ -131,11 +131,9 @@ fn backup_worker(
     let _lock = MediaPool::lock(status_path, &pool_config.name)?;
 
     task_log!(worker, "update media online status");
-    let has_changer = update_media_online_status(drive)?;
+    let changer_name = update_media_online_status(drive)?;
 
-    let use_offline_media = !has_changer;
-
-    let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;
+    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
 
     let mut pool_writer = PoolWriter::new(pool, drive)?;
 
@@ -168,17 +166,13 @@ fn backup_worker(
 }
 
 // Try to update the the media online status
-fn update_media_online_status(drive: &str) -> Result<bool, Error> {
+fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
 
     let (config, _digest) = config::drive::config()?;
 
-    let mut has_changer = false;
-
     if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
 
-        has_changer = true;
-
         let label_text_list = changer.online_media_label_texts()?;
 
         let status_path = Path::new(TAPE_STATUS_DIR);
         let mut inventory = Inventory::load(status_path)?;
 
@@ -189,9 +183,11 @@ fn update_media_online_status(drive: &str) -> Result<bool, Error> {
             &changer_name,
             &label_text_list,
         )?;
-    }
 
-    Ok(has_changer)
+        Ok(Some(changer_name))
+    } else {
+        Ok(None)
+    }
 }
 
 pub fn backup_snapshot(


@@ -86,8 +86,8 @@ pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Err
         let config: MediaPoolConfig = config.lookup("pool", pool_name)?;
 
-        let use_offline_media = true; // does not matter here
-        let pool = MediaPool::with_config(status_path, &config, use_offline_media)?;
+        let changer_name = None; // does not matter here
+        let pool = MediaPool::with_config(status_path, &config, changer_name)?;
 
         let current_time = proxmox::tools::time::epoch_i64();


@@ -24,6 +24,8 @@ use proxmox::{
 };
 
 use crate::{
+    task_log,
+    task::TaskState,
     tools::compute_file_csum,
     api2::types::{
         DATASTORE_SCHEMA,
@@ -55,7 +57,6 @@ use crate::{
         TapeRead,
         MediaId,
         MediaCatalog,
-        ChunkArchiveDecoder,
         MediaPool,
         Inventory,
         file_formats::{
@@ -65,6 +66,7 @@ use crate::{
             PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
             PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
             MediaContentHeader,
+            ChunkArchiveDecoder,
         },
         drive::{
             TapeDriver,
@@ -159,20 +161,21 @@
             }
         }
 
-        worker.log(format!("Restore mediaset '{}'", media_set));
+        task_log!(worker, "Restore mediaset '{}'", media_set);
         if let Some(fingerprint) = encryption_key_fingerprint {
-            worker.log(format!("Encryption key fingerprint: {}", fingerprint));
+            task_log!(worker, "Encryption key fingerprint: {}", fingerprint);
         }
-        worker.log(format!("Pool: {}", pool));
-        worker.log(format!("Datastore: {}", store));
-        worker.log(format!("Drive: {}", drive));
-        worker.log(format!(
+        task_log!(worker, "Pool: {}", pool);
+        task_log!(worker, "Datastore: {}", store);
+        task_log!(worker, "Drive: {}", drive);
+        task_log!(
+            worker,
             "Required media list: {}",
             media_id_list.iter()
                 .map(|media_id| media_id.label.label_text.as_str())
                 .collect::<Vec<&str>>()
                 .join(";")
-        ));
+        );
 
         for media_id in media_id_list.iter() {
             request_and_restore_media(
@@ -185,7 +188,7 @@ pub fn restore(
             )?;
         }
 
-        worker.log(format!("Restore mediaset '{}' done", media_set));
+        task_log!(worker, "Restore mediaset '{}' done", media_set);
         Ok(())
     }
 )?;
@@ -249,7 +252,7 @@ pub fn restore_media(
         let current_file_number = drive.current_file_number()?;
         let reader = match drive.read_next_file()? {
             None => {
-                worker.log(format!("detected EOT after {} files", current_file_number));
+                task_log!(worker, "detected EOT after {} files", current_file_number);
                 break;
             }
             Some(reader) => reader,
@@ -287,7 +290,7 @@ fn restore_archive<'a>(
             let snapshot = reader.read_exact_allocated(header.size as usize)?;
             let snapshot = std::str::from_utf8(&snapshot)
                 .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
-            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));
+            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);
 
             let backup_dir: BackupDir = snapshot.parse()?;
@@ -303,16 +306,16 @@ fn restore_archive<'a>(
             path.push(rel_path);
 
             if is_new {
-                worker.log(format!("restore snapshot {}", backup_dir));
+                task_log!(worker, "restore snapshot {}", backup_dir);
 
-                match restore_snapshot_archive(reader, &path) {
+                match restore_snapshot_archive(worker, reader, &path) {
                     Err(err) => {
                         std::fs::remove_dir_all(&path)?;
                         bail!("restore snapshot {} failed - {}", backup_dir, err);
                     }
                     Ok(false) => {
                         std::fs::remove_dir_all(&path)?;
-                        worker.log(format!("skip incomplete snapshot {}", backup_dir));
+                        task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                     }
                     Ok(true) => {
                         catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
@@ -331,7 +334,7 @@ fn restore_archive<'a>(
         }
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
 
-            worker.log(format!("Found chunk archive: {}", current_file_number));
+            task_log!(worker, "Found chunk archive: {}", current_file_number);
             let datastore = target.as_ref().map(|t| t.0);
 
             if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
@@ -339,7 +342,7 @@ fn restore_archive<'a>(
                 for digest in chunks.iter() {
                     catalog.register_chunk(&digest)?;
                 }
-                worker.log(format!("register {} chunks", chunks.len()));
+                task_log!(worker, "register {} chunks", chunks.len());
                 catalog.end_chunk_archive()?;
                 catalog.commit_if_large()?;
             }
@@ -365,6 +368,9 @@ fn restore_chunk_archive<'a>(
     let result: Result<_, Error> = proxmox::try_block!({
         while let Some((digest, blob)) = decoder.next_chunk()? {
+
+            worker.check_abort()?;
+
             if let Some(datastore) = datastore {
                 let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                 if !chunk_exists {
@@ -374,14 +380,14 @@ fn restore_chunk_archive<'a>(
                     blob.decode(None, Some(&digest))?; // verify digest
                 }
                 if verbose {
-                    worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                    task_log!(worker, "Insert chunk: {}", proxmox::tools::digest_to_hex(&digest));
                 }
                 datastore.insert_chunk(&blob, &digest)?;
             } else if verbose {
-                worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+                task_log!(worker, "Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest));
             }
         } else if verbose {
-            worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
+            task_log!(worker, "Found chunk: {}", proxmox::tools::digest_to_hex(&digest));
         }
         chunks.push(digest);
     }
@@ -411,12 +417,13 @@ fn restore_chunk_archive<'a>(
 }
 
 fn restore_snapshot_archive<'a>(
+    worker: &WorkerTask,
     reader: Box<dyn 'a + TapeRead>,
     snapshot_path: &Path,
 ) -> Result<bool, Error> {
 
     let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
-    match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
+    match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) {
         Ok(()) => Ok(true),
         Err(err) => {
             let reader = decoder.input();
@@ -438,6 +445,7 @@ fn restore_snapshot_archive<'a>(
 }
 
 fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
+    worker: &WorkerTask,
    decoder: &mut pxar::decoder::sync::Decoder<R>,
     snapshot_path: &Path,
 ) -> Result<(), Error> {
@@ -460,6 +468,8 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
     let mut manifest = None;
 
     loop {
+        worker.check_abort()?;
+
         let entry = match decoder.next() {
             None => break,
             Some(entry) => entry?,


@@ -45,8 +45,8 @@ use proxmox_backup::{
         complete_media_set_uuid,
         file_formats::{
             PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
-            PROXMOX_BACKUP_CONTENT_NAME,
             MediaContentHeader,
+            proxmox_tape_magic_to_text,
         },
     },
 };
@@ -565,7 +565,7 @@ fn debug_scan(param: Value) -> Result<(), Error> {
             Ok(header) => {
                 if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
                     println!("got MediaContentHeader with wrong magic: {:?}", header.magic);
-                } else if let Some(name) = PROXMOX_BACKUP_CONTENT_NAME.get(&header.content_magic) {
+                } else if let Some(name) = proxmox_tape_magic_to_text(&header.content_magic) {
                     println!("got content header: {}", name);
                     println!("  uuid: {}", header.content_uuid());
                     println!("  ctime: {}", strftime_local("%c", header.ctime)?);


@@ -40,15 +40,13 @@ use crate::{
     },
     file_formats::{
         PROXMOX_TAPE_BLOCK_SIZE,
+        PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
         MediaSetLabel,
         MediaContentHeader,
-        PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
-        },
-        helpers::{
         BlockedReader,
         BlockedWriter,
     },
-    }
+    },
 };
 
 fn run_sg_tape_cmd(subcmd: &str, args: &[&str], fd: RawFd) -> Result<String, Error> {


@@ -30,12 +30,12 @@ use crate::{
         MediaSetLabel,
         MediaContentHeader,
         PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
+        BlockedReader,
+        BlockedWriter,
     },
     helpers::{
         EmulateTapeReader,
         EmulateTapeWriter,
-        BlockedReader,
-        BlockedWriter,
     },
 },
};


@@ -49,16 +49,21 @@ impl <R: Read> BlockedReader<R> {
         let (_size, found_end_marker) = Self::check_buffer(&buffer, 0)?;
 
         let mut incomplete = false;
+        let mut got_eod = false;
+
         if found_end_marker {
             incomplete = buffer.flags.contains(BlockHeaderFlags::INCOMPLETE);
+            Self::consume_eof_marker(&mut reader)?;
+            got_eod = true;
         }
 
         Ok(Some(Self {
             reader,
             buffer,
             found_end_marker,
             incomplete,
+            got_eod,
             seq_nr: 1,
-            got_eod: false,
             read_error: false,
             read_pos: 0,
         }))
@@ -101,6 +106,14 @@ impl <R: Read> BlockedReader<R> {
         tape_device_read_block(reader, data)
     }
 
+    fn consume_eof_marker(reader: &mut R) -> Result<(), std::io::Error> {
+        let mut tmp_buf = [0u8; 512]; // use a small buffer for testing EOF
+        if tape_device_read_block(reader, &mut tmp_buf)? {
+            proxmox::io_bail!("detected tape block after stream end marker");
+        }
+        Ok(())
+    }
+
     fn read_block(&mut self) -> Result<usize, std::io::Error> {
 
         if !Self::read_block_frame(&mut self.buffer, &mut self.reader)? {
@@ -118,12 +131,8 @@ impl <R: Read> BlockedReader<R> {
         if found_end_marker { // consume EOF mark
             self.found_end_marker = true;
             self.incomplete = self.buffer.flags.contains(BlockHeaderFlags::INCOMPLETE);
-            let mut tmp_buf = [0u8; 512]; // use a small buffer for testing EOF
-            if tape_device_read_block(&mut self.reader, &mut tmp_buf)? {
-                proxmox::io_bail!("detected tape block after stream end marker");
-            } else {
-                self.got_eod = true;
-            }
+            Self::consume_eof_marker(&mut self.reader)?;
+            self.got_eod = true;
         }
 
         self.read_pos = 0;
@@ -198,8 +207,8 @@ mod test {
     use anyhow::Error;
     use crate::tape::{
         TapeWrite,
-        file_formats::PROXMOX_TAPE_BLOCK_SIZE,
-        helpers::{
+        file_formats::{
+            PROXMOX_TAPE_BLOCK_SIZE,
             BlockedReader,
             BlockedWriter,
         },


@@ -1,4 +1,23 @@
-//! File format definitions for data written to tapes
+//! File format definitions and implementations for data written to
+//! tapes
+
+mod blocked_reader;
+pub use blocked_reader::*;
+
+mod blocked_writer;
+pub use blocked_writer::*;
+
+mod chunk_archive;
+pub use chunk_archive::*;
+
+mod snapshot_archive;
+pub use snapshot_archive::*;
+
+mod multi_volume_writer;
+pub use multi_volume_writer::*;
+
+mod multi_volume_reader;
+pub use multi_volume_reader::*;
 
 use std::collections::HashMap;
@@ -33,8 +52,8 @@ pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = [72, 87, 109,
 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 125, 232, 114, 133];
 
 lazy_static::lazy_static!{
-    /// Map content Uuid to human readable names.
-    pub static ref PROXMOX_BACKUP_CONTENT_NAME: HashMap<&'static [u8;8], &'static str> = {
+    // Map content magic numbers to human readable names.
+    static ref PROXMOX_TAPE_CONTENT_NAME: HashMap<&'static [u8;8], &'static str> = {
         let mut map = HashMap::new();
         map.insert(&PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, "Proxmox Backup Tape Label v1.0");
         map.insert(&PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, "Proxmox Backup MediaSet Label v1.0");
@@ -44,6 +63,11 @@ lazy_static::lazy_static!{
     };
 }
 
+/// Map content magic numbers to human readable names.
+pub fn proxmox_tape_magic_to_text(magic: &[u8; 8]) -> Option<String> {
+    PROXMOX_TAPE_CONTENT_NAME.get(magic).map(|s| String::from(*s))
+}
+
 /// Tape Block Header with data payload
 ///
 /// All tape files are written as sequence of blocks.


@@ -0,0 +1,102 @@
use std::io::Read;

use anyhow::{bail, Error};

use proxmox::tools::io::ReadExt;

use crate::tape::{
    TapeRead,
    file_formats::MediaContentHeader,
};

/// Read multi volume data streams written by `MultiVolumeWriter`
///
/// Note: We do not use this feature currently.
pub struct MultiVolumeReader<'a> {
    reader: Option<Box<dyn TapeRead + 'a>>,
    next_reader_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeRead + 'a>, Error>>,
    complete: bool,
    header: MediaContentHeader,
}

impl <'a> MultiVolumeReader<'a> {

    /// Creates a new instance
    pub fn new(
        reader: Box<dyn TapeRead + 'a>,
        header: MediaContentHeader,
        next_reader_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeRead + 'a>, Error>>,
    ) -> Result<Self, Error> {

        if header.part_number != 0 {
            bail!("MultiVolumeReader::new - got wrong header part_number ({} != 0)",
                  header.part_number);
        }

        Ok(Self {
            reader: Some(reader),
            next_reader_fn,
            complete: false,
            header,
        })
    }
}

impl <'a> Read for MultiVolumeReader<'a> {

    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        if self.complete {
            return Ok(0);
        }

        if self.reader.is_none() {
            let mut reader = (self.next_reader_fn)()
                .map_err(|err| proxmox::io_format_err!("multi-volume next failed: {}", err))?;

            proxmox::try_block!({
                let part_header: MediaContentHeader = unsafe { reader.read_le_value()? };
                self.reader = Some(reader);

                if part_header.uuid != self.header.uuid {
                    proxmox::io_bail!("got wrong part uuid");
                }
                if part_header.content_magic != self.header.content_magic {
                    proxmox::io_bail!("got wrong part content magic");
                }

                let expect_part_number = self.header.part_number + 1;
                if part_header.part_number != expect_part_number {
                    proxmox::io_bail!("got wrong part number ({} != {})",
                                      part_header.part_number, expect_part_number);
                }

                self.header.part_number = expect_part_number;

                Ok(())
            }).map_err(|err| {
                proxmox::io_format_err!("multi-volume read content header failed: {}", err)
            })?;
        }

        match self.reader {
            None => unreachable!(),
            Some(ref mut reader) => {
                match reader.read(buf) {
                    Ok(0) => {
                        if reader.is_incomplete()? {
                            self.reader = None;
                            self.read(buf)
                        } else {
                            self.reader = None;
                            self.complete = true;
                            Ok(0)
                        }
                    }
                    Ok(n) => Ok(n),
                    Err(err) => Err(err),
                }
            }
        }
    }
}


@@ -0,0 +1,136 @@
use anyhow::Error;

use proxmox::tools::Uuid;

use crate::tape::{
    TapeWrite,
    file_formats::MediaContentHeader,
};

/// Writes data streams using multiple volumes
///
/// Note: We do not use this feature currently.
pub struct MultiVolumeWriter<'a> {
    writer: Option<Box<dyn TapeWrite + 'a>>,
    next_writer_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeWrite + 'a>, Error>>,
    got_leom: bool,
    finished: bool,
    wrote_header: bool,
    header: MediaContentHeader,
    header_data: Vec<u8>,
    bytes_written: usize, // does not include bytes from current writer
}

impl <'a> MultiVolumeWriter<'a> {

    /// Creates a new instance
    pub fn new(
        writer: Box<dyn TapeWrite + 'a>,
        content_magic: [u8; 8],
        header_data: Vec<u8>,
        next_writer_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeWrite + 'a>, Error>>,
    ) -> Self {

        let header = MediaContentHeader::new(content_magic, header_data.len() as u32);

        Self {
            writer: Some(writer),
            next_writer_fn,
            got_leom: false,
            finished: false,
            header,
            header_data,
            wrote_header: false,
            bytes_written: 0,
        }
    }

    /// Returns the content Uuid with the current part number
    pub fn uuid_and_part_number(&self) -> (Uuid, usize) {
        (self.header.uuid.into(), self.header.part_number as usize)
    }
}

impl <'a> TapeWrite for MultiVolumeWriter<'a> {

    fn write_all(&mut self, buf: &[u8]) -> Result<bool, std::io::Error> {

        if self.finished {
            proxmox::io_bail!("multi-volume writer already finished: internal error");
        }

        if self.got_leom {
            if !self.wrote_header {
                proxmox::io_bail!("multi-volume writer: got LEOM before writing anything - internal error");
            }
            let mut writer = match self.writer.take() {
                Some(writer) => writer,
                None => proxmox::io_bail!("multi-volume writer: no writer - internal error"),
            };
            self.bytes_written = writer.bytes_written();
            writer.finish(true)?;
        }

        if self.writer.is_none() {
            if self.header.part_number >= 255 {
                proxmox::io_bail!("multi-volume writer: too many parts");
            }
            self.writer = Some(
                (self.next_writer_fn)()
                    .map_err(|err| proxmox::io_format_err!("multi-volume get next volume failed: {}", err))?
            );
            self.got_leom = false;
            self.wrote_header = false;
            self.header.part_number += 1;
        }

        let leom = match self.writer {
            None => unreachable!(),
            Some(ref mut writer) => {
                if !self.wrote_header {
                    writer.write_header(&self.header, &self.header_data)?;
                    self.wrote_header = true;
                }
                writer.write_all(buf)?
            }
        };

        if leom { self.got_leom = true; }

        Ok(false)
    }

    fn bytes_written(&self) -> usize {
        let mut bytes_written = self.bytes_written;
        if let Some(ref writer) = self.writer {
            bytes_written += writer.bytes_written();
        }
        bytes_written
    }

    fn finish(&mut self, incomplete: bool) -> Result<bool, std::io::Error> {
        if incomplete {
            proxmox::io_bail!(
                "incomplete flag makes no sense for multi-volume stream: internal error");
        }

        match self.writer.take() {
            None if self.finished => proxmox::io_bail!(
                "multi-volume writer already finished: internal error"),
            None => Ok(false),
            Some(ref mut writer) => {
                self.finished = true;
                if !self.wrote_header {
                    writer.write_header(&self.header, &self.header_data)?;
                    self.wrote_header = true;
                }
                writer.finish(false)
            }
        }
    }

    fn logical_end_of_media(&self) -> bool {
        self.got_leom
    }
}


@@ -4,11 +4,5 @@ pub use emulate_tape_writer::*;
 mod emulate_tape_reader;
 pub use emulate_tape_reader::*;
 
-mod blocked_reader;
-pub use blocked_reader::*;
-
-mod blocked_writer;
-pub use blocked_writer::*;
-
 mod snapshot_reader;
 pub use snapshot_reader::*;


@@ -44,7 +44,9 @@ pub struct MediaPool {
     media_set_policy: MediaSetPolicy,
     retention: RetentionPolicy,
 
-    use_offline_media: bool,
+    changer_name: Option<String>,
 
     encrypt_fingerprint: Option<Fingerprint>,
 
     inventory: Inventory,
@@ -55,12 +57,18 @@
 impl MediaPool {
 
     /// Creates a new instance
+    ///
+    /// If you specify a `changer_name`, only media accessible via
+    /// that changer is considered available. If you pass `None` for
+    /// `changer`, all offline media is considered available (backups
+    /// to standalone drives may not use media from inside a tape
+    /// library).
     pub fn new(
         name: &str,
         state_path: &Path,
         media_set_policy: MediaSetPolicy,
         retention: RetentionPolicy,
-        use_offline_media: bool,
+        changer_name: Option<String>,
         encrypt_fingerprint: Option<Fingerprint>,
     ) -> Result<Self, Error> {
@@ -75,7 +83,7 @@ impl MediaPool {
             name: String::from(name),
             media_set_policy,
             retention,
-            use_offline_media,
+            changer_name,
             inventory,
             current_media_set,
             encrypt_fingerprint,
@@ -86,7 +94,7 @@
     pub fn with_config(
         state_path: &Path,
         config: &MediaPoolConfig,
-        use_offline_media: bool,
+        changer_name: Option<String>,
    ) -> Result<Self, Error> {
 
        let allocation = config.allocation.clone().unwrap_or_else(|| String::from("continue")).parse()?;
@@ -103,7 +111,7 @@
             state_path,
             allocation,
             retention,
-            use_offline_media,
+            changer_name,
             encrypt_fingerprint,
         )
     }
@@ -272,8 +280,18 @@
     // check if a location is considered on site
     pub fn location_is_available(&self, location: &MediaLocation) -> bool {
         match location {
-            MediaLocation::Online(_) => true,
-            MediaLocation::Offline => self.use_offline_media,
+            MediaLocation::Online(name) => {
+                if let Some(ref changer_name) = self.changer_name {
+                    name == changer_name
+                } else {
+                    // a standalone drive cannot use media currently inside a library
+                    false
+                }
+            }
+            MediaLocation::Offline => {
+                // consider available for standalone drives
+                self.changer_name.is_none()
+            }
             MediaLocation::Vault(_) => false,
         }
     }
@@ -467,16 +485,11 @@
             match media.status() {
                 MediaStatus::Full => { /* OK */ },
                 MediaStatus::Writable if (seq + 1) == media_count => {
-                    match media.location() {
-                        MediaLocation::Online(_) => {
-                            last_is_writable = true;
-                        },
-                        MediaLocation::Offline => {
-                            if self.use_offline_media {
-                                last_is_writable = true;
-                            }
-                        }
-                        MediaLocation::Vault(vault) => {
+                    let media_location = media.location();
+                    if self.location_is_available(media_location) {
+                        last_is_writable = true;
+                    } else {
+                        if let MediaLocation::Vault(vault) = media_location {
                             bail!("writable media offsite in vault '{}'", vault);
                         }
                     }


@@ -40,12 +40,6 @@ pub use media_pool::*;
 mod media_catalog;
 pub use media_catalog::*;
 
-mod chunk_archive;
-pub use chunk_archive::*;
-
-mod snapshot_archive;
-pub use snapshot_archive::*;
-
 mod pool_writer;
 pub use pool_writer::*;


@@ -16,15 +16,17 @@ use crate::{
         MAX_CHUNK_ARCHIVE_SIZE,
         COMMIT_BLOCK_SIZE,
         TapeWrite,
-        ChunkArchiveWriter,
         SnapshotReader,
         SnapshotChunkIterator,
         MediaPool,
         MediaId,
         MediaCatalog,
         MediaSetCatalog,
-        tape_write_snapshot_archive,
-        file_formats::MediaSetLabel,
+        file_formats::{
+            MediaSetLabel,
+            ChunkArchiveWriter,
+            tape_write_snapshot_archive,
+        },
         drive::{
             TapeDriver,
             request_and_load_media,


@@ -49,7 +49,7 @@ fn test_current_set_usable_1() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;
 
@@ -75,7 +75,7 @@ fn test_current_set_usable_2() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;
 
@@ -103,7 +103,7 @@ fn test_current_set_usable_3() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        false,
+        Some(String::from("changer1")),
         None,
     )?;
 
@@ -131,7 +131,7 @@ fn test_current_set_usable_4() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;
 
@@ -161,7 +161,7 @@ fn test_current_set_usable_5() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;
 
@@ -189,7 +189,7 @@ fn test_current_set_usable_6() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;
 
@@ -223,7 +223,7 @@ fn test_current_set_usable_7() -> Result<(), Error> {
         &testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
-        true,
+        None,
         None,
     )?;


@@ -124,6 +124,11 @@ impl hyper::service::Service<Uri> for HttpsConnector {
             .ok_or_else(|| format_err!("missing URL scheme"))?
             == "https";
 
+        let host = dst
+            .host()
+            .ok_or_else(|| format_err!("missing hostname in destination url?"))?
+            .to_string();
+
         let config = this.ssl_connector.configure();
         let dst_str = dst.to_string(); // for error messages
         let conn = this
@@ -135,7 +140,7 @@ impl hyper::service::Service<Uri> for HttpsConnector {
             let _ = set_tcp_keepalive(conn.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
 
             if is_https {
-                let conn: tokio_openssl::SslStream<tokio::net::TcpStream> = tokio_openssl::SslStream::new(config?.into_ssl(&dst_str)?, conn)?;
+                let conn: tokio_openssl::SslStream<tokio::net::TcpStream> = tokio_openssl::SslStream::new(config?.into_ssl(&host)?, conn)?;
                 let mut conn = Box::pin(conn);
                 conn.as_mut().connect().await?;
                 Ok(MaybeTlsStream::Right(conn))


@@ -159,7 +159,7 @@ Ext.define('PBS.Dashboard', {
             type = 'sync';
         }
 
-        if (type.startsWith('verify')) {
+        if (type.startsWith('verif')) {
             type = 'verify';
         }