Compare commits

21 Commits

SHA1  Message  Date
cfe01b2e6a bump version to 0.8.21-1 2020-09-25 13:20:35 +02:00
b19b032be3 debian/control: update 2020-09-25 13:17:49 +02:00
5441708634 src/client/pull.rs: use new ParallelHandler 2020-09-25 12:58:20 +02:00
3c9b370255 src/tools/parallel_handler.rs: execute closure inside a thread pool 2020-09-25 12:58:20 +02:00
510544770b depend on crossbeam-channel 2020-09-25 12:58:20 +02:00
e8293841c2 docs: html: show "Proxmox Backup" in navi for small devices
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 20:03:17 +02:00
46114bf28e docs: html: improve css for small displays
fixed-width navi/toc links were not switched in color for small-width
displays, and thus were barely readable, as the background switches to
dark at small widths.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 20:03:17 +02:00
0d7e61f06f docs: buildsys: add more dependencies to html target
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 19:45:23 +02:00
fd6a54dfbc docs: conf: fix conf for new alabaster theme version
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 19:44:50 +02:00
1ea5722b8f docs: html: adapt custom css
highlighting the current chapter and some other small formatting
improvements

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 19:44:00 +02:00
bc8fadf494 docs: index: hide todo list toctree and genindex
I did not find another way to disable inclusion in the sidebar...

The genindex information is already provided through the glossary

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 19:43:18 +02:00
a76934ad33 docs: html: adapt sidebar in index page
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-24 19:41:19 +02:00
d7a122a026 use jobstate mechanism for verify/garbage_collection schedules
also changes:
* correct comment about reset (replace 'sync' with 'action')
* check schedule change correctly (only when it is actually changed)

with these changes, we can drop the 'lookup_last_worker' method

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-24 17:06:12 +02:00
6c25588e63 proxy: fix error handling in prune scheduling
we rely on the jobstate handling to write the worker's error into its
state file, but we used '?' here inside a plain block, which does not
return the error to the block but to the enclosing function/closure
instead

so if a prune job failed because of such a '?', we did not write to the
statefile and ended up with a wrong state there

instead, use our try_block! macro, which wraps the code in a closure

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-24 17:06:09 +02:00
17a1f579d0 bump version to 0.8.20-1 2020-09-24 13:17:06 +02:00
998db63933 src/client/pull.rs: decode, verify and write in separate threads
To maximize throughput.
2020-09-24 13:12:04 +02:00
c0fa14d94a src/backup/data_blob.rs: add is_encrypted helper 2020-09-24 13:00:16 +02:00
6fd129844d remove DummyCatalogWriter
we're using an `Option` instead now

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
2020-09-24 09:13:54 +02:00
baae780c99 benchmark: use compressible data to get more realistic results
And add a benchmark to test chunk verify speed (decompress+sha256).
2020-09-24 08:58:13 +02:00
09a1da25ed src/backup/data_blob.rs: improve decompress speed 2020-09-24 08:52:35 +02:00
298c6aaef6 docs: add onlineHelp to some panels
name sections according to the title or content and add
the respective onlineHelp to the following panels:
- datastore
- user management
- ACL
- backup remote

Signed-off-by: Oguz Bektas <o.bektas@proxmox.com>
Reviewed-By: Dominik Csapak <d.csapak@proxmox.com>
Tested-By: Dominik Csapak <d.csapak@proxmox.com>
2020-09-22 19:48:32 +02:00
23 changed files with 459 additions and 103 deletions

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.19"
+version = "0.8.21"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -61,6 +61,7 @@ walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.4", features = [ "bindgen" ] }
 nom = "5.1"
+crossbeam-channel = "0.4"

 [features]
 default = []

debian/changelog

@@ -1,3 +1,27 @@
+rust-proxmox-backup (0.8.21-1) unstable; urgency=medium
+
+  * depend on crossbeam-channel
+
+  * speedup sync jobs (allow up to 4 worker threads)
+
+  * improve docs
+
+  * use jobstate mechanism for verify/garbage_collection schedules
+
+  * proxy: fix error handling in prune scheduling
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 25 Sep 2020 13:20:19 +0200
+
+rust-proxmox-backup (0.8.20-1) unstable; urgency=medium
+
+  * improve sync speed
+
+  * benchmark: use compressable data to get more realistic result
+
+  * docs: add onlineHelp to some panels
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 24 Sep 2020 13:15:45 +0200
+
 rust-proxmox-backup (0.8.19-1) unstable; urgency=medium

   * src/api2/reader.rs: use std::fs::read instead of tokio::fs::read

debian/control

@@ -12,6 +12,7 @@ Build-Depends: debhelper (>= 11),
 librust-bitflags-1+default-dev (>= 1.2.1-~~),
 librust-bytes-0.5+default-dev,
 librust-crc32fast-1+default-dev,
+librust-crossbeam-channel-0.4+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
 librust-futures-0.3+default-dev,

docs/Makefile

@@ -74,7 +74,7 @@ onlinehelpinfo:
 	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."

 .PHONY: html
-html: ${GENERATED_SYNOPSIS}
+html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
 	cp custom.css $(BUILDDIR)/html/_static/

docs/_templates/index-sidebar.html (new file)

@@ -0,0 +1,11 @@
<h3>Navigation</h3>
{{ toctree(includehidden=theme_sidebar_includehidden, collapse=True, titles_only=True) }}
{% if theme_extra_nav_links %}
<hr />
<h3>Links</h3>
<ul>
  {% for text, uri in theme_extra_nav_links.items() %}
    <li class="toctree-l1"><a href="{{ uri }}">{{ text }}</a></li>
  {% endfor %}
</ul>
{% endif %}

docs/_templates/sidebar-header.html (new file)

@@ -0,0 +1,7 @@
<p class="logo">
  <a href="index.html">
    <img class="logo" src="_static/proxmox-logo.svg" alt="Logo">
  </a>
</p>
<h1 class="logo logo-name"><a href="index.html">Proxmox Backup</a></h1>
<hr style="width:100%;">

docs/administration-guide.rst

@@ -127,7 +127,7 @@ Backup Server Management
 The command line tool to configure and manage the backup server is called
 :command:`proxmox-backup-manager`.

+.. _datastore_intro:

 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~
@@ -364,7 +364,7 @@ directories will store the chunked data after a backup operation has been execut
 276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
 276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .

+.. _user_mgmt:

 User Management
 ~~~~~~~~~~~~~~~
@@ -448,6 +448,8 @@ Or completely remove the user with:
 # proxmox-backup-manager user remove john@pbs

+.. _user_acl:

 Access Control
 ~~~~~~~~~~~~~~
@@ -631,6 +633,8 @@ You can also configure DNS settings, from the **DNS** section
 of **Configuration** or by using the ``dns`` subcommand of
 ``proxmox-backup-manager``.

+.. _backup_remote:

 :term:`Remote`
 ~~~~~~~~~~~~~~

docs/conf.py

@@ -162,18 +162,19 @@ html_theme = 'alabaster'
 #
 html_theme_options = {
     'fixed_sidebar': True,
-    #'sidebar_includehidden': False,
-    'sidebar_collapse': False, # FIXME: documented, but does not works?!
-    'show_relbar_bottom': True, # FIXME: documented, but does not works?!
+    'sidebar_includehidden': False,
+    'sidebar_collapse': False,
+    'globaltoc_collapse': False,
+    'show_relbar_bottom': True,
     'show_powered_by': False,
-    'logo': 'proxmox-logo.svg',
-    'logo_name': True, # show project name below logo
-    #'logo_text_align': 'center',
-    #'description': 'Fast, Secure & Efficient.',
-    'sidebar_width': '300px',
-    'page_width': '1280px',
+    'extra_nav_links': {
+        'Proxmox Homepage': 'https://proxmox.com',
+        'PDF': 'proxmox-backup.pdf',
+    },
+    'sidebar_width': '320px',
+    'page_width': '1320px',

     # font styles
     'head_font_family': 'Lato, sans-serif',
     'caption_font_family': 'Lato, sans-serif',
@@ -181,6 +182,24 @@ html_theme_options = {
     'font_family': 'Open Sans, sans-serif',
 }

+# Alabaster theme recommends setting this fixed.
+# If you switch theme this needs to removed, probably.
+html_sidebars = {
+    '**': [
+        'sidebar-header.html',
+        'searchbox.html',
+        'navigation.html',
+        'relations.html',
+    ],
+    'index': [
+        'sidebar-header.html',
+        'searchbox.html',
+        'index-sidebar.html',
+    ]
+}
+
 # Add any paths that contain custom themes here, relative to this directory.
 # html_theme_path = []
@@ -226,10 +245,6 @@ html_static_path = ['_static']
 #
 # html_use_smartypants = True

-# Custom sidebar templates, maps document names to template names.
-#
-# html_sidebars = {}

 # Additional templates that should be rendered to pages, maps page names to
 # template names.
 #

docs/custom.css

@@ -13,3 +13,40 @@ div.body img {
 pre {
     padding: 5px 10px;
 }
+
+li a.current {
+    font-weight: bold;
+    border-bottom: 1px solid #000;
+}
+
+ul li.toctree-l1 {
+    margin-top: 0.5em;
+}
+
+ul li.toctree-l1 > a {
+    color: #000;
+}
+
+div.sphinxsidebar form.search {
+    margin-bottom: 5px;
+}
+
+div.sphinxsidebar h3 {
+    width: 100%;
+}
+
+div.sphinxsidebar h1.logo-name {
+    display: none;
+}
+
+@media screen and (max-width: 875px) {
+    div.sphinxsidebar p.logo {
+        display: initial;
+    }
+
+    div.sphinxsidebar h1.logo-name {
+        display: block;
+    }
+
+    div.sphinxsidebar span {
+        color: #AAA;
+    }
+
+    ul li.toctree-l1 > a {
+        color: #FFF;
+    }
+}

docs/index.rst

@@ -45,9 +45,10 @@ in the section entitled "GNU Free Documentation License".
 .. toctree::
    :maxdepth: 2
+   :hidden:
    :caption: Developer Appendix

    todos.rst

-* :ref:`genindex`
+.. # * :ref:`genindex`

src/api2/config/datastore.rs

@@ -132,6 +132,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
     datastore::save_config(&config)?;

     crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+    crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
+    crate::config::jobstate::create_state_file("verify", &datastore.name)?;

     Ok(())
 }
@@ -313,13 +315,23 @@ pub fn update_datastore(
         }
     }

-    if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
+    let mut gc_schedule_changed = false;
+    if gc_schedule.is_some() {
+        gc_schedule_changed = data.gc_schedule != gc_schedule;
+        data.gc_schedule = gc_schedule;
+    }
+
     let mut prune_schedule_changed = false;
     if prune_schedule.is_some() {
-        prune_schedule_changed = true;
+        prune_schedule_changed = data.prune_schedule != prune_schedule;
         data.prune_schedule = prune_schedule;
     }
-    if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
+
+    let mut verify_schedule_changed = false;
+    if verify_schedule.is_some() {
+        verify_schedule_changed = data.verify_schedule != verify_schedule;
+        data.verify_schedule = verify_schedule;
+    }

     if keep_last.is_some() { data.keep_last = keep_last; }
     if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
@@ -332,12 +344,20 @@ pub fn update_datastore(
     datastore::save_config(&config)?;

-    // we want to reset the statefile, to avoid an immediate sync in some cases
+    // we want to reset the statefiles, to avoid an immediate action in some cases
     // (e.g. going from monthly to weekly in the second week of the month)
+    if gc_schedule_changed {
+        crate::config::jobstate::create_state_file("garbage_collection", &name)?;
+    }
+
     if prune_schedule_changed {
         crate::config::jobstate::create_state_file("prune", &name)?;
     }

+    if verify_schedule_changed {
+        crate::config::jobstate::create_state_file("verify", &name)?;
+    }
+
     Ok(())
 }
@@ -377,7 +397,10 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
     datastore::save_config(&config)?;

-    crate::config::jobstate::remove_state_file("prune", &name)?;
+    // ignore errors
+    let _ = crate::config::jobstate::remove_state_file("prune", &name);
+    let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
+    let _ = crate::config::jobstate::remove_state_file("verify", &name);

     Ok(())
 }
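
Taken together, the datastore API now manages one job-state file per job type and datastore: seeded on create, reset only when the corresponding schedule actually changed, and removed best-effort on delete. A rough sketch of that lifecycle, using only the jobstate calls visible in this changeset (the wrapper function and the store name "store1" are made up; this would live inside the proxmox-backup crate):

    use anyhow::Error;

    fn jobstate_lifecycle() -> Result<(), Error> {
        // on datastore creation: seed a state file per scheduled job type
        for job_type in &["prune", "garbage_collection", "verify"] {
            crate::config::jobstate::create_state_file(job_type, "store1")?;
        }

        // the scheduler later asks when a job of this type last ran
        let last = crate::config::jobstate::last_run_time("verify", "store1")?;
        println!("verify last ran at epoch {}", last);

        // on datastore removal: best-effort cleanup, errors ignored
        for job_type in &["prune", "garbage_collection", "verify"] {
            let _ = crate::config::jobstate::remove_state_file(job_type, "store1");
        }
        Ok(())
    }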

src/backup/data_blob.rs

@@ -198,7 +198,10 @@ impl DataBlob {
             Ok(data)
         } else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
             let data_start = std::mem::size_of::<DataBlobHeader>();
-            let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
+            let mut reader = &self.raw_data[data_start..];
+            let data = zstd::stream::decode_all(&mut reader)?;
+            // zstd::block::decompress is about 10% slower
+            // let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
             if let Some(digest) = digest {
                 Self::verify_digest(&data, None, digest)?;
             }
@@ -268,6 +271,12 @@ impl DataBlob {
         }
     }

+    /// Returns if chunk is encrypted
+    pub fn is_encrypted(&self) -> bool {
+        let magic = self.magic();
+        magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0
+    }
+
     /// Verify digest and data length for unencrypted chunks.
     ///
     /// To do that, we need to decompress data first. Please note that
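
The streaming decoder above reads the zstd frame from any std::io::Read source and grows the output buffer as needed, instead of pre-sizing for MAX_BLOB_SIZE as the block API does; per the code comment it is also about 10% faster here. A standalone sketch of the same switch (the error type and the `compressed` slice are illustrative stand-ins):

    // `compressed` stands in for &self.raw_data[data_start..]
    fn decode(compressed: &[u8]) -> Result<Vec<u8>, std::io::Error> {
        // old: one-shot block API, bounded by a fixed maximum size
        // let data = zstd::block::decompress(compressed, MAX_BLOB_SIZE)?;

        // new: streaming decode; &[u8] implements Read
        let mut reader = compressed;
        let data = zstd::stream::decode_all(&mut reader)?;
        Ok(data)
    }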

src/bin/proxmox-backup-proxy.rs

@@ -202,40 +202,14 @@ async fn schedule_tasks() -> Result<(), Error> {
     Ok(())
 }

-fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
-
-    let list = proxmox_backup::server::read_task_list()?;
-
-    let mut last: Option<&server::UPID> = None;
-
-    for entry in list.iter() {
-        if entry.upid.worker_type == worker_type {
-            if let Some(ref id) = entry.upid.worker_id {
-                if id == worker_id {
-                    match last {
-                        Some(ref upid) => {
-                            if upid.starttime < entry.upid.starttime {
-                                last = Some(&entry.upid)
-                            }
-                        }
-                        None => {
-                            last = Some(&entry.upid)
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    Ok(last.cloned())
-}
-
 async fn schedule_datastore_garbage_collection() {

     use proxmox_backup::backup::DataStore;
     use proxmox_backup::server::{UPID, WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
@@ -291,11 +265,10 @@ async fn schedule_datastore_garbage_collection() {
                 }
             }
         } else {
-            match lookup_last_worker(worker_type, &store) {
-                Ok(Some(upid)) => upid.starttime,
-                Ok(None) => 0,
+            match jobstate::last_run_time(worker_type, &store) {
+                Ok(time) => time,
                 Err(err) => {
-                    eprintln!("lookup_last_job_start failed: {}", err);
+                    eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                     continue;
                 }
             }
@@ -314,6 +287,11 @@ async fn schedule_datastore_garbage_collection() {
         if next > now { continue; }

+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let store2 = store.clone();

         if let Err(err) = WorkerTask::new_thread(
@@ -322,9 +300,20 @@ async fn schedule_datastore_garbage_collection() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
+                job.start(&worker.upid().to_string())?;
+
                 worker.log(format!("starting garbage collection on store {}", store));
                 worker.log(format!("task triggered by schedule '{}'", event_str));
-                datastore.garbage_collection(&worker)
+
+                let result = datastore.garbage_collection(&worker);
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
+                }
+
+                result
             }
         ) {
             eprintln!("unable to start garbage collection on store {} - {}", store2, err);
@@ -434,7 +423,7 @@ async fn schedule_datastore_prune() {
                 job.start(&worker.upid().to_string())?;

-                let result = {
+                let result = try_block!({

                     worker.log(format!("Starting datastore prune on store \"{}\"", store));
                     worker.log(format!("task triggered by schedule '{}'", event_str));
@@ -463,7 +452,7 @@ async fn schedule_datastore_prune() {
                     }

                     Ok(())
-                };
+                });

                 let status = worker.create_state(&result);
@@ -482,7 +471,10 @@ async fn schedule_datastore_verification() {
 async fn schedule_datastore_verification() {

     use proxmox_backup::backup::{DataStore, verify_all_backups};
     use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
@@ -526,16 +518,10 @@ async fn schedule_datastore_verification() {
         let worker_type = "verify";

-        let last = match lookup_last_worker(worker_type, &store) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            }
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &store) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                 continue;
             }
         };
@@ -553,6 +539,11 @@ async fn schedule_datastore_verification() {
         if next > now { continue; }

+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let worker_id = store.clone();
         let store2 = store.clone();

         if let Err(err) = WorkerTask::new_thread(
@@ -561,18 +552,29 @@ async fn schedule_datastore_verification() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
+                job.start(&worker.upid().to_string())?;
+
                 worker.log(format!("starting verification on store {}", store2));
                 worker.log(format!("task triggered by schedule '{}'", event_str));
-                if let Ok(failed_dirs) = verify_all_backups(datastore, worker.clone()) {
+                let result = try_block!({
+                    let failed_dirs = verify_all_backups(datastore, worker.clone())?;
                     if failed_dirs.len() > 0 {
                         worker.log("Failed to verify following snapshots:");
                         for dir in failed_dirs {
                             worker.log(format!("\t{}", dir));
                         }
-                        bail!("verification failed - please check the log for details");
+                        Err(format_err!("verification failed - please check the log for details"))
+                    } else {
+                        Ok(())
                     }
-                }
-                Ok(())
+                });
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
+                }
+
+                result
             },
         ) {
             eprintln!("unable to start verification on store {} - {}", store, err);

src/bin/proxmox_backup_client/benchmark.rs

@@ -21,6 +21,8 @@ use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
     KeyDerivationConfig,
+    DataBlob,
+    DataChunkBuilder,
 };

 use proxmox_backup::client::*;
@@ -60,6 +62,9 @@ struct Speed {
         "aes256_gcm": {
             type: Speed,
         },
+        "verify": {
+            type: Speed,
+        },
     },
 )]
 #[derive(Copy, Clone, Serialize)]
@@ -75,9 +80,10 @@ struct BenchmarkResult {
     decompress: Speed,
     /// AES256 GCM encryption speed
     aes256_gcm: Speed,
+    /// Verify speed
+    verify: Speed,
 }

 static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
     tls: Speed {
         speed: None,
@@ -85,19 +91,23 @@ static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
     },
     sha256: Speed {
         speed: None,
-        top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 2022.0, // AMD Ryzen 7 2700X
     },
     compress: Speed {
         speed: None,
-        top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 752.0, // AMD Ryzen 7 2700X
     },
     decompress: Speed {
         speed: None,
-        top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 1198.0, // AMD Ryzen 7 2700X
     },
     aes256_gcm: Speed {
         speed: None,
-        top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 3645.0, // AMD Ryzen 7 2700X
+    },
+    verify: Speed {
+        speed: None,
+        top: 1_000_000.0 * 758.0, // AMD Ryzen 7 2700X
     },
 };
@@ -194,7 +204,10 @@ fn render_result(
         .column(ColumnConfig::new("decompress")
             .header("ZStd level 1 decompression speed")
             .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("verify")
+            .header("Chunk verification speed")
+            .right_align(false).renderer(render_speed))
         .column(ColumnConfig::new("aes256_gcm")
             .header("AES256 GCM encryption speed")
             .right_align(false).renderer(render_speed));
@@ -257,7 +270,17 @@ fn test_crypt_speed(
     let crypt_config = CryptConfig::new(testkey)?;

-    let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+    //let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+    let mut random_data = vec![];
+    // generate pseudo random byte sequence
+    for i in 0..256*1024 {
+        for j in 0..4 {
+            let byte = ((i >> (j<<3))&0xff) as u8;
+            random_data.push(byte);
+        }
+    }
+    assert_eq!(random_data.len(), 1024*1024);

     let start_time = std::time::Instant::now();
@@ -322,5 +345,23 @@ fn test_crypt_speed(
     eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);

+    let start_time = std::time::Instant::now();
+
+    let (chunk, digest) = DataChunkBuilder::new(&random_data)
+        .compress(true)
+        .build()?;
+
+    let mut bytes = 0;
+    loop {
+        chunk.verify_unencrypted(random_data.len(), &digest)?;
+        bytes += random_data.len();
+        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.verify.speed = Some(speed);
+
+    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);
+
     Ok(())
 }
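
The replacement data generator emits each counter value as four little-endian bytes, so the 1 MiB buffer is deterministic and only partially compressible (the high bytes are mostly zero). The previous truly random buffer was essentially incompressible, which inflated the old compression and decompression numbers, hence the lowered reference values above. A sketch of an equivalent, more compact formulation (not the code the commit uses):

    // 256 Ki counter values, each as 4 little-endian bytes = 1 MiB total,
    // byte-for-byte the same output as the nested loop above
    fn make_pseudo_random_data() -> Vec<u8> {
        let data: Vec<u8> = (0u32..256 * 1024)
            .flat_map(|i| i.to_le_bytes())
            .collect();
        assert_eq!(data.len(), 1024 * 1024);
        data
    }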

src/client/pull.rs

@@ -6,10 +6,12 @@ use std::convert::TryFrom;
 use std::sync::{Arc, Mutex};
 use std::collections::{HashSet, HashMap};
 use std::io::{Seek, SeekFrom};
+use std::time::SystemTime;
+use std::sync::atomic::{AtomicUsize, Ordering};

 use proxmox::api::error::{StatusCode, HttpError};

 use crate::{
-    tools::compute_file_csum,
+    tools::{ParallelHandler, compute_file_csum},
     server::WorkerTask,
     backup::*,
     api2::types::*,
@@ -22,7 +24,7 @@ use crate::{
 // Todo: correctly lock backup groups

 async fn pull_index_chunks<I: IndexFile>(
-    _worker: &WorkerTask,
+    worker: &WorkerTask,
     chunk_reader: RemoteChunkReader,
     target: Arc<DataStore>,
     index: I,
@@ -31,6 +33,8 @@ async fn pull_index_chunks<I: IndexFile>(
     use futures::stream::{self, StreamExt, TryStreamExt};

+    let start_time = SystemTime::now();
+
     let stream = stream::iter(
         (0..index.index_count())
             .map(|pos| index.chunk_info(pos).unwrap())
@@ -46,11 +50,28 @@ async fn pull_index_chunks<I: IndexFile>(
         })
     );

+    let target2 = target.clone();
+    let verify_pool = ParallelHandler::new(
+        "sync chunk writer", 4,
+        move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
+            // println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
+            chunk.verify_unencrypted(size as usize, &digest)?;
+            target2.insert_chunk(&chunk, &digest)?;
+            Ok(())
+        }
+    );
+
+    let verify_and_write_channel = verify_pool.channel();
+
+    let bytes = Arc::new(AtomicUsize::new(0));
+
     stream
         .map(|info| {

             let target = Arc::clone(&target);
             let chunk_reader = chunk_reader.clone();
+            let bytes = Arc::clone(&bytes);
+            let verify_and_write_channel = verify_and_write_channel.clone();

             Ok::<_, Error>(async move {
                 let chunk_exists = crate::tools::runtime::block_in_place(|| target.cond_touch_chunk(&info.digest, false))?;
@@ -60,18 +81,30 @@ async fn pull_index_chunks<I: IndexFile>(
                 }
                 //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
                 let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
+                let raw_size = chunk.raw_size() as usize;

-                crate::tools::runtime::block_in_place(|| {
-                    chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
-                    target.insert_chunk(&chunk, &info.digest)?;
-                    Ok(())
-                })
+                // decode, verify and write in a separate threads to maximize throughput
+                crate::tools::runtime::block_in_place(|| verify_and_write_channel.send((chunk, info.digest, info.size())))?;
+
+                bytes.fetch_add(raw_size, Ordering::SeqCst);
+
+                Ok(())
             })
         })
        .try_buffer_unordered(20)
         .try_for_each(|_res| futures::future::ok(()))
         .await?;

+    drop(verify_and_write_channel);
+
+    verify_pool.complete()?;
+
+    let elapsed = start_time.elapsed()?.as_secs_f64();
+
+    let bytes = bytes.load(Ordering::SeqCst);
+
+    worker.log(format!("downloaded {} bytes ({} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
+
     Ok(())
 }
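
One detail worth noting in the shutdown sequence: the workers only exit once recv() fails, and crossbeam only reports that after every Sender clone is gone. Hence the explicit drop of the cloned channel before complete(), which in turn drops the pool's own handle and then joins the threads. A tiny standalone demonstration of that channel property:

    use crossbeam_channel::bounded;

    fn main() {
        let (tx, rx) = bounded::<u32>(4);
        let tx2 = tx.clone();

        let worker = std::thread::spawn(move || {
            // recv() blocks for data and returns Err only after
            // *all* sender clones have been dropped
            while let Ok(n) = rx.recv() {
                println!("got {}", n);
            }
            println!("channel closed, worker exits");
        });

        tx.send(1).unwrap();
        tx2.send(2).unwrap();
        drop(tx);
        drop(tx2); // last sender gone -> recv() errors -> worker returns
        worker.join().unwrap();
    }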

src/backup/catalog.rs

@@ -17,17 +17,3 @@ pub trait BackupCatalogWriter {
     fn add_fifo(&mut self, name: &CStr) -> Result<(), Error>;
     fn add_socket(&mut self, name: &CStr) -> Result<(), Error>;
 }
-
-pub struct DummyCatalogWriter();
-
-impl BackupCatalogWriter for DummyCatalogWriter {
-    fn start_directory(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn end_directory(&mut self) -> Result<(), Error> { Ok(()) }
-    fn add_file(&mut self, _name: &CStr, _size: u64, _mtime: u64) -> Result<(), Error> { Ok(()) }
-    fn add_symlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_hardlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_block_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_char_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_fifo(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_socket(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-}
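
Per the commit message, call sites that previously passed a DummyCatalogWriter presumably now take an Option and skip catalog calls when it is None, along these lines (a hypothetical sketch, not code from this changeset):

    use std::ffi::CStr;
    use anyhow::Error;

    // `catalog` is None where a DummyCatalogWriter used to be passed
    fn record_file(
        catalog: &mut Option<Box<dyn BackupCatalogWriter>>,
        name: &CStr,
        size: u64,
        mtime: u64,
    ) -> Result<(), Error> {
        if let Some(catalog) = catalog.as_mut() {
            catalog.add_file(name, size, mtime)?;
        }
        Ok(())
    }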

src/tools/mod.rs

@@ -33,6 +33,9 @@ pub mod statistics;
 pub mod systemd;
 pub mod nom;

+mod parallel_handler;
+pub use parallel_handler::*;
+
 mod wrapped_reader_stream;
 pub use wrapped_reader_stream::*;

src/tools/parallel_handler.rs (new file)

@@ -0,0 +1,133 @@
use std::thread::JoinHandle;
use std::sync::{Arc, Mutex};

use crossbeam_channel::{bounded, Sender};
use anyhow::{format_err, Error};

/// A handle to send data to the worker thread (implements clone)
pub struct SendHandle<I> {
    input: Sender<I>,
    abort: Arc<Mutex<Option<String>>>,
}

/// A thread pool which run the supplied closure
///
/// The send command sends data to the worker threads. If one handler
/// returns an error, we mark the channel as failed and it is no
/// longer possible to send data.
///
/// When done, the 'complete()' method needs to be called to check for
/// outstanding errors.
pub struct ParallelHandler<I> {
    handles: Vec<JoinHandle<()>>,
    name: String,
    input: SendHandle<I>,
}

impl<I: Send + Sync + 'static> SendHandle<I> {
    /// Returns the first error happened, if any
    pub fn check_abort(&self) -> Result<(), Error> {
        let guard = self.abort.lock().unwrap();
        if let Some(err_msg) = &*guard {
            return Err(format_err!("{}", err_msg));
        }
        Ok(())
    }

    /// Send data to the worker threads
    pub fn send(&self, input: I) -> Result<(), Error> {
        self.check_abort()?;
        self.input.send(input)?;
        Ok(())
    }
}

impl<I> Clone for SendHandle<I> {
    fn clone(&self) -> Self {
        Self { input: self.input.clone(), abort: self.abort.clone() }
    }
}

impl<I: Send + Sync + 'static> ParallelHandler<I> {
    /// Create a new thread pool, each thread processing incoming data
    /// with 'handler_fn'.
    pub fn new<F>(
        name: &str,
        threads: usize,
        handler_fn: F,
    ) -> Self
        where F: Fn(I) -> Result<(), Error> + Send + Sync + Clone + 'static,
    {
        let mut handles = Vec::new();
        let (input_tx, input_rx) = bounded::<I>(threads);

        let abort = Arc::new(Mutex::new(None));

        for i in 0..threads {
            let input_rx = input_rx.clone();
            let abort = abort.clone();
            let handler_fn = handler_fn.clone();

            handles.push(
                std::thread::Builder::new()
                    .name(format!("{} ({})", name, i))
                    .spawn(move || {
                        loop {
                            let data = match input_rx.recv() {
                                Ok(data) => data,
                                Err(_) => return,
                            };
                            match (handler_fn)(data) {
                                Ok(()) => {},
                                Err(err) => {
                                    let mut guard = abort.lock().unwrap();
                                    if guard.is_none() {
                                        *guard = Some(err.to_string());
                                    }
                                }
                            }
                        }
                    })
                    .unwrap()
            );
        }

        Self {
            handles,
            name: name.to_string(),
            input: SendHandle {
                input: input_tx,
                abort,
            },
        }
    }

    /// Returns a cloneable channel to send data to the worker threads
    pub fn channel(&self) -> SendHandle<I> {
        self.input.clone()
    }

    /// Send data to the worker threads
    pub fn send(&self, input: I) -> Result<(), Error> {
        self.input.send(input)?;
        Ok(())
    }

    /// Wait for worker threads to complete and check for errors
    pub fn complete(self) -> Result<(), Error> {
        self.input.check_abort()?;
        drop(self.input);

        let mut msg = Vec::new();

        for (i, handle) in self.handles.into_iter().enumerate() {
            if let Err(panic) = handle.join() {
                match panic.downcast::<&str>() {
                    Ok(panic_msg) => msg.push(format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)),
                    Err(_) => msg.push(format!("thread {} ({}) paniced", self.name, i)),
                }
            }
        }

        if msg.is_empty() {
            return Ok(());
        }
        Err(format_err!("{}", msg.join("\n")))
    }
}
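
A minimal usage sketch of the new helper, mirroring how src/client/pull.rs drives it (the item type and handler body are illustrative). Note that the bounded(threads) channel also provides backpressure: send() blocks once the queue is full.

    use anyhow::{format_err, Error};

    fn example() -> Result<(), Error> {
        // four worker threads, each running the closure on received items
        let pool = ParallelHandler::new("demo", 4, |item: u64| {
            if item == 13 {
                return Err(format_err!("unlucky item"));
            }
            println!("processed {}", item);
            Ok(())
        });

        let channel = pool.channel(); // cloneable SendHandle

        for i in 0..100u64 {
            // fails fast once any worker has reported an error
            channel.send(i)?;
        }

        drop(channel);  // drop all SendHandle clones first ...
        pool.complete() // ... then join the workers and surface errors
    }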

www/OnlineHelpInfo.js

@@ -3,6 +3,22 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/index.html",
     "title": "Proxmox Backup Server Documentation Index"
   },
+  "datastore-intro": {
+    "link": "/docs/administration-guide.html#datastore-intro",
+    "title": ":term:`DataStore`"
+  },
+  "user-mgmt": {
+    "link": "/docs/administration-guide.html#user-mgmt",
+    "title": "User Management"
+  },
+  "user-acl": {
+    "link": "/docs/administration-guide.html#user-acl",
+    "title": "Access Control"
+  },
+  "backup-remote": {
+    "link": "/docs/administration-guide.html#backup-remote",
+    "title": ":term:`Remote`"
+  },
   "syncjobs": {
     "link": "/docs/administration-guide.html#syncjobs",
     "title": "Sync Jobs"

www/window/ACLEdit.js

@@ -3,6 +3,8 @@ Ext.define('PBS.window.ACLEdit', {
     alias: 'widget.pbsACLAdd',
     mixins: ['Proxmox.Mixin.CBind'],

+    onlineHelp: 'user_acl',
+
     url: '/access/acl',
     method: 'PUT',
     isAdd: true,

www/window/DataStoreEdit.js

@@ -3,6 +3,9 @@ Ext.define('PBS.DataStoreEdit', {
     alias: 'widget.pbsDataStoreEdit',
     mixins: ['Proxmox.Mixin.CBind'],

+    onlineHelp: 'datastore_intro',
+
     subject: gettext('Datastore'),
     isAdd: true,

www/window/RemoteEdit.js

@@ -3,6 +3,8 @@ Ext.define('PBS.window.RemoteEdit', {
     alias: 'widget.pbsRemoteEdit',
     mixins: ['Proxmox.Mixin.CBind'],

+    onlineHelp: 'backup_remote',
+
     userid: undefined,
     isAdd: true,

www/window/UserEdit.js

@@ -3,6 +3,8 @@ Ext.define('PBS.window.UserEdit', {
     alias: 'widget.pbsUserEdit',
     mixins: ['Proxmox.Mixin.CBind'],

+    onlineHelp: 'user_mgmt',
+
     userid: undefined,
     isAdd: true,