Compare commits
28 Commits
Author | SHA1 | Date
---|---|---
 | cfe01b2e6a |
 | b19b032be3 |
 | 5441708634 |
 | 3c9b370255 |
 | 510544770b |
 | e8293841c2 |
 | 46114bf28e |
 | 0d7e61f06f |
 | fd6a54dfbc |
 | 1ea5722b8f |
 | bc8fadf494 |
 | a76934ad33 |
 | d7a122a026 |
 | 6c25588e63 |
 | 17a1f579d0 |
 | 998db63933 |
 | c0fa14d94a |
 | 6fd129844d |
 | baae780c99 |
 | 09a1da25ed |
 | 298c6aaef6 |
 | a329324139 |
 | a83e2ffeab |
 | 5d7449a121 |
 | ebbe4958c6 |
 | 73b2cc4977 |
 | 7ecfde8150 |
 | 796480a38b |
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.17"
+version = "0.8.21"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -61,6 +61,7 @@ walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.4", features = [ "bindgen" ] }
 nom = "5.1"
+crossbeam-channel = "0.4"

 [features]
 default = []
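The new crossbeam-channel dependency backs the ParallelHandler added further down (src/tools/parallel_handler.rs). A minimal illustrative sketch of the primitive it provides, a bounded multi-producer/multi-consumer channel drained by a worker thread; this is not project code, just the pattern:

// Illustration only: a bounded MPMC channel as used by the worker pool below.
// recv() returns Err once every Sender has been dropped.
use crossbeam_channel::bounded;

fn main() {
    let (tx, rx) = bounded::<u32>(4); // capacity 4: senders block when the queue is full
    let worker = std::thread::spawn(move || {
        while let Ok(item) = rx.recv() {
            println!("got {}", item);
        }
    });
    for i in 0..8 {
        tx.send(i).unwrap();
    }
    drop(tx); // close the channel so the worker's recv() loop ends
    worker.join().unwrap();
}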
debian/changelog (vendored): 38 lines changed
@@ -1,3 +1,41 @@
+rust-proxmox-backup (0.8.21-1) unstable; urgency=medium
+
+  * depend on crossbeam-channel
+
+  * speedup sync jobs (allow up to 4 worker threads)
+
+  * improve docs
+
+  * use jobstate mechanism for verify/garbage_collection schedules
+
+  * proxy: fix error handling in prune scheduling
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 25 Sep 2020 13:20:19 +0200
+
+rust-proxmox-backup (0.8.20-1) unstable; urgency=medium
+
+  * improve sync speed
+
+  * benchmark: use compressable data to get more realistic result
+
+  * docs: add onlineHelp to some panels
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 24 Sep 2020 13:15:45 +0200
+
+rust-proxmox-backup (0.8.19-1) unstable; urgency=medium
+
+  * src/api2/reader.rs: use std::fs::read instead of tokio::fs::read
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 22 Sep 2020 13:30:27 +0200
+
+rust-proxmox-backup (0.8.18-1) unstable; urgency=medium
+
+  * src/client/pull.rs: allow up to 20 concurrent download streams
+
+  * docs: add version and date to HTML index
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 22 Sep 2020 12:39:26 +0200
+
 rust-proxmox-backup (0.8.17-1) unstable; urgency=medium
 
   * src/client/pull.rs: open temporary manifest with truncate(true)
debian/control (vendored): 1 line changed
@@ -12,6 +12,7 @@ Build-Depends: debhelper (>= 11),
 librust-bitflags-1+default-dev (>= 1.2.1-~~),
 librust-bytes-0.5+default-dev,
 librust-crc32fast-1+default-dev,
+librust-crossbeam-channel-0.4+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
 librust-futures-0.3+default-dev,
@@ -74,7 +74,7 @@ onlinehelpinfo:
 	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
 
 .PHONY: html
-html: ${GENERATED_SYNOPSIS}
+html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
 	cp custom.css $(BUILDDIR)/html/_static/
 
docs/_templates/index-sidebar.html (vendored, new file): 11 lines
@@ -0,0 +1,11 @@
+<h3>Navigation</h3>
+{{ toctree(includehidden=theme_sidebar_includehidden, collapse=True, titles_only=True) }}
+{% if theme_extra_nav_links %}
+<hr />
+<h3>Links</h3>
+<ul>
+  {% for text, uri in theme_extra_nav_links.items() %}
+  <li class="toctree-l1"><a href="{{ uri }}">{{ text }}</a></li>
+  {% endfor %}
+</ul>
+{% endif %}
docs/_templates/sidebar-header.html (vendored, new file): 7 lines
@@ -0,0 +1,7 @@
+<p class="logo">
+  <a href="index.html">
+    <img class="logo" src="_static/proxmox-logo.svg" alt="Logo">
+  </a>
+</p>
+<h1 class="logo logo-name"><a href="index.html">Proxmox Backup</a></h1>
+<hr style="width:100%;">
@@ -127,7 +127,7 @@ Backup Server Management
 The command line tool to configure and manage the backup server is called
 :command:`proxmox-backup-manager`.
 
 
+.. _datastore_intro:
 
 :term:`DataStore`
 ~~~~~~~~~~~~~~~~~
@@ -364,7 +364,7 @@ directories will store the chunked data after a backup operation has been execut
   276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
   276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
 
 
+.. _user_mgmt:
 
 User Management
 ~~~~~~~~~~~~~~~
@@ -448,6 +448,8 @@ Or completely remove the user with:
  # proxmox-backup-manager user remove john@pbs
 
 
+.. _user_acl:
+
 Access Control
 ~~~~~~~~~~~~~~
 
@@ -631,6 +633,8 @@ You can also configure DNS settings, from the **DNS** section
 of **Configuration** or by using the ``dns`` subcommand of
 ``proxmox-backup-manager``.
 
+.. _backup_remote:
+
 :term:`Remote`
 ~~~~~~~~~~~~~~
 
docs/conf.py: 45 lines changed
@@ -97,12 +97,10 @@ language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
 #
 # today = ''
 #
 # Else, today_fmt is used as the format for a strftime call.
 #
-# today_fmt = '%B %d, %Y'
+today_fmt = '%A, %d %B %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -164,18 +162,19 @@ html_theme = 'alabaster'
 #
 html_theme_options = {
     'fixed_sidebar': True,
-    #'sidebar_includehidden': False,
-    'sidebar_collapse': False, # FIXME: documented, but does not works?!
-    'show_relbar_bottom': True, # FIXME: documented, but does not works?!
+    'sidebar_includehidden': False,
+    'sidebar_collapse': False,
+    'globaltoc_collapse': False,
+    'show_relbar_bottom': True,
     'show_powered_by': False,
 
+    'logo': 'proxmox-logo.svg',
+    'logo_name': True, # show project name below logo
+    #'logo_text_align': 'center',
+    #'description': 'Fast, Secure & Efficient.',
+    'extra_nav_links': {
+        'Proxmox Homepage': 'https://proxmox.com',
+        'PDF': 'proxmox-backup.pdf',
+    },
+
-    'sidebar_width': '300px',
-    'page_width': '1280px',
+    'sidebar_width': '320px',
+    'page_width': '1320px',
     # font styles
     'head_font_family': 'Lato, sans-serif',
     'caption_font_family': 'Lato, sans-serif',
@@ -183,6 +182,24 @@ html_theme_options = {
     'font_family': 'Open Sans, sans-serif',
 }
 
+# Alabaster theme recommends setting this fixed.
+# If you switch theme this needs to removed, probably.
+html_sidebars = {
+    '**': [
+        'sidebar-header.html',
+        'searchbox.html',
+        'navigation.html',
+        'relations.html',
+    ],
+
+    'index': [
+        'sidebar-header.html',
+        'searchbox.html',
+        'index-sidebar.html',
+    ]
+}
+
 
 # Add any paths that contain custom themes here, relative to this directory.
 # html_theme_path = []
 
@@ -228,10 +245,6 @@ html_static_path = ['_static']
 #
 # html_use_smartypants = True
 
-# Custom sidebar templates, maps document names to template names.
-#
-# html_sidebars = {}
-
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
 #
@@ -13,3 +13,40 @@ div.body img {
 pre {
     padding: 5px 10px;
 }
+
+li a.current {
+    font-weight: bold;
+    border-bottom: 1px solid #000;
+}
+ul li.toctree-l1 {
+    margin-top: 0.5em;
+}
+ul li.toctree-l1 > a {
+    color: #000;
+}
+
+div.sphinxsidebar form.search {
+    margin-bottom: 5px;
+}
+
+div.sphinxsidebar h3 {
+    width: 100%;
+}
+
+div.sphinxsidebar h1.logo-name {
+    display: none;
+}
+@media screen and (max-width: 875px) {
+    div.sphinxsidebar p.logo {
+        display: initial;
+    }
+    div.sphinxsidebar h1.logo-name {
+        display: block;
+    }
+    div.sphinxsidebar span {
+        color: #AAA;
+    }
+    ul li.toctree-l1 > a {
+        color: #FFF;
+    }
+}
@@ -2,8 +2,8 @@
 
 Welcome to the Proxmox Backup documentation!
 ============================================
 
-Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
+| Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
+| Version |version| -- |today|
 
 Permission is granted to copy, distribute and/or modify this document under the
 terms of the GNU Free Documentation License, Version 1.3 or any later version
@@ -45,9 +45,10 @@ in the section entitled "GNU Free Documentation License".
 
 .. toctree::
    :maxdepth: 2
+   :hidden:
    :caption: Developer Appendix
 
    todos.rst
 
 
-* :ref:`genindex`
+.. # * :ref:`genindex`
 
@@ -132,6 +132,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
     datastore::save_config(&config)?;
 
     crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+    crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
+    crate::config::jobstate::create_state_file("verify", &datastore.name)?;
 
     Ok(())
 }
@@ -313,13 +315,23 @@ pub fn update_datastore(
         }
     }
 
-    if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
+    let mut gc_schedule_changed = false;
+    if gc_schedule.is_some() {
+        gc_schedule_changed = data.gc_schedule != gc_schedule;
+        data.gc_schedule = gc_schedule;
+    }
 
     let mut prune_schedule_changed = false;
     if prune_schedule.is_some() {
-        prune_schedule_changed = true;
+        prune_schedule_changed = data.prune_schedule != prune_schedule;
         data.prune_schedule = prune_schedule;
     }
-    if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
+
+    let mut verify_schedule_changed = false;
+    if verify_schedule.is_some() {
+        verify_schedule_changed = data.verify_schedule != verify_schedule;
+        data.verify_schedule = verify_schedule;
+    }
 
     if keep_last.is_some() { data.keep_last = keep_last; }
     if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
@@ -332,12 +344,20 @@ pub fn update_datastore(
 
     datastore::save_config(&config)?;
 
-    // we want to reset the statefile, to avoid an immediate sync in some cases
+    // we want to reset the statefiles, to avoid an immediate action in some cases
     // (e.g. going from monthly to weekly in the second week of the month)
+    if gc_schedule_changed {
+        crate::config::jobstate::create_state_file("garbage_collection", &name)?;
+    }
+
     if prune_schedule_changed {
         crate::config::jobstate::create_state_file("prune", &name)?;
     }
 
+    if verify_schedule_changed {
+        crate::config::jobstate::create_state_file("verify", &name)?;
+    }
+
     Ok(())
 }
 
@@ -377,7 +397,10 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
 
     datastore::save_config(&config)?;
 
-    crate::config::jobstate::remove_state_file("prune", &name)?;
+    // ignore errors
+    let _ = crate::config::jobstate::remove_state_file("prune", &name);
+    let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
+    let _ = crate::config::jobstate::remove_state_file("verify", &name);
 
     Ok(())
 }
@@ -229,8 +229,7 @@ fn download_chunk(
 
     env.debug(format!("download chunk {:?}", path));
 
-    let data = tokio::fs::read(path)
-        .await
+    let data = tools::runtime::block_in_place(|| std::fs::read(path))
         .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
 
     let body = Body::from(data);
 
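This hunk matches the 0.8.19 changelog entry: the chunk file is now read with std::fs::read inside the project's block_in_place wrapper instead of tokio::fs::read. A rough sketch of the underlying pattern, written against plain tokio::task::block_in_place; the project's tools::runtime wrapper is assumed to behave similarly:

// Sketch, not the project's code: run a short blocking read on the current
// runtime worker thread instead of going through tokio's async file I/O.
// Requires tokio's multi-threaded runtime.
use std::path::Path;

async fn read_chunk_file(path: &Path) -> std::io::Result<Vec<u8>> {
    let path = path.to_owned();
    tokio::task::block_in_place(move || std::fs::read(&path))
}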
@@ -198,7 +198,10 @@ impl DataBlob {
             Ok(data)
         } else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
             let data_start = std::mem::size_of::<DataBlobHeader>();
-            let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
+            let mut reader = &self.raw_data[data_start..];
+            let data = zstd::stream::decode_all(&mut reader)?;
+            // zstd::block::decompress is abou 10% slower
+            // let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
             if let Some(digest) = digest {
                 Self::verify_digest(&data, None, digest)?;
             }
@@ -268,6 +271,12 @@ impl DataBlob {
         }
     }
 
+    /// Returns if chunk is encrypted
+    pub fn is_encrypted(&self) -> bool {
+        let magic = self.magic();
+        magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0
+    }
+
     /// Verify digest and data length for unencrypted chunks.
     ///
     /// To do that, we need to decompress data first. Please note that
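For reference, a standalone sketch of the decoding change above (helper name is made up): zstd::stream::decode_all reads from any std::io::Read, so the decompressed size does not have to be bounded up front the way zstd::block::decompress needs a MAX_BLOB_SIZE capacity argument.

// Sketch only: decode a zstd-compressed payload from a byte slice.
// A &[u8] implements std::io::Read, so it can feed the stream decoder directly.
fn decode_payload(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut reader = compressed;
    zstd::stream::decode_all(&mut reader)
}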
@@ -202,40 +202,14 @@ async fn schedule_tasks() -> Result<(), Error> {
     Ok(())
 }
 
-fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
-
-    let list = proxmox_backup::server::read_task_list()?;
-
-    let mut last: Option<&server::UPID> = None;
-
-    for entry in list.iter() {
-        if entry.upid.worker_type == worker_type {
-            if let Some(ref id) = entry.upid.worker_id {
-                if id == worker_id {
-                    match last {
-                        Some(ref upid) => {
-                            if upid.starttime < entry.upid.starttime {
-                                last = Some(&entry.upid)
-                            }
-                        }
-                        None => {
-                            last = Some(&entry.upid)
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    Ok(last.cloned())
-}
-
-
 async fn schedule_datastore_garbage_collection() {
 
     use proxmox_backup::backup::DataStore;
     use proxmox_backup::server::{UPID, WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
 
@@ -291,11 +265,10 @@ async fn schedule_datastore_garbage_collection() {
                 }
             }
         } else {
-            match lookup_last_worker(worker_type, &store) {
-                Ok(Some(upid)) => upid.starttime,
-                Ok(None) => 0,
+            match jobstate::last_run_time(worker_type, &store) {
+                Ok(time) => time,
                 Err(err) => {
-                    eprintln!("lookup_last_job_start failed: {}", err);
+                    eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                     continue;
                 }
             }
@@ -314,6 +287,11 @@ async fn schedule_datastore_garbage_collection() {
 
         if next > now { continue; }
 
+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let store2 = store.clone();
 
         if let Err(err) = WorkerTask::new_thread(
@@ -322,9 +300,20 @@ async fn schedule_datastore_garbage_collection() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
+                job.start(&worker.upid().to_string())?;
+
                 worker.log(format!("starting garbage collection on store {}", store));
                 worker.log(format!("task triggered by schedule '{}'", event_str));
-                datastore.garbage_collection(&worker)
+
+                let result = datastore.garbage_collection(&worker);
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
+                }
+
+                result
             }
         ) {
             eprintln!("unable to start garbage collection on store {} - {}", store2, err);
@@ -434,7 +423,7 @@ async fn schedule_datastore_prune() {
 
                 job.start(&worker.upid().to_string())?;
 
-                let result = {
+                let result = try_block!({
 
                     worker.log(format!("Starting datastore prune on store \"{}\"", store));
                     worker.log(format!("task triggered by schedule '{}'", event_str));
@@ -463,7 +452,7 @@ async fn schedule_datastore_prune() {
                         }
                     }
                     Ok(())
-                };
+                });
 
                 let status = worker.create_state(&result);
 
@@ -482,7 +471,10 @@ async fn schedule_datastore_prune() {
 async fn schedule_datastore_verification() {
     use proxmox_backup::backup::{DataStore, verify_all_backups};
     use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
 
@@ -526,16 +518,10 @@ async fn schedule_datastore_verification() {
 
         let worker_type = "verify";
 
-        let last = match lookup_last_worker(worker_type, &store) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            }
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &store) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                 continue;
             }
         };
@@ -553,6 +539,11 @@ async fn schedule_datastore_verification() {
 
         if next > now { continue; }
 
+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let worker_id = store.clone();
         let store2 = store.clone();
         if let Err(err) = WorkerTask::new_thread(
@@ -561,18 +552,29 @@ async fn schedule_datastore_verification() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
+                job.start(&worker.upid().to_string())?;
                 worker.log(format!("starting verification on store {}", store2));
                 worker.log(format!("task triggered by schedule '{}'", event_str));
-                if let Ok(failed_dirs) = verify_all_backups(datastore, worker.clone()) {
+                let result = try_block!({
+                    let failed_dirs = verify_all_backups(datastore, worker.clone())?;
                     if failed_dirs.len() > 0 {
                         worker.log("Failed to verify following snapshots:");
                         for dir in failed_dirs {
                             worker.log(format!("\t{}", dir));
                         }
-                        Err(format_err!("verification failed - please check the log for details"))
-                    } else {
-                        Ok(())
+                        bail!("verification failed - please check the log for details");
                     }
-                }
+                    Ok(())
+                });
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
+                }
+
+                result
             },
         ) {
             eprintln!("unable to start verification on store {} - {}", store, err);
@@ -21,6 +21,8 @@ use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
     KeyDerivationConfig,
+    DataBlob,
+    DataChunkBuilder,
 };
 
 use proxmox_backup::client::*;
@@ -60,6 +62,9 @@ struct Speed {
         "aes256_gcm": {
             type: Speed,
         },
+        "verify": {
+            type: Speed,
+        },
     },
 )]
 #[derive(Copy, Clone, Serialize)]
@@ -75,9 +80,10 @@ struct BenchmarkResult {
     decompress: Speed,
     /// AES256 GCM encryption speed
     aes256_gcm: Speed,
+    /// Verify speed
+    verify: Speed,
 }
 
 
 static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
     tls: Speed {
         speed: None,
@@ -85,19 +91,23 @@ static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
     },
     sha256: Speed {
         speed: None,
-        top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 2022.0, // AMD Ryzen 7 2700X
     },
     compress: Speed {
        speed: None,
-        top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 752.0, // AMD Ryzen 7 2700X
     },
     decompress: Speed {
         speed: None,
-        top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 1198.0, // AMD Ryzen 7 2700X
     },
     aes256_gcm: Speed {
         speed: None,
-        top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
+        top: 1_000_000.0 * 3645.0, // AMD Ryzen 7 2700X
     },
+    verify: Speed {
+        speed: None,
+        top: 1_000_000.0 * 758.0, // AMD Ryzen 7 2700X
+    },
 };
 
@@ -194,6 +204,9 @@ fn render_result(
         .column(ColumnConfig::new("decompress")
             .header("ZStd level 1 decompression speed")
             .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("verify")
+            .header("Chunk verification speed")
+            .right_align(false).renderer(render_speed))
         .column(ColumnConfig::new("aes256_gcm")
             .header("AES256 GCM encryption speed")
             .right_align(false).renderer(render_speed));
@@ -257,7 +270,17 @@ fn test_crypt_speed(
 
     let crypt_config = CryptConfig::new(testkey)?;
 
-    let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+    //let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+    let mut random_data = vec![];
+    // generate pseudo random byte sequence
+    for i in 0..256*1024 {
+        for j in 0..4 {
+            let byte = ((i >> (j<<3))&0xff) as u8;
+            random_data.push(byte);
+        }
+    }
+
+    assert_eq!(random_data.len(), 1024*1024);
 
     let start_time = std::time::Instant::now();
 
@@ -322,5 +345,23 @@ fn test_crypt_speed(
 
     eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);
 
+
+    let start_time = std::time::Instant::now();
+
+    let (chunk, digest) = DataChunkBuilder::new(&random_data)
+        .compress(true)
+        .build()?;
+
+    let mut bytes = 0;
+    loop {
+        chunk.verify_unencrypted(random_data.len(), &digest)?;
+        bytes += random_data.len();
+        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.verify.speed = Some(speed);
+
+    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);
+
     Ok(())
 }
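The 0.8.20 changelog entry "benchmark: use compressable data to get more realistic result" corresponds to the data generation shown above. For reference, a standalone version of that generator (the helper name is made up):

// Build 1 MiB of deterministic, compressible test data: the four
// little-endian bytes of a running counter, instead of random bytes,
// so compression and verify figures resemble real chunk data.
fn make_benchmark_data() -> Vec<u8> {
    let mut data = Vec::with_capacity(1024 * 1024);
    for i in 0..256 * 1024u32 {
        for j in 0..4 {
            data.push(((i >> (j << 3)) & 0xff) as u8);
        }
    }
    assert_eq!(data.len(), 1024 * 1024);
    data
}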
@@ -3,13 +3,15 @@
 
 use anyhow::{bail, format_err, Error};
 use serde_json::json;
 use std::convert::TryFrom;
-use std::sync::Arc;
-use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use std::collections::{HashSet, HashMap};
 use std::io::{Seek, SeekFrom};
+use std::time::SystemTime;
+use std::sync::atomic::{AtomicUsize, Ordering};
 
 use proxmox::api::error::{StatusCode, HttpError};
 use crate::{
-    tools::compute_file_csum,
+    tools::{ParallelHandler, compute_file_csum},
     server::WorkerTask,
     backup::*,
     api2::types::*,
@@ -22,27 +24,86 @@ use crate::{
 // Todo: correctly lock backup groups
 
 async fn pull_index_chunks<I: IndexFile>(
-    _worker: &WorkerTask,
-    chunk_reader: &mut RemoteChunkReader,
+    worker: &WorkerTask,
+    chunk_reader: RemoteChunkReader,
     target: Arc<DataStore>,
     index: I,
+    downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
 ) -> Result<(), Error> {
 
-    for pos in 0..index.index_count() {
-        let info = index.chunk_info(pos).unwrap();
-        let chunk_exists = target.cond_touch_chunk(&info.digest, false)?;
-        if chunk_exists {
-            //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
-            continue;
-        }
-        //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
-        let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
-
-        chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
-
-        target.insert_chunk(&chunk, &info.digest)?;
-    }
+    use futures::stream::{self, StreamExt, TryStreamExt};
+
+    let start_time = SystemTime::now();
+
+    let stream = stream::iter(
+        (0..index.index_count())
+            .map(|pos| index.chunk_info(pos).unwrap())
+            .filter(|info| {
+                let mut guard = downloaded_chunks.lock().unwrap();
+                let done = guard.contains(&info.digest);
+                if !done {
+                    // Note: We mark a chunk as downloaded before its actually downloaded
+                    // to avoid duplicate downloads.
+                    guard.insert(info.digest);
+                }
+                !done
+            })
+    );
+
+    let target2 = target.clone();
+    let verify_pool = ParallelHandler::new(
+        "sync chunk writer", 4,
+        move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
+            // println!("verify and write {}", proxmox::tools::digest_to_hex(&digest));
+            chunk.verify_unencrypted(size as usize, &digest)?;
+            target2.insert_chunk(&chunk, &digest)?;
+            Ok(())
+        }
+    );
+
+    let verify_and_write_channel = verify_pool.channel();
+
+    let bytes = Arc::new(AtomicUsize::new(0));
+
+    stream
+        .map(|info| {
+
+            let target = Arc::clone(&target);
+            let chunk_reader = chunk_reader.clone();
+            let bytes = Arc::clone(&bytes);
+            let verify_and_write_channel = verify_and_write_channel.clone();
+
+            Ok::<_, Error>(async move {
+                let chunk_exists = crate::tools::runtime::block_in_place(|| target.cond_touch_chunk(&info.digest, false))?;
+                if chunk_exists {
+                    //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
+                    return Ok::<_, Error>(());
+                }
+                //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
+                let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
+                let raw_size = chunk.raw_size() as usize;
+
+                // decode, verify and write in a separate threads to maximize throughput
+                crate::tools::runtime::block_in_place(|| verify_and_write_channel.send((chunk, info.digest, info.size())))?;
+
+                bytes.fetch_add(raw_size, Ordering::SeqCst);
+
+                Ok(())
+            })
+        })
+        .try_buffer_unordered(20)
+        .try_for_each(|_res| futures::future::ok(()))
+        .await?;
+
+    drop(verify_and_write_channel);
+
+    verify_pool.complete()?;
+
+    let elapsed = start_time.elapsed()?.as_secs_f64();
+
+    let bytes = bytes.load(Ordering::SeqCst);
+
+    worker.log(format!("downloaded {} bytes ({} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
 
     Ok(())
 }
@@ -89,6 +150,7 @@ async fn pull_single_archive(
     tgt_store: Arc<DataStore>,
     snapshot: &BackupDir,
     archive_info: &FileInfo,
+    downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
 ) -> Result<(), Error> {
 
     let archive_name = &archive_info.filename;
@@ -115,7 +177,7 @@ async fn pull_single_archive(
             let (csum, size) = index.compute_csum();
             verify_archive(archive_info, &csum, size)?;
 
-            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
+            pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
         }
         ArchiveType::FixedIndex => {
             let index = FixedIndexReader::new(tmpfile)
@@ -123,7 +185,7 @@ async fn pull_single_archive(
             let (csum, size) = index.compute_csum();
             verify_archive(archive_info, &csum, size)?;
 
-            pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
+            pull_index_chunks(worker, chunk_reader.clone(), tgt_store.clone(), index, downloaded_chunks).await?;
         }
         ArchiveType::Blob => {
            let (csum, size) = compute_file_csum(&mut tmpfile)?;
@@ -169,6 +231,7 @@ async fn pull_snapshot(
     reader: Arc<BackupReader>,
     tgt_store: Arc<DataStore>,
     snapshot: &BackupDir,
+    downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
 ) -> Result<(), Error> {
 
     let mut manifest_name = tgt_store.base_path();
@@ -278,6 +341,7 @@ async fn pull_snapshot(
             tgt_store.clone(),
             snapshot,
             &item,
+            downloaded_chunks.clone(),
         ).await?;
     }
 
@@ -300,6 +364,7 @@ pub async fn pull_snapshot_from(
     reader: Arc<BackupReader>,
     tgt_store: Arc<DataStore>,
     snapshot: &BackupDir,
+    downloaded_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
 ) -> Result<(), Error> {
 
     let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;
@@ -307,7 +372,7 @@ pub async fn pull_snapshot_from(
     if is_new {
         worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
 
-        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
+        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await {
             if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
                 worker.log(format!("cleanup error - {}", cleanup_err));
             }
@@ -316,7 +381,7 @@ pub async fn pull_snapshot_from(
         worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
     } else {
         worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
-        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
+        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks).await?;
         worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
     }
 
@@ -351,6 +416,9 @@ pub async fn pull_group(
 
     let mut remote_snapshots = std::collections::HashSet::new();
 
+    // start with 16384 chunks (up to 65GB)
+    let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));
+
     for item in list {
         let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
 
@@ -384,7 +452,7 @@ pub async fn pull_group(
             true,
         ).await?;
 
-        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
+        pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await?;
     }
 
     if delete {
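The rewritten pull_index_chunks combines two concurrency mechanisms: up to 20 chunk downloads in flight via try_buffer_unordered, feeding a 4-thread ParallelHandler that verifies and writes chunks. A stripped-down sketch of the download side only, with a placeholder item type and per-item work; this is the pattern, not the project's function:

// Sketch: map each work item to a fallible future and poll at most 20 of
// them concurrently; the first error aborts the whole stream.
use anyhow::Error;
use futures::stream::{self, StreamExt, TryStreamExt};

async fn process_all(digests: Vec<[u8; 32]>) -> Result<(), Error> {
    stream::iter(digests)
        .map(|digest| {
            Ok::<_, Error>(async move {
                // placeholder for "download the chunk, then hand it to the verify pool"
                let _ = digest;
                Ok::<_, Error>(())
            })
        })
        .try_buffer_unordered(20) // completion order does not matter here
        .try_for_each(|()| futures::future::ok(()))
        .await
}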
@@ -15,7 +15,7 @@ pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     crypt_mode: CryptMode,
-    cache_hint: HashMap<[u8; 32], usize>,
+    cache_hint: Arc<HashMap<[u8; 32], usize>>,
     cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
 }
 
@@ -33,7 +33,7 @@ impl RemoteChunkReader {
             client,
             crypt_config,
             crypt_mode,
-            cache_hint,
+            cache_hint: Arc::new(cache_hint),
             cache: Arc::new(Mutex::new(HashMap::new())),
         }
     }
@@ -17,17 +17,3 @@ pub trait BackupCatalogWriter {
     fn add_fifo(&mut self, name: &CStr) -> Result<(), Error>;
     fn add_socket(&mut self, name: &CStr) -> Result<(), Error>;
 }
-
-pub struct DummyCatalogWriter();
-
-impl BackupCatalogWriter for DummyCatalogWriter {
-    fn start_directory(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn end_directory(&mut self) -> Result<(), Error> { Ok(()) }
-    fn add_file(&mut self, _name: &CStr, _size: u64, _mtime: u64) -> Result<(), Error> { Ok(()) }
-    fn add_symlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_hardlink(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_block_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_char_device(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_fifo(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-    fn add_socket(&mut self, _name: &CStr) -> Result<(), Error> { Ok(()) }
-}
@@ -33,6 +33,9 @@ pub mod statistics;
 pub mod systemd;
 pub mod nom;
 
+mod parallel_handler;
+pub use parallel_handler::*;
+
 mod wrapped_reader_stream;
 pub use wrapped_reader_stream::*;
 
src/tools/parallel_handler.rs (new file): 133 lines
@@ -0,0 +1,133 @@
+use std::thread::{JoinHandle};
+use std::sync::{Arc, Mutex};
+use crossbeam_channel::{bounded, Sender};
+use anyhow::{format_err, Error};
+
+/// A handle to send data to the worker thread (implements clone)
+pub struct SendHandle<I> {
+    input: Sender<I>,
+    abort: Arc<Mutex<Option<String>>>,
+}
+
+/// A thread pool which run the supplied closure
+///
+/// The send command sends data to the worker threads. If one handler
+/// returns an error, we mark the channel as failed and it is no
+/// longer possible to send data.
+///
+/// When done, the 'complete()' method needs to be called to check for
+/// outstanding errors.
+pub struct ParallelHandler<I> {
+    handles: Vec<JoinHandle<()>>,
+    name: String,
+    input: SendHandle<I>,
+}
+
+impl <I: Send + Sync +'static> SendHandle<I> {
+
+    /// Returns the first error happened, if any
+    pub fn check_abort(&self) -> Result<(), Error> {
+        let guard = self.abort.lock().unwrap();
+        if let Some(err_msg) = &*guard {
+            return Err(format_err!("{}", err_msg));
+        }
+        Ok(())
+    }
+
+    /// Send data to the worker threads
+    pub fn send(&self, input: I) -> Result<(), Error> {
+        self.check_abort()?;
+        self.input.send(input)?;
+        Ok(())
+    }
+}
+
+impl <I> Clone for SendHandle<I> {
+    fn clone(&self) -> Self {
+        Self { input: self.input.clone(), abort: self.abort.clone() }
+    }
+}
+
+impl <I: Send + Sync + 'static> ParallelHandler<I> {
+
+    /// Create a new thread pool, each thread processing incoming data
+    /// with 'handler_fn'.
+    pub fn new<F>(
+        name: &str,
+        threads: usize,
+        handler_fn: F,
+    ) -> Self
+        where F: Fn(I) -> Result<(), Error> + Send + Sync + Clone + 'static,
+    {
+        let mut handles = Vec::new();
+        let (input_tx, input_rx) = bounded::<I>(threads);
+
+        let abort = Arc::new(Mutex::new(None));
+
+        for i in 0..threads {
+            let input_rx = input_rx.clone();
+            let abort = abort.clone();
+            let handler_fn = handler_fn.clone();
+            handles.push(
+                std::thread::Builder::new()
+                    .name(format!("{} ({})", name, i))
+                    .spawn(move || {
+                        loop {
+                            let data = match input_rx.recv() {
+                                Ok(data) => data,
+                                Err(_) => return,
+                            };
+                            match (handler_fn)(data) {
+                                Ok(()) => {},
+                                Err(err) => {
+                                    let mut guard = abort.lock().unwrap();
+                                    if guard.is_none() {
+                                        *guard = Some(err.to_string());
+                                    }
+                                }
+                            }
+                        }
+                    })
+                    .unwrap()
+            );
+        }
+        Self {
+            handles,
+            name: name.to_string(),
+            input: SendHandle {
+                input: input_tx,
+                abort,
+            },
+        }
+    }
+
+    /// Returns a cloneable channel to send data to the worker threads
+    pub fn channel(&self) -> SendHandle<I> {
+        self.input.clone()
+    }
+
+    /// Send data to the worker threads
+    pub fn send(&self, input: I) -> Result<(), Error> {
+        self.input.send(input)?;
+        Ok(())
+    }
+
+    /// Wait for worker threads to complete and check for errors
+    pub fn complete(self) -> Result<(), Error> {
+        self.input.check_abort()?;
+        drop(self.input);
+        let mut msg = Vec::new();
+        for (i, handle) in self.handles.into_iter().enumerate() {
+            if let Err(panic) = handle.join() {
+                match panic.downcast::<&str>() {
+                    Ok(panic_msg) => msg.push(format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)),
+                    Err(_) => msg.push(format!("thread {} ({}) paniced", self.name, i)),
+                }
+            }
+        }
+        if msg.is_empty() {
+            return Ok(());
+        }
+        Err(format_err!("{}", msg.join("\n")))
+    }
+}
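A short usage sketch for the new ParallelHandler; the item type and the work inside the closure are placeholders, while new/channel/send/complete are the methods defined in the file above:

// Hypothetical caller: fan items out to 4 worker threads, then join.
use anyhow::{bail, Error};
use proxmox_backup::tools::ParallelHandler;

fn run() -> Result<(), Error> {
    let pool = ParallelHandler::new("demo worker", 4, |item: u64| {
        if item == 13 {
            bail!("refusing unlucky item"); // first error poisons the channel
        }
        Ok(())
    });

    let sender = pool.channel(); // cloneable handle, e.g. one per producer
    for item in 0..100u64 {
        sender.send(item)?; // fails fast once any worker has errored
    }
    drop(sender);    // drop extra senders so the workers' recv() loops can end
    pool.complete()  // joins the threads and reports the first error or panic
}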
@@ -3,6 +3,22 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/index.html",
     "title": "Proxmox Backup Server Documentation Index"
   },
+  "datastore-intro": {
+    "link": "/docs/administration-guide.html#datastore-intro",
+    "title": ":term:`DataStore`"
+  },
+  "user-mgmt": {
+    "link": "/docs/administration-guide.html#user-mgmt",
+    "title": "User Management"
+  },
+  "user-acl": {
+    "link": "/docs/administration-guide.html#user-acl",
+    "title": "Access Control"
+  },
+  "backup-remote": {
+    "link": "/docs/administration-guide.html#backup-remote",
+    "title": ":term:`Remote`"
+  },
   "syncjobs": {
     "link": "/docs/administration-guide.html#syncjobs",
     "title": "Sync Jobs"
@@ -3,6 +3,8 @@ Ext.define('PBS.window.ACLEdit', {
     alias: 'widget.pbsACLAdd',
     mixins: ['Proxmox.Mixin.CBind'],
 
+    onlineHelp: 'user_acl',
+
     url: '/access/acl',
     method: 'PUT',
     isAdd: true,
@@ -3,6 +3,9 @@ Ext.define('PBS.DataStoreEdit', {
     alias: 'widget.pbsDataStoreEdit',
     mixins: ['Proxmox.Mixin.CBind'],
 
+
+    onlineHelp: 'datastore_intro',
+
     subject: gettext('Datastore'),
     isAdd: true,
 
@@ -3,6 +3,8 @@ Ext.define('PBS.window.RemoteEdit', {
     alias: 'widget.pbsRemoteEdit',
     mixins: ['Proxmox.Mixin.CBind'],
 
+    onlineHelp: 'backup_remote',
+
     userid: undefined,
 
     isAdd: true,
@@ -3,6 +3,8 @@ Ext.define('PBS.window.UserEdit', {
     alias: 'widget.pbsUserEdit',
     mixins: ['Proxmox.Mixin.CBind'],
 
+    onlineHelp: 'user_mgmt',
+
     userid: undefined,
 
     isAdd: true,