Compare commits

...

18 Commits

Author SHA1 Message Date
7397f4a390 bump version to 0.8.14-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 10:41:42 +02:00
8317873c06 gc: improve percentage done logs 2020-09-02 10:04:18 +02:00
deef63699e verify: also fail on server shutdown 2020-09-02 09:50:17 +02:00
c6e07769e9 ui: datastore content: eslint fixes
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:30:57 +02:00
423df9b1f4 ui: datastore: show more granular verify state
Allows one to distinguish the following situations:
* some snapshots in a group were not verified
* how many snapshots failed to verify in a group
* all snapshots verified, but the last verification task was over 30 days
  ago

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:30:57 +02:00
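
(In the DataStoreContent.js diff below, these cases map onto the new group-row states 'All OK', 'All OK (old)', 'N OK', 'None' and 'N failed'.)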
c879e5af11 ui: datastore: mark row invalid if last snapshot verification failed
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-02 09:12:05 +02:00
63d9aca96f verify: log progress 2020-09-02 07:43:28 +02:00
c3b1da9e41 datastore content: search: set emptytext to searched columns
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:30:54 +02:00
46388e6aef datastore content: reduce count column width
Using 75 as width we can display up to 9999999, which would allow
displaying over 19 years of snapshots taken each minute, so quite
enough for the common cases.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:28:14 +02:00
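
(As a quick check: one snapshot per minute is 60 * 24 * 365 = 525600 snapshots per year, and 9999999 / 525600 is roughly 19.0, so a seven-digit counter does cover about 19 years.)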
484d439a7c datastore content: reload after verify
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-09-01 18:27:30 +02:00
ab6615134c d/postinst: always fixup termproxy user id and for all users
Anyone with a PAM account and Sys.Console access could have started a
termproxy session, so adapt the regex accordingly.

Always test for broken entries and run the sed expression to make sure
that eventually all occurrences of the broken syntax are fixed.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
2020-09-01 18:02:11 +02:00
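
For illustration (with a hypothetical username): the old expression only rewrote ':termproxy::root: ', while the adapted one rewrites any broken entry such as ':termproxy::admin: ' to ':termproxy::admin@pam: '.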
b1149ebb36 ui: DataStoreContent.js: fix wrong comma
it should be a semicolon

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-01 15:33:55 +02:00
1bfdae7933 ui: DataStoreContent: improve encrypted column
do not count files for which we do not have any information

such files exist in the backup dir, but are not in the manifest,
so we cannot use those files to determine whether the backups are
encrypted or not

this marks encrypted/signed backups with unencrypted client.log.blob files as
encrypted/signed (respectively) instead of 'Mixed'

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-09-01 15:33:55 +02:00
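
Concretely (hypothetical example): a snapshot whose manifest lists only encrypted archives, but whose directory also holds an unencrypted client.log.blob that is absent from the manifest, is now shown as 'Encrypted' instead of 'Mixed', because the unlisted log blob is no longer counted.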
4f09d31085 src/backup/verify.rs: use global hashes (instead of per group)
This makes verify more predictable: chunks shared between backup groups are now verified only once per datastore run.
2020-09-01 13:33:04 +02:00
58d73ddb1d src/backup/data_blob.rs: avoid useless &, data is already a reference 2020-09-01 12:56:25 +02:00
6b809ff59b src/backup/verify.rs: use separate thread to load data 2020-09-01 12:56:25 +02:00
afe08d2755 debian/control: fix versions 2020-09-01 10:19:40 +02:00
a7bc5d4eaf depend on proxmox 0.3.4 2020-08-28 06:32:33 +02:00
10 changed files with 336 additions and 132 deletions

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.8.13"
+version = "0.8.14"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -39,7 +39,7 @@ pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.3.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.3.4", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"

debian/changelog

@@ -1,3 +1,17 @@
+rust-proxmox-backup (0.8.14-1) unstable; urgency=medium
+
+  * verify speed up: use separate IO thread, use datastore-wide cache (instead
+    of per group)
+
+  * ui: datastore content: improve encrypted column
+
+  * ui: datastore content: show more granular verify state, especially for
+    backup group rows
+
+  * verify: log progress in percent
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 02 Sep 2020 09:36:47 +0200
+
 rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
 
   * improve and add to documentation
debian/control

@@ -34,10 +34,10 @@ Build-Depends: debhelper (>= 11),
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.3+api-macro-dev (>= 0.3.3-~~),
-librust-proxmox-0.3+default-dev (>= 0.3.3-~~),
-librust-proxmox-0.3+sortable-macro-dev (>= 0.3.3-~~),
-librust-proxmox-0.3+websocket-dev (>= 0.3.3-~~),
+librust-proxmox-0.3+api-macro-dev (>= 0.3.4-~~),
+librust-proxmox-0.3+default-dev (>= 0.3.4-~~),
+librust-proxmox-0.3+sortable-macro-dev (>= 0.3.4-~~),
+librust-proxmox-0.3+websocket-dev (>= 0.3.4-~~),
 librust-proxmox-fuse-0.1+default-dev,
 librust-pxar-0.6+default-dev,
 librust-pxar-0.6+futures-io-dev,

debian/postinst

@@ -15,11 +15,10 @@ case "$1" in
         fi
         deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
-        if test -n "$2"; then
-            if dpkg --compare-versions "$2" 'le' '0.8.10-1'; then
-                echo "Fixing up termproxy user id in task log..."
-                flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::root: /:termproxy::root@pam: /' /var/log/proxmox-backup/tasks/active
-            fi
+        # FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
+        if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
+            echo "Fixing up termproxy user id in task log..."
+            flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
         fi
         ;;

src/api2/admin/datastore.rs

@@ -1,6 +1,7 @@
 use std::collections::{HashSet, HashMap};
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
+use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -512,18 +513,27 @@ pub fn verify(
         userid,
         to_stdout,
         move |worker| {
+            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
             let failed_dirs = if let Some(backup_dir) = backup_dir {
-                let mut verified_chunks = HashSet::with_capacity(1024*16);
-                let mut corrupt_chunks = HashSet::with_capacity(64);
                 let mut res = Vec::new();
-                if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
+                if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
                     res.push(backup_dir.to_string());
                 }
                 res
             } else if let Some(backup_group) = backup_group {
-                verify_backup_group(&datastore, &backup_group, &worker)?
+                let (_count, failed_dirs) = verify_backup_group(
+                    datastore,
+                    &backup_group,
+                    verified_chunks,
+                    corrupt_chunks,
+                    None,
+                    worker.clone(),
+                )?;
+                failed_dirs
             } else {
-                verify_all_backups(&datastore, &worker)?
+                verify_all_backups(datastore, worker.clone())?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");

src/backup/chunk_store.rs

@@ -295,7 +295,7 @@ impl ChunkStore {
         for (entry, percentage) in self.get_chunk_iterator()? {
             if last_percentage != percentage {
                 last_percentage = percentage;
-                worker.log(format!("{}%, processed {} chunks", percentage, chunk_count));
+                worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
             }
             worker.fail_on_abort()?;

src/backup/data_blob.rs

@@ -304,7 +304,7 @@ impl DataBlob {
         let digest = match config {
             Some(config) => config.compute_digest(data),
-            None => openssl::sha::sha256(&data),
+            None => openssl::sha::sha256(data),
         };
         if &digest != expected_digest {
             bail!("detected chunk with wrong digest.");

src/backup/datastore.rs

@@ -430,6 +430,12 @@ impl DataStore {
         let image_list = self.list_images()?;
 
+        let image_count = image_list.len();
+        let mut done = 0;
+        let mut last_percentage: usize = 0;
+
         for path in image_list {
 
             worker.fail_on_abort()?;
@@ -444,6 +450,14 @@ impl DataStore {
                     self.index_mark_used_chunks(index, &path, status, worker)?;
                 }
             }
+            done += 1;
+
+            let percentage = done*100/image_count;
+            if percentage > last_percentage {
+                worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
+                    percentage, done, image_count));
+                last_percentage = percentage;
+            }
         }
 
         Ok(())

src/backup/verify.rs

@@ -1,4 +1,7 @@
 use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{Ordering, AtomicUsize};
+use std::time::Instant;
 
 use anyhow::{bail, format_err, Error};
 
@@ -6,12 +9,12 @@ use crate::server::WorkerTask;
 use crate::api2::types::*;
 
 use super::{
-    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
     CryptMode,
     FileInfo, ArchiveType, archive_type,
 };
 
-fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
 
     let blob = datastore.load_blob(backup_dir, &info.filename)?;
 
@@ -36,48 +39,97 @@ fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -
     }
 }
 
+// We use a separate thread to read/load chunks, so that we can do
+// load and verify in parallel to increase performance.
+fn chunk_reader_thread(
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    errors: Arc<AtomicUsize>,
+    worker: Arc<WorkerTask>,
+) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
+
+    let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
+
+    std::thread::spawn(move|| {
+        for pos in 0..index.index_count() {
+            let info = index.chunk_info(pos).unwrap();
+            let size = info.range.end - info.range.start;
+
+            if verified_chunks.lock().unwrap().contains(&info.digest) {
+                continue; // already verified
+            }
+
+            if corrupt_chunks.lock().unwrap().contains(&info.digest) {
+                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
+                worker.log(format!("chunk {} was marked as corrupt", digest_str));
+                errors.fetch_add(1, Ordering::SeqCst);
+                continue;
+            }
+
+            match datastore.load_chunk(&info.digest) {
+                Err(err) => {
+                    corrupt_chunks.lock().unwrap().insert(info.digest);
+                    worker.log(format!("can't verify chunk, load failed - {}", err));
+                    errors.fetch_add(1, Ordering::SeqCst);
+                    continue;
+                }
+                Ok(chunk) => {
+                    if sender.send((chunk, info.digest, size)).is_err() {
+                        break; // receiver gone - simply stop
+                    }
+                }
+            }
+        }
+    });
+
+    receiver
+}
+
 fn verify_index_chunks(
-    datastore: &DataStore,
-    index: Box<dyn IndexFile>,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8; 32]>,
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     crypt_mode: CryptMode,
-    worker: &WorkerTask,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {
 
-    let mut errors = 0;
+    let errors = Arc::new(AtomicUsize::new(0));
 
-    for pos in 0..index.index_count() {
+    let start_time = Instant::now();
+
+    let chunk_channel = chunk_reader_thread(
+        datastore,
+        index,
+        verified_chunks.clone(),
+        corrupt_chunks.clone(),
+        errors.clone(),
+        worker.clone(),
+    );
+
+    let mut read_bytes = 0;
+    let mut decoded_bytes = 0;
+
+    loop {
 
         worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;
 
-        let info = index.chunk_info(pos).unwrap();
-
-        if verified_chunks.contains(&info.digest) {
-            continue; // already verified
-        }
-
-        if corrupt_chunks.contains(&info.digest) {
-            let digest_str = proxmox::tools::digest_to_hex(&info.digest);
-            worker.log(format!("chunk {} was marked as corrupt", digest_str));
-            errors += 1;
-            continue;
-        }
-
-        let chunk = match datastore.load_chunk(&info.digest) {
-            Err(err) => {
-                corrupt_chunks.insert(info.digest);
-                worker.log(format!("can't verify chunk, load failed - {}", err));
-                errors += 1;
-                continue;
-            },
-            Ok(chunk) => chunk,
+        let (chunk, digest, size) = match chunk_channel.recv() {
+            Ok(tuple) => tuple,
+            Err(std::sync::mpsc::RecvError) => break,
         };
 
+        read_bytes += chunk.raw_size();
+        decoded_bytes += size;
+
         let chunk_crypt_mode = match chunk.crypt_mode() {
             Err(err) => {
-                corrupt_chunks.insert(info.digest);
+                corrupt_chunks.lock().unwrap().insert(digest);
                 worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
-                errors += 1;
+                errors.fetch_add(1, Ordering::SeqCst);
                 continue;
             },
             Ok(mode) => mode,
@@ -89,21 +141,32 @@ fn verify_index_chunks(
                 chunk_crypt_mode,
                 crypt_mode
             ));
-            errors += 1;
+            errors.fetch_add(1, Ordering::SeqCst);
         }
 
-        let size = info.range.end - info.range.start;
-
-        if let Err(err) = chunk.verify_unencrypted(size as usize, &info.digest) {
-            corrupt_chunks.insert(info.digest);
+        if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
+            corrupt_chunks.lock().unwrap().insert(digest);
             worker.log(format!("{}", err));
-            errors += 1;
+            errors.fetch_add(1, Ordering::SeqCst);
        } else {
-            verified_chunks.insert(info.digest);
+            verified_chunks.lock().unwrap().insert(digest);
        }
    }
 
-    if errors > 0 {
+    let elapsed = start_time.elapsed().as_secs_f64();
+
+    let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
+    let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
+
+    let read_speed = read_bytes_mib/elapsed;
+    let decode_speed = decoded_bytes_mib/elapsed;
+
+    let error_count = errors.load(Ordering::SeqCst);
+
+    worker.log(format!("  verified {:.2}/{:.2} Mib in {:.2} seconds, speed {:.2}/{:.2} Mib/s ({} errors)",
+        read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
+
+    if errors.load(Ordering::SeqCst) > 0 {
         bail!("chunks could not be verified");
     }
 
@@ -111,12 +174,12 @@ fn verify_index_chunks(
 }
 
 fn verify_fixed_index(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
     info: &FileInfo,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {
 
     let mut path = backup_dir.relative_path();
@@ -137,12 +200,12 @@ fn verify_fixed_index(
 }
 
 fn verify_dynamic_index(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
     info: &FileInfo,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {
 
     let mut path = backup_dir.relative_path();
@@ -172,11 +235,11 @@ fn verify_dynamic_index(
 /// - Ok(false) if there were verification errors
 /// - Err(_) if task was aborted
 pub fn verify_backup_dir(
-    datastore: &DataStore,
+    datastore: Arc<DataStore>,
     backup_dir: &BackupDir,
-    verified_chunks: &mut HashSet<[u8;32]>,
-    corrupt_chunks: &mut HashSet<[u8;32]>,
-    worker: &WorkerTask
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>
 ) -> Result<bool, Error> {
 
     let mut manifest = match datastore.load_manifest(&backup_dir) {
@@ -198,27 +261,28 @@ pub fn verify_backup_dir(
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex =>
                     verify_fixed_index(
-                        &datastore,
+                        datastore.clone(),
                         &backup_dir,
                         info,
-                        verified_chunks,
-                        corrupt_chunks,
-                        worker
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
                     ),
                 ArchiveType::DynamicIndex =>
                     verify_dynamic_index(
-                        &datastore,
+                        datastore.clone(),
                         &backup_dir,
                         info,
-                        verified_chunks,
-                        corrupt_chunks,
-                        worker
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
                     ),
-                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+                ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
             }
         });
 
         worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;
 
         if let Err(err) = result {
             worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
@@ -245,32 +309,45 @@ pub fn verify_backup_dir(
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(failed_dirs) where failed_dirs had verification errors
+/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<Vec<String>, Error> {
+pub fn verify_backup_group(
+    datastore: Arc<DataStore>,
+    group: &BackupGroup,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    progress: Option<(usize, usize)>, // (done, snapshot_count)
+    worker: Arc<WorkerTask>,
+) -> Result<(usize, Vec<String>), Error> {
 
     let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
-            return Ok(errors);
+            return Ok((0, errors));
         }
     };
 
     worker.log(format!("verify group {}:{}", datastore.name(), group));
 
-    let mut verified_chunks = HashSet::with_capacity(1024*16); // start with 16384 chunks (up to 65GB)
-    let mut corrupt_chunks = HashSet::with_capacity(64); // start with 64 chunks since we assume there are few corrupt ones
+    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));
 
+    let mut count = 0;
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
-        if !verify_backup_dir(datastore, &info.backup_dir, &mut verified_chunks, &mut corrupt_chunks, worker)?{
+        count += 1;
+        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
             errors.push(info.backup_dir.to_string());
         }
+
+        if snapshot_count != 0 {
+            let pos = done + count;
+            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
+            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
+        }
     }
 
-    Ok(errors)
+    Ok((count, errors))
 }
 
 /// Verify all backups inside a datastore
@@ -280,7 +357,7 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
 ///
 /// Returns
 /// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<Vec<String>, Error> {
+pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
 
     let mut errors = Vec::new();
 
@@ -294,11 +371,32 @@ pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<
 
     list.sort_unstable();
 
-    worker.log(format!("verify datastore {}", datastore.name()));
+    let mut snapshot_count = 0;
+    for group in list.iter() {
+        snapshot_count += group.list_backups(&datastore.base_path())?.len();
+    }
 
+    // start with 16384 chunks (up to 65GB)
+    let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+
+    // start with 64 chunks since we assume there are few corrupt ones
+    let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+    worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
+
+    let mut done = 0;
     for group in list {
-        let mut group_errors = verify_backup_group(datastore, &group, worker)?;
+        let (count, mut group_errors) = verify_backup_group(
+            datastore.clone(),
+            &group,
+            verified_chunks.clone(),
+            corrupt_chunks.clone(),
+            Some((done, snapshot_count)),
+            worker.clone(),
+        )?;
         errors.append(&mut group_errors);
+        done += count;
     }
 
     Ok(errors)
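
The core of 6b809ff59b and 4f09d31085, reduced to a standalone sketch (our simplification with made-up names, not the project's code): chunk reads happen on a dedicated thread and are handed over a bounded channel, so disk I/O overlaps with digest verification, while a shared set of already verified digests avoids re-checking chunks that several snapshots reference.

    use std::collections::HashSet;
    use std::sync::mpsc::sync_channel;
    use std::sync::{Arc, Mutex};
    use std::thread;

    fn main() {
        // Datastore-wide cache of already verified "digests" (here just u8 ids).
        let verified: Arc<Mutex<HashSet<u8>>> = Arc::new(Mutex::new(HashSet::new()));

        // Bounded to 3 in-flight chunks, matching the buffer size in the patch.
        let (sender, receiver) = sync_channel::<(u8, Vec<u8>)>(3);

        let producer_seen = verified.clone();
        let reader = thread::spawn(move || {
            for digest in [1u8, 2, 3, 2, 1, 4] {
                if producer_seen.lock().unwrap().contains(&digest) {
                    continue; // already verified - skip the (expensive) load
                }
                let chunk = vec![digest; 8]; // stand-in for datastore.load_chunk()
                if sender.send((digest, chunk)).is_err() {
                    break; // receiver gone - simply stop, as the patch does
                }
            }
        });

        // recv() fails once the sender is dropped, which ends the loop.
        while let Ok((digest, chunk)) = receiver.recv() {
            // stand-in for chunk.verify_unencrypted(); overlaps the next read
            let _checksum: u32 = chunk.iter().map(|&b| u32::from(b)).sum();
            verified.lock().unwrap().insert(digest);
        }

        reader.join().unwrap();
    }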

www/DataStoreContent.js

@@ -6,17 +6,16 @@ Ext.define('pbs-data-store-snapshots', {
        {
            name: 'backup-time',
            type: 'date',
-           dateFormat: 'timestamp'
+           dateFormat: 'timestamp',
        },
        'files',
        'owner',
        'verification',
-       { name: 'size', type: 'int', allowNull: true, },
+       { name: 'size', type: 'int', allowNull: true },
        {
            name: 'crypt-mode',
            type: 'boolean',
            calculate: function(data) {
-               let encrypted = 0;
                let crypt = {
                    none: 0,
                    mixed: 0,
@@ -24,25 +23,24 @@ Ext.define('pbs-data-store-snapshots', {
                    encrypt: 0,
                    count: 0,
                };
-               let signed = 0;
                data.files.forEach(file => {
                    if (file.filename === 'index.json.blob') return; // is never encrypted
                    let mode = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
                    if (mode !== -1) {
                        crypt[file['crypt-mode']]++;
+                       crypt.count++;
                    }
-                   crypt.count++;
                });
 
                return PBS.Utils.calculateCryptMode(crypt);
-           }
+           },
        },
        {
            name: 'matchesFilter',
            type: 'boolean',
            defaultValue: true,
        },
-    ]
+    ],
 });
 
 Ext.define('PBS.DataStoreContent', {
@@ -70,7 +68,7 @@ Ext.define('PBS.DataStoreContent', {
            view.getStore().setSorters([
                'backup-group',
                'text',
-               'backup-time'
+               'backup-time',
            ]);
            Proxmox.Utils.monStoreErrors(view, this.store);
            this.reload(); // initial load
@@ -88,7 +86,7 @@ Ext.define('PBS.DataStoreContent', {
            this.store.setProxy({
                type: 'proxmox',
                timeout: 300*1000, // 5 minutes, we should make that api call faster
-               url: url
+               url: url,
            });
 
            this.store.load();
@@ -124,7 +122,7 @@ Ext.define('PBS.DataStoreContent', {
                    expanded: false,
                    backup_type: item.data["backup-type"],
                    backup_id: item.data["backup-id"],
-                   children: []
+                   children: [],
                };
            }
@@ -163,7 +161,7 @@ Ext.define('PBS.DataStoreContent', {
                    }
                    return false;
                },
-               after: () => {},
+               after: Ext.emptyFn,
            });
 
            for (const item of records) {
@@ -181,7 +179,7 @@ Ext.define('PBS.DataStoreContent', {
                data.children = [];
                for (const file of data.files) {
-                   file.text = file.filename,
+                   file.text = file.filename;
                    file['crypt-mode'] = PBS.Utils.cryptmap.indexOf(file['crypt-mode']);
                    file.leaf = true;
                    file.matchesFilter = true;
@@ -192,6 +190,7 @@ Ext.define('PBS.DataStoreContent', {
                children.push(data);
            }
 
+           let nowSeconds = Date.now() / 1000;
            let children = [];
            for (const [name, group] of Object.entries(groups)) {
                let last_backup = 0;
@@ -201,7 +200,13 @@ Ext.define('PBS.DataStoreContent', {
                    'sign-only': 0,
                    encrypt: 0,
                };
-               for (const item of group.children) {
+               let verify = {
+                   outdated: 0,
+                   none: 0,
+                   failed: 0,
+                   ok: 0,
+               };
+               for (let item of group.children) {
                    crypt[PBS.Utils.cryptmap[item['crypt-mode']]]++;
                    if (item["backup-time"] > last_backup && item.size !== null) {
                        last_backup = item["backup-time"];
@@ -209,13 +214,24 @@ Ext.define('PBS.DataStoreContent', {
                        group.files = item.files;
                        group.size = item.size;
                        group.owner = item.owner;
+                       verify.lastFailed = item.verification && item.verification.state !== 'ok';
                    }
-                   if (item.verification &&
-                       (!group.verification || group.verification.state !== 'failed')) {
-                       group.verification = item.verification;
+                   if (!item.verification) {
+                       verify.none++;
+                   } else {
+                       if (item.verification.state === 'ok') {
+                           verify.ok++;
+                       } else {
+                           verify.failed++;
+                       }
+                       let task = Proxmox.Utils.parse_task_upid(item.verification.upid);
+                       item.verification.lastTime = task.starttime;
+                       if (nowSeconds - task.starttime > 30 * 24 * 60 * 60) {
+                           verify.outdated++;
+                       }
                    }
                }
+               group.verification = verify;
                group.count = group.children.length;
                group.matchesFilter = true;
                crypt.count = group.count;
@@ -226,7 +242,7 @@ Ext.define('PBS.DataStoreContent', {
            view.setRootNode({
                expanded: true,
-               children: children
+               children: children,
            });
 
            if (selected !== undefined) {
@@ -246,13 +262,13 @@ Ext.define('PBS.DataStoreContent', {
            Proxmox.Utils.setErrorMask(view, false);
            if (view.getStore().getFilters().length > 0) {
                let searchBox = me.lookup("searchbox");
-               let searchvalue = searchBox.getValue();;
+               let searchvalue = searchBox.getValue();
                me.search(searchBox, searchvalue);
            }
        },
 
        onPrune: function(view, rI, cI, item, e, rec) {
-           var view = this.getView();
+           view = this.getView();
 
            if (!(rec && rec.data)) return;
            let data = rec.data;
@@ -270,7 +286,8 @@ Ext.define('PBS.DataStoreContent', {
        },
 
        onVerify: function(view, rI, cI, item, e, rec) {
-           var view = this.getView();
+           let me = this;
+           view = me.getView();
 
            if (!view.datastore) return;
@@ -302,6 +319,7 @@ Ext.define('PBS.DataStoreContent', {
                success: function(response, options) {
                    Ext.create('Proxmox.window.TaskViewer', {
                        upid: response.result.data,
+                       taskDone: () => me.reload(),
                    }).show();
                },
            });
@@ -309,7 +327,7 @@ Ext.define('PBS.DataStoreContent', {
        onForget: function(view, rI, cI, item, e, rec) {
            let me = this;
-           var view = this.getView();
+           view = this.getView();
 
            if (!(rec && rec.data)) return;
            let data = rec.data;
@@ -364,7 +382,8 @@ Ext.define('PBS.DataStoreContent', {
            let atag = document.createElement('a');
            params['file-name'] = file;
            atag.download = filename;
-           let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`, window.location.origin);
+           let url = new URL(`/api2/json/admin/datastore/${view.datastore}/download-decoded`,
+                             window.location.origin);
            for (const [key, value] of Object.entries(params)) {
                url.searchParams.append(key, value);
            }
@@ -427,7 +446,7 @@ Ext.define('PBS.DataStoreContent', {
            store.beginUpdate();
            store.getRoot().cascadeBy({
                before: function(item) {
-                   if(me.filter(item, value)) {
+                   if (me.filter(item, value)) {
                        item.set('matchesFilter', true);
                        if (item.parentNode && item.parentNode.id !== 'root') {
                            item.parentNode.childmatches = true;
@@ -459,12 +478,22 @@ Ext.define('PBS.DataStoreContent', {
        },
    },
 
+    viewConfig: {
+       getRowClass: function(record, index) {
+           let verify = record.get('verification');
+           if (verify && verify.lastFailed) {
+               return 'proxmox-invalid-row';
+           }
+           return null;
+       },
+    },
+
    columns: [
        {
            xtype: 'treecolumn',
            header: gettext("Backup Group"),
            dataIndex: 'text',
-           flex: 1
+           flex: 1,
        },
        {
            header: gettext('Actions'),
@@ -511,9 +540,9 @@ Ext.define('PBS.DataStoreContent', {
                            data.filename &&
                            data.filename.endsWith('pxar.didx') &&
                            data['crypt-mode'] < 3);
-                   }
+                   },
                },
-           ]
+           ],
        },
        {
            xtype: 'datecolumn',
@@ -521,7 +550,7 @@ Ext.define('PBS.DataStoreContent', {
            sortable: true,
            dataIndex: 'backup-time',
            format: 'Y-m-d H:i:s',
-           width: 150
+           width: 150,
        },
        {
            header: gettext("Size"),
@@ -543,6 +572,8 @@ Ext.define('PBS.DataStoreContent', {
            format: '0',
            header: gettext("Count"),
            sortable: true,
+           width: 75,
+           align: 'right',
            dataIndex: 'count',
        },
        {
@@ -565,29 +596,66 @@ Ext.define('PBS.DataStoreContent', {
                if (iconCls) {
                    iconTxt = `<i class="fa fa-fw fa-${iconCls}"></i> `;
                }
-               return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText
-           }
+               return (iconTxt + PBS.Utils.cryptText[v]) || Proxmox.Utils.unknownText;
+           },
        },
        {
            header: gettext('Verify State'),
            sortable: true,
            dataIndex: 'verification',
+           width: 120,
            renderer: (v, meta, record) => {
-               if (v === undefined || v === null || !v.state) {
-                   //meta.tdCls = "x-grid-row-loading";
-                   return record.data.leaf ? '' : gettext('None');
+               let i = (cls, txt) => `<i class="fa fa-fw fa-${cls}"></i> ${txt}`;
+               if (v === undefined || v === null) {
+                   return record.data.leaf ? '' : i('question-circle-o warning', gettext('None'));
                }
-               let task = Proxmox.Utils.parse_task_upid(v.upid);
-               let verify_time = Proxmox.Utils.render_timestamp(task.starttime);
-               let iconCls = v.state === 'ok' ? 'check good' : 'times critical';
-               let tip = `Verify task started on ${verify_time}`;
+               let tip, iconCls, txt;
                if (record.parentNode.id === 'root') {
-                   tip = v.state === 'ok'
-                       ? 'All verification OK in backup group'
-                       : 'At least one failed verification in backup group!';
+                   if (v.failed === 0) {
+                       if (v.none === 0) {
+                           if (v.outdated > 0) {
+                               tip = 'All OK, but some snapshots were not verified in last 30 days';
+                               iconCls = 'check warning';
+                               txt = gettext('All OK (old)');
+                           } else {
+                               tip = 'All snapshots verified at least once in last 30 days';
+                               iconCls = 'check good';
+                               txt = gettext('All OK');
+                           }
+                       } else if (v.ok === 0) {
+                           tip = `${v.none} not verified yet`;
+                           iconCls = 'question-circle-o warning';
+                           txt = gettext('None');
+                       } else {
+                           tip = `${v.ok} OK, ${v.none} not verified yet`;
+                           iconCls = 'check faded';
+                           txt = `${v.ok} OK`;
+                       }
+                   } else {
+                       tip = `${v.ok} OK, ${v.failed} failed, ${v.none} not verified yet`;
+                       iconCls = 'times critical';
+                       txt = v.ok === 0 && v.none === 0
+                           ? gettext('All failed')
+                           : `${v.failed} failed`;
+                   }
+               } else if (!v.state) {
+                   return record.data.leaf ? '' : gettext('None');
+               } else {
+                   let verify_time = Proxmox.Utils.render_timestamp(v.lastTime);
+                   tip = `Last verify task started on ${verify_time}`;
+                   txt = v.state;
+                   iconCls = 'times critical';
+                   if (v.state === 'ok') {
+                       iconCls = 'check good';
+                       let now = Date.now() / 1000;
+                       if (now - v.lastTime > 30 * 24 * 60 * 60) {
+                           tip = `Last verify task over 30 days ago: ${verify_time}`;
+                           iconCls = 'check warning';
+                       }
+                   }
                }
                return `<span data-qtip="${tip}">
-                   <i class="fa fa-fw fa-${iconCls}"></i> ${v.state}
+                   <i class="fa fa-fw fa-${iconCls}"></i> ${txt}
                </span>`;
            },
            listeners: {
@@ -619,6 +687,7 @@ Ext.define('PBS.DataStoreContent', {
        {
            xtype: 'textfield',
            reference: 'searchbox',
+           emptyText: gettext('group, date or owner'),
            triggers: {
                clear: {
                    cls: 'pmx-clear-trigger',
@@ -628,7 +697,7 @@ Ext.define('PBS.DataStoreContent', {
                        this.triggers.clear.setVisible(false);
                        this.setValue('');
                    },
-               }
+               },
            },
            listeners: {
                change: {
@@ -636,6 +705,6 @@ Ext.define('PBS.DataStoreContent', {
                    buffer: 500,
                },
            },
-       }
+       },
    ],
 });