move chunk_store to pbs-datastore

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Wolfgang Bumiller 2021-07-07 14:37:47 +02:00
parent 83771aa037
commit c23192d34e
20 changed files with 168 additions and 150 deletions

View File

@@ -1,5 +1,8 @@
 //! Basic API types used by most of the PBS code.
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
 use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
 use proxmox::const_regex;
@@ -37,6 +40,7 @@ pub use userid::{Username, UsernameRef};
 pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
 pub mod upid;
+pub use upid::UPID;
 const_regex! {
     pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
@@ -84,3 +88,56 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl
     .schema();
 pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
+
+#[api(
+    properties: {
+        "upid": {
+            optional: true,
+            type: UPID,
+        },
+    },
+)]
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Garbage collection status.
+pub struct GarbageCollectionStatus {
+    pub upid: Option<String>,
+    /// Number of processed index files.
+    pub index_file_count: usize,
+    /// Sum of bytes referred by index files.
+    pub index_data_bytes: u64,
+    /// Bytes used on disk.
+    pub disk_bytes: u64,
+    /// Chunks used on disk.
+    pub disk_chunks: usize,
+    /// Sum of removed bytes.
+    pub removed_bytes: u64,
+    /// Number of removed chunks.
+    pub removed_chunks: usize,
+    /// Sum of pending bytes (pending removal - kept for safety).
+    pub pending_bytes: u64,
+    /// Number of pending chunks (pending removal - kept for safety).
+    pub pending_chunks: usize,
+    /// Number of chunks marked as .bad by verify that have been removed by GC.
+    pub removed_bad: usize,
+    /// Number of chunks still marked as .bad after garbage collection.
+    pub still_bad: usize,
+}
+
+impl Default for GarbageCollectionStatus {
+    fn default() -> Self {
+        GarbageCollectionStatus {
+            upid: None,
+            index_file_count: 0,
+            index_data_bytes: 0,
+            disk_bytes: 0,
+            disk_chunks: 0,
+            removed_bytes: 0,
+            removed_chunks: 0,
+            pending_bytes: 0,
+            pending_chunks: 0,
+            removed_bad: 0,
+            still_bad: 0,
+        }
+    }
+}
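
The moved type is plain serde data, so any crate that depends on pbs-api-types can round-trip it without the API-schema machinery. A minimal sketch, assuming a workspace dependency on pbs-api-types plus serde_json, of how the kebab-case renaming shows up on the wire:

// Sketch: fill two fields, serialize, and observe the renamed keys.
use pbs_api_types::GarbageCollectionStatus;

fn main() -> Result<(), serde_json::Error> {
    let status = GarbageCollectionStatus {
        index_file_count: 3,
        index_data_bytes: 4096,
        ..Default::default()
    };
    // Output contains "index-file-count": 3 and "index-data-bytes": 4096;
    // "upid" serializes as null while unset.
    println!("{}", serde_json::to_string_pretty(&status)?);
    Ok(())
}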

View File

@@ -1,17 +1,17 @@
-use anyhow::{bail, format_err, Error};
-use std::path::{Path, PathBuf};
 use std::io::Write;
-use std::sync::{Arc, Mutex};
 use std::os::unix::io::AsRawFd;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, format_err, Error};
 use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
-use crate::task_log;
-use crate::tools;
-use crate::api2::types::GarbageCollectionStatus;
+use pbs_api_types::GarbageCollectionStatus;
+use pbs_tools::process_locker::{self, ProcessLocker};
-use super::DataBlob;
+use crate::DataBlob;
+use crate::task_log;
 use crate::task::TaskState;
 /// File system based chunk store
@@ -20,7 +20,7 @@ pub struct ChunkStore {
     pub (crate) base: PathBuf,
     chunk_dir: PathBuf,
     mutex: Mutex<()>,
-    locker: Arc<Mutex<tools::ProcessLocker>>,
+    locker: Arc<Mutex<ProcessLocker>>,
 }
 // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
@@ -62,6 +62,10 @@ impl ChunkStore {
         chunk_dir
     }
+    pub fn base(&self) -> &Path {
+        &self.base
+    }
+
     pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn TaskState>) -> Result<Self, Error>
     where
         P: Into<PathBuf>,
@@ -139,7 +143,7 @@ impl ChunkStore {
         let lockfile_path = Self::lockfile_path(&base);
-        let locker = tools::ProcessLocker::new(&lockfile_path)?;
+        let locker = ProcessLocker::new(&lockfile_path)?;
         Ok(ChunkStore {
             name: name.to_owned(),
@@ -274,15 +278,16 @@ impl ChunkStore {
     }
     pub fn oldest_writer(&self) -> Option<i64> {
-        tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
+        ProcessLocker::oldest_shared_lock(self.locker.clone())
     }
-    pub fn sweep_unused_chunks(
+    pub fn sweep_unused_chunks<F: Fn() -> Result<(), Error>>(
         &self,
         oldest_writer: i64,
         phase1_start_time: i64,
         status: &mut GarbageCollectionStatus,
         worker: &dyn TaskState,
+        fail_on_shutdown: F,
     ) -> Result<(), Error> {
         use nix::sys::stat::fstatat;
         use nix::unistd::{unlinkat, UnlinkatFlags};
@@ -310,7 +315,7 @@ impl ChunkStore {
             }
             worker.check_abort()?;
-            tools::fail_on_shutdown()?;
+            fail_on_shutdown()?;
             let (dirfd, entry) = match entry {
                 Ok(entry) => (entry.parent_fd(), entry),
@@ -442,12 +447,12 @@ impl ChunkStore {
         self.base.clone()
     }
-    pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
-        tools::ProcessLocker::try_shared_lock(self.locker.clone())
+    pub fn try_shared_lock(&self) -> Result<process_locker::ProcessLockSharedGuard, Error> {
+        ProcessLocker::try_shared_lock(self.locker.clone())
     }
-    pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
-        tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
+    pub fn try_exclusive_lock(&self) -> Result<process_locker::ProcessLockExclusiveGuard, Error> {
+        ProcessLocker::try_exclusive_lock(self.locker.clone())
     }
 }
@@ -466,7 +471,7 @@ fn test_chunk_store1() {
     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
     let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None).unwrap();
-    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
+    let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
     let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(!exists);
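
With the shutdown check injected as a type parameter, sweep_unused_chunks no longer depends on the main crate's tools module. A sketch of supplying a custom check, assuming a ChunkStore and worker are already at hand; the AtomicBool is an illustrative stand-in, while the main crate itself passes crate::tools::fail_on_shutdown (see the datastore.rs hunk further down):

use std::sync::atomic::{AtomicBool, Ordering};

use anyhow::{bail, Error};
use pbs_api_types::GarbageCollectionStatus;
use pbs_datastore::ChunkStore;
use pbs_datastore::task::TaskState;

fn sweep_with_flag(
    store: &ChunkStore,
    worker: &dyn TaskState,
    oldest_writer: i64,
    phase1_start_time: i64,
    shutdown: &AtomicBool,
) -> Result<GarbageCollectionStatus, Error> {
    let mut status = GarbageCollectionStatus::default();
    store.sweep_unused_chunks(
        oldest_writer,
        phase1_start_time,
        &mut status,
        worker,
        // any Fn() -> Result<(), Error> fits the new generic parameter
        || {
            if shutdown.load(Ordering::SeqCst) {
                bail!("shutdown requested - aborting sweep");
            }
            Ok(())
        },
    )?;
    Ok(status)
}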

View File

@@ -182,6 +182,7 @@ pub mod backup_info;
 pub mod catalog;
 pub mod checksum_reader;
 pub mod checksum_writer;
+pub mod chunk_store;
 pub mod chunker;
 pub mod crypt_config;
 pub mod crypt_reader;
@@ -198,6 +199,7 @@ pub mod task;
 pub use backup_info::{BackupDir, BackupGroup, BackupInfo};
 pub use checksum_reader::ChecksumReader;
 pub use checksum_writer::ChecksumWriter;
+pub use chunk_store::ChunkStore;
 pub use chunker::Chunker;
 pub use crypt_config::{CryptConfig, CryptMode, Fingerprint};
 pub use crypt_reader::CryptReader;

View File

@@ -19,3 +19,38 @@ impl<T: TaskState + ?Sized> TaskState for std::sync::Arc<T> {
         <T as TaskState>::log(&*self, level, message)
     }
 }
+
+#[macro_export]
+macro_rules! task_error {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_warn {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_log {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_debug {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_trace {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
+    }};
+}

View File

@@ -8,6 +8,8 @@ use proxmox::api::{api, Router, RpcEnvironment, Permission};
 use proxmox::api::section_config::SectionConfigData;
 use proxmox::api::schema::parse_property_string;
+use pbs_datastore::task::TaskState;
+
 use crate::api2::types::*;
 use crate::backup::*;
 use crate::config::cached_user_info::CachedUserInfo;
@@ -54,7 +56,7 @@ pub(crate) fn do_create_datastore(
     _lock: std::fs::File,
     mut config: SectionConfigData,
     datastore: DataStoreConfig,
-    worker: Option<&dyn crate::task::TaskState>,
+    worker: Option<&dyn TaskState>,
 ) -> Result<(), Error> {
     let path: PathBuf = datastore.path.clone().into();

View File

@@ -15,9 +15,10 @@ use proxmox::{
     },
 };
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task_warn,
     config::{
         self,
         cached_user_info::CachedUserInfo,
@@ -55,7 +56,6 @@ use crate::{
         Userid,
     },
     server::WorkerTask,
-    task::TaskState,
     tape::{
         TAPE_STATUS_DIR,
         Inventory,

View File

@@ -22,8 +22,9 @@ use proxmox::{
     },
 };
+use pbs_datastore::task_log;
+
 use crate::{
-    task_log,
     config::{
         self,
         cached_user_info::CachedUserInfo,

View File

@@ -28,10 +28,10 @@ use proxmox::{
     },
 };
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task_warn,
-    task::TaskState,
     tools::ParallelHandler,
     api2::types::{
         DATASTORE_MAP_ARRAY_SCHEMA,

View File

@@ -683,59 +683,6 @@ pub struct BackupContent {
     pub size: Option<u64>,
 }
-#[api(
-    properties: {
-        "upid": {
-            optional: true,
-            schema: UPID_SCHEMA,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Garbage collection status.
-pub struct GarbageCollectionStatus {
-    pub upid: Option<String>,
-    /// Number of processed index files.
-    pub index_file_count: usize,
-    /// Sum of bytes referred by index files.
-    pub index_data_bytes: u64,
-    /// Bytes used on disk.
-    pub disk_bytes: u64,
-    /// Chunks used on disk.
-    pub disk_chunks: usize,
-    /// Sum of removed bytes.
-    pub removed_bytes: u64,
-    /// Number of removed chunks.
-    pub removed_chunks: usize,
-    /// Sum of pending bytes (pending removal - kept for safety).
-    pub pending_bytes: u64,
-    /// Number of pending chunks (pending removal - kept for safety).
-    pub pending_chunks: usize,
-    /// Number of chunks marked as .bad by verify that have been removed by GC.
-    pub removed_bad: usize,
-    /// Number of chunks still marked as .bad after garbage collection.
-    pub still_bad: usize,
-}
-
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            upid: None,
-            index_file_count: 0,
-            index_data_bytes: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-            removed_bytes: 0,
-            removed_chunks: 0,
-            pending_bytes: 0,
-            pending_chunks: 0,
-            removed_bad: 0,
-            still_bad: 0,
-        }
-    }
-}
-
 #[api()]
 #[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.

View File

@@ -12,6 +12,8 @@ use lazy_static::lazy_static;
 use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
 use pbs_tools::format::HumanByte;
 use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
@@ -23,7 +25,6 @@ use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NA
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore::{self, DataStoreConfig};
-use crate::task::TaskState;
 use crate::tools;
 use crate::api2::types::{Authid, GarbageCollectionStatus};
 use crate::server::UPID;
@@ -55,7 +56,7 @@ impl DataStore {
         if let Some(datastore) = map.get(name) {
             // Compare Config - if changed, create new Datastore object!
-            if datastore.chunk_store.base == path &&
+            if datastore.chunk_store.base() == path &&
                 datastore.verify_new == config.verify_new.unwrap_or(false)
             {
                 return Ok(datastore.clone());
@@ -487,7 +488,7 @@ impl DataStore {
             tools::fail_on_shutdown()?;
             let digest = index.index_digest(pos).unwrap();
             if !self.chunk_store.cond_touch_chunk(digest, false)? {
-                crate::task_warn!(
+                task_warn!(
                     worker,
                     "warning: unable to access non-existent chunk {}, required by {:?}",
                     proxmox::tools::digest_to_hex(digest),
@@ -558,7 +559,7 @@ impl DataStore {
             let percentage = (i + 1) * 100 / image_count;
             if percentage > last_percentage {
-                crate::task_log!(
+                task_log!(
                     worker,
                     "marked {}% ({} of {} index files)",
                     percentage,
@@ -570,7 +571,7 @@ impl DataStore {
         }
         if strange_paths_count > 0 {
-            crate::task_log!(
+            task_log!(
                 worker,
                 "found (and marked) {} index files outside of expected directory scheme",
                 strange_paths_count,
@@ -604,26 +605,27 @@ impl DataStore {
             let mut gc_status = GarbageCollectionStatus::default();
             gc_status.upid = Some(upid.to_string());
-            crate::task_log!(worker, "Start GC phase1 (mark used chunks)");
+            task_log!(worker, "Start GC phase1 (mark used chunks)");
             self.mark_used_chunks(&mut gc_status, worker)?;
-            crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
             self.chunk_store.sweep_unused_chunks(
                 oldest_writer,
                 phase1_start_time,
                 &mut gc_status,
                 worker,
+                crate::tools::fail_on_shutdown,
             )?;
-            crate::task_log!(
+            task_log!(
                 worker,
                 "Removed garbage: {}",
                 HumanByte::from(gc_status.removed_bytes),
             );
-            crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
+            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
             if gc_status.pending_bytes > 0 {
-                crate::task_log!(
+                task_log!(
                     worker,
                     "Pending removals: {} (in {} chunks)",
                     HumanByte::from(gc_status.pending_bytes),
@@ -631,14 +633,14 @@ impl DataStore {
             );
         }
             if gc_status.removed_bad > 0 {
-                crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
             }
             if gc_status.still_bad > 0 {
-                crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
+                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
             }
-            crate::task_log!(
+            task_log!(
                 worker,
                 "Original data usage: {}",
                 HumanByte::from(gc_status.index_data_bytes),
@@ -646,7 +648,7 @@ impl DataStore {
             if gc_status.index_data_bytes > 0 {
                 let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
-                crate::task_log!(
+                task_log!(
                     worker,
                     "On-Disk usage: {} ({:.2}%)",
                     HumanByte::from(gc_status.disk_bytes),
@@ -654,7 +656,7 @@ impl DataStore {
                 );
             }
-            crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
+            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
             let deduplication_factor = if gc_status.disk_bytes > 0 {
                 (gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
@@ -662,11 +664,11 @@ impl DataStore {
                 1.0
             };
-            crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
+            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
             if gc_status.disk_chunks > 0 {
                 let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
-                crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
             }
             if let Ok(serialized) = serde_json::to_string(&gc_status) {
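
The closing log lines are simple derived metrics. A standalone sketch of the same arithmetic with the same divide-by-zero guards, using invented sample numbers (HumanByte formatting left out):

fn print_gc_summary(index_data_bytes: u64, disk_bytes: u64, disk_chunks: usize) {
    if index_data_bytes > 0 {
        let comp_per = (disk_bytes as f64 * 100.) / index_data_bytes as f64;
        println!("On-Disk usage: {} bytes ({:.2}%)", disk_bytes, comp_per);
    }
    println!("On-Disk chunks: {}", disk_chunks);
    let deduplication_factor = if disk_bytes > 0 {
        index_data_bytes as f64 / disk_bytes as f64
    } else {
        1.0
    };
    println!("Deduplication factor: {:.2}", deduplication_factor);
    if disk_chunks > 0 {
        let avg_chunk = disk_bytes / disk_chunks as u64;
        println!("Average chunk size: {} bytes", avg_chunk);
    }
}

fn main() {
    // 1 GiB of referenced index data stored as 256 MiB in 65536 chunks:
    // 25.00% on-disk usage, deduplication factor 4.00, 4096-byte chunks.
    print_gc_summary(1 << 30, 1 << 28, 65536);
}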

View File

@@ -186,6 +186,8 @@ pub use pbs_datastore::checksum_reader;
 pub use pbs_datastore::checksum_reader::*;
 pub use pbs_datastore::checksum_writer;
 pub use pbs_datastore::checksum_writer::*;
+pub use pbs_datastore::chunk_store;
+pub use pbs_datastore::chunk_store::*;
 pub use pbs_datastore::chunker;
 pub use pbs_datastore::chunker::*;
 pub use pbs_datastore::crypt_config;
@@ -218,9 +220,6 @@ pub use chunk_stat::*;
 mod read_chunk;
 pub use read_chunk::*;
-mod chunk_store;
-pub use chunk_store::*;
-
 mod fixed_index;
 pub use fixed_index::*;
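
These re-exports are a compatibility shim: crate-internal callers keep using the old paths while the module's real home is now pbs-datastore. An illustration (crate-internal, so not compilable on its own):

// Both paths name the same type after this commit; the first resolves
// only because backup/mod.rs re-exports pbs_datastore::chunk_store.
use crate::backup::chunk_store::ChunkStore;
// equivalently, via the new home:
// use pbs_datastore::chunk_store::ChunkStore;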

View File

@@ -6,6 +6,8 @@ use std::time::Instant;
 use anyhow::{bail, format_err, Error};
+use pbs_datastore::task_log;
+use pbs_datastore::task::TaskState;
 use pbs_tools::fs::lock_dir_noblock_shared;
 use crate::{
@@ -25,8 +27,6 @@ use crate::{
         archive_type,
     },
     server::UPID,
-    task::TaskState,
-    task_log,
     tools::ParallelHandler,
 };

View File

@@ -1,7 +1,5 @@
 //! Sync datastore from remote server
-use anyhow::{bail, format_err, Error};
-use serde_json::json;
 use std::collections::{HashMap, HashSet};
 use std::convert::TryFrom;
 use std::io::{Seek, SeekFrom};
@@ -9,15 +7,20 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
+
+use anyhow::{bail, format_err, Error};
+use serde_json::json;
+
+use proxmox::api::error::{HttpError, StatusCode};
+
+use pbs_datastore::task_log;
 use crate::{
     api2::types::*,
     backup::*,
     client::*,
     server::WorkerTask,
-    task_log,
     tools::{compute_file_csum, ParallelHandler},
 };
-use proxmox::api::error::{HttpError, StatusCode};
 // fixme: implement filters
 // fixme: delete vanished groups

View File

@@ -3,8 +3,6 @@
 //! The [backup](backup/index.html) module contains some detailed information
 //! on the inner workings of the backup server regarding data storage.
-pub mod task;
-
 #[macro_use]
 pub mod tools;

View File

@@ -2,13 +2,13 @@ use anyhow::Error;
 use proxmox::try_block;
+use pbs_datastore::{task_log, task_warn};
 use crate::{
     api2::types::*,
     backup::{compute_prune_info, BackupInfo, DataStore, PruneOptions},
     server::jobstate::Job,
     server::WorkerTask,
-    task_log,
-    task_warn,
 };
 pub fn do_prune_job(

View File

@@ -1,5 +1,7 @@
 use anyhow::{format_err, Error};
+
+use pbs_datastore::task_log;
 use crate::{
     server::WorkerTask,
     api2::types::*,
@@ -10,7 +12,6 @@ use crate::{
         verify_filter,
         verify_all_backups,
     },
-    task_log,
 };
 /// Runs a verification job.

View File

@@ -789,7 +789,7 @@ impl WorkerTask {
     }
 }
-impl crate::task::TaskState for WorkerTask {
+impl pbs_datastore::task::TaskState for WorkerTask {
     fn check_abort(&self) -> Result<(), Error> {
         self.fail_on_abort()
     }

View File

@@ -26,9 +26,10 @@ use proxmox::{
     api::section_config::SectionConfigData,
 };
+use pbs_datastore::task_log;
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task::TaskState,
     backup::{
         Fingerprint,
         KeyConfig,

View File

@@ -13,8 +13,9 @@ use anyhow::{bail, Error};
 use proxmox::tools::Uuid;
+use pbs_datastore::task_log;
+
 use crate::{
-    task_log,
     backup::{
         DataStore,
     },

View File

@@ -1,36 +0,0 @@
-pub use pbs_datastore::task::TaskState;
-
-#[macro_export]
-macro_rules! task_error {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_warn {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_log {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_debug {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_trace {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
-    }};
-}