diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs
index 50072bf4..cc5103f4 100644
--- a/pbs-api-types/src/lib.rs
+++ b/pbs-api-types/src/lib.rs
@@ -1,5 +1,8 @@
 //! Basic API types used by most of the PBS code.
 
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
 use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
 use proxmox::const_regex;
 
@@ -37,6 +40,7 @@ pub use userid::{Username, UsernameRef};
 pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
 
 pub mod upid;
+pub use upid::UPID;
 
 const_regex! {
     pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
@@ -84,3 +88,56 @@ pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (singl
     .schema();
 
 pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
+
+#[api(
+    properties: {
+        "upid": {
+            optional: true,
+            type: UPID,
+        },
+    },
+)]
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Garbage collection status.
+pub struct GarbageCollectionStatus {
+    pub upid: Option<String>,
+    /// Number of processed index files.
+    pub index_file_count: usize,
+    /// Sum of bytes referred by index files.
+    pub index_data_bytes: u64,
+    /// Bytes used on disk.
+    pub disk_bytes: u64,
+    /// Chunks used on disk.
+    pub disk_chunks: usize,
+    /// Sum of removed bytes.
+    pub removed_bytes: u64,
+    /// Number of removed chunks.
+    pub removed_chunks: usize,
+    /// Sum of pending bytes (pending removal - kept for safety).
+    pub pending_bytes: u64,
+    /// Number of pending chunks (pending removal - kept for safety).
+    pub pending_chunks: usize,
+    /// Number of chunks marked as .bad by verify that have been removed by GC.
+    pub removed_bad: usize,
+    /// Number of chunks still marked as .bad after garbage collection.
+    pub still_bad: usize,
+}
+
+impl Default for GarbageCollectionStatus {
+    fn default() -> Self {
+        GarbageCollectionStatus {
+            upid: None,
+            index_file_count: 0,
+            index_data_bytes: 0,
+            disk_bytes: 0,
+            disk_chunks: 0,
+            removed_bytes: 0,
+            removed_chunks: 0,
+            pending_bytes: 0,
+            pending_chunks: 0,
+            removed_bad: 0,
+            still_bad: 0,
+        }
+    }
+}
diff --git a/src/backup/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
similarity index 94%
rename from src/backup/chunk_store.rs
rename to pbs-datastore/src/chunk_store.rs
index 1ae85d64..ddf0a769 100644
--- a/src/backup/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -1,17 +1,17 @@
-use anyhow::{bail, format_err, Error};
-
-use std::path::{Path, PathBuf};
 use std::io::Write;
-use std::sync::{Arc, Mutex};
 use std::os::unix::io::AsRawFd;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, format_err, Error};
 
 use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
 
-use crate::task_log;
-use crate::tools;
-use crate::api2::types::GarbageCollectionStatus;
+use pbs_api_types::GarbageCollectionStatus;
+use pbs_tools::process_locker::{self, ProcessLocker};
 
-use super::DataBlob;
+use crate::DataBlob;
+use crate::task_log;
 use crate::task::TaskState;
 
 /// File system based chunk store
@@ -20,7 +20,7 @@ pub struct ChunkStore {
     pub (crate) base: PathBuf,
     chunk_dir: PathBuf,
     mutex: Mutex<()>,
-    locker: Arc<Mutex<tools::ProcessLocker>>,
+    locker: Arc<Mutex<ProcessLocker>>,
 }
 
 // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
@@ -62,6 +62,10 @@ impl ChunkStore {
         chunk_dir
     }
 
+    pub fn base(&self) -> &Path {
+        &self.base
+    }
+
     pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn TaskState>) -> Result<Self, Error>
         where P: Into<PathBuf>,
     {
@@ -139,7 +143,7 @@ impl ChunkStore {
 
         let lockfile_path = Self::lockfile_path(&base);
 
-        let locker = tools::ProcessLocker::new(&lockfile_path)?;
+        let locker = ProcessLocker::new(&lockfile_path)?;
 
         Ok(ChunkStore {
             name: name.to_owned(),
@@ -274,15 +278,16 @@ impl ChunkStore {
     }
 
     pub fn oldest_writer(&self) -> Option<i64> {
-        tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
+        ProcessLocker::oldest_shared_lock(self.locker.clone())
     }
 
-    pub fn sweep_unused_chunks(
+    pub fn sweep_unused_chunks<F: Fn() -> Result<(), Error>>(
         &self,
         oldest_writer: i64,
         phase1_start_time: i64,
         status: &mut GarbageCollectionStatus,
         worker: &dyn TaskState,
+        fail_on_shutdown: F,
     ) -> Result<(), Error> {
         use nix::sys::stat::fstatat;
         use nix::unistd::{unlinkat, UnlinkatFlags};
@@ -310,7 +315,7 @@ impl ChunkStore {
             }
 
             worker.check_abort()?;
-            tools::fail_on_shutdown()?;
+            fail_on_shutdown()?;
 
             let (dirfd, entry) = match entry {
                 Ok(entry) => (entry.parent_fd(), entry),
@@ -442,12 +447,12 @@ impl ChunkStore {
         self.base.clone()
     }
 
-    pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
-        tools::ProcessLocker::try_shared_lock(self.locker.clone())
+    pub fn try_shared_lock(&self) -> Result<process_locker::ProcessLockSharedGuard, Error> {
+        ProcessLocker::try_shared_lock(self.locker.clone())
     }
 
-    pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
-        tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
+    pub fn try_exclusive_lock(&self) -> Result<process_locker::ProcessLockExclusiveGuard, Error> {
+        ProcessLocker::try_exclusive_lock(self.locker.clone())
     }
 }
 
@@ -466,7 +471,7 @@ fn test_chunk_store1() {
     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
     let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None).unwrap();
 
-    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
+    let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
 
     let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(!exists);
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 25ec669c..cae66905 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -182,6 +182,7 @@ pub mod backup_info;
 pub mod catalog;
 pub mod checksum_reader;
 pub mod checksum_writer;
+pub mod chunk_store;
 pub mod chunker;
 pub mod crypt_config;
 pub mod crypt_reader;
@@ -198,6 +199,7 @@ pub mod task;
 pub use backup_info::{BackupDir, BackupGroup, BackupInfo};
 pub use checksum_reader::ChecksumReader;
 pub use checksum_writer::ChecksumWriter;
+pub use chunk_store::ChunkStore;
 pub use chunker::Chunker;
 pub use crypt_config::{CryptConfig, CryptMode, Fingerprint};
 pub use crypt_reader::CryptReader;
diff --git a/pbs-datastore/src/task.rs b/pbs-datastore/src/task.rs
index 91a6bb11..8cfd6fe8 100644
--- a/pbs-datastore/src/task.rs
+++ b/pbs-datastore/src/task.rs
@@ -19,3 +19,38 @@ impl<T: TaskState + ?Sized> TaskState for std::sync::Arc<T> {
         <T as TaskState>::log(&*self, level, message)
     }
 }
+
+#[macro_export]
+macro_rules! task_error {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_warn {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_log {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_debug {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
+    }};
+}
+
+#[macro_export]
+macro_rules! task_trace {
+    ($task:expr, $($fmt:tt)+) => {{
+        $crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
+    }};
+}
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 316d9f97..d083d0c4 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -8,6 +8,8 @@ use proxmox::api::{api, Router, RpcEnvironment, Permission};
 use proxmox::api::section_config::SectionConfigData;
 use proxmox::api::schema::parse_property_string;
 
+use pbs_datastore::task::TaskState;
+
 use crate::api2::types::*;
 use crate::backup::*;
 use crate::config::cached_user_info::CachedUserInfo;
@@ -54,7 +56,7 @@ pub(crate) fn do_create_datastore(
     _lock: std::fs::File,
     mut config: SectionConfigData,
     datastore: DataStoreConfig,
-    worker: Option<&dyn crate::task::TaskState>,
+    worker: Option<&dyn TaskState>,
 ) -> Result<(), Error> {
 
     let path: PathBuf = datastore.path.clone().into();
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index c3b541c7..8119482f 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -15,9 +15,10 @@ use proxmox::{
         },
     },
 };
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task_warn,
     config::{
         self,
         cached_user_info::CachedUserInfo,
@@ -55,7 +56,6 @@ use crate::{
         Userid,
     },
     server::WorkerTask,
-    task::TaskState,
    tape::{
         TAPE_STATUS_DIR,
         Inventory,
diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs
index 0e4a539f..5b698e34 100644
--- a/src/api2/tape/drive.rs
+++ b/src/api2/tape/drive.rs
@@ -22,8 +22,9 @@ use proxmox::{
         },
     },
 };
+use pbs_datastore::task_log;
+
 use crate::{
-    task_log,
     config::{
         self,
         cached_user_info::CachedUserInfo,
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 14e20ee4..68033c4a 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -28,10 +28,10 @@ use proxmox::{
         },
     },
 };
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task_warn,
-    task::TaskState,
     tools::ParallelHandler,
     api2::types::{
         DATASTORE_MAP_ARRAY_SCHEMA,
diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs
index 652d7bf4..6698f4b7 100644
--- a/src/api2/types/mod.rs
+++ b/src/api2/types/mod.rs
@@ -683,59 +683,6 @@ pub struct BackupContent {
     pub size: Option<u64>,
 }
 
-#[api(
-    properties: {
-        "upid": {
-            optional: true,
-            schema: UPID_SCHEMA,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Garbage collection status.
-pub struct GarbageCollectionStatus {
-    pub upid: Option<String>,
-    /// Number of processed index files.
-    pub index_file_count: usize,
-    /// Sum of bytes referred by index files.
-    pub index_data_bytes: u64,
-    /// Bytes used on disk.
-    pub disk_bytes: u64,
-    /// Chunks used on disk.
-    pub disk_chunks: usize,
-    /// Sum of removed bytes.
-    pub removed_bytes: u64,
-    /// Number of removed chunks.
-    pub removed_chunks: usize,
-    /// Sum of pending bytes (pending removal - kept for safety).
-    pub pending_bytes: u64,
-    /// Number of pending chunks (pending removal - kept for safety).
-    pub pending_chunks: usize,
-    /// Number of chunks marked as .bad by verify that have been removed by GC.
-    pub removed_bad: usize,
-    /// Number of chunks still marked as .bad after garbage collection.
-    pub still_bad: usize,
-}
-
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            upid: None,
-            index_file_count: 0,
-            index_data_bytes: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-            removed_bytes: 0,
-            removed_chunks: 0,
-            pending_bytes: 0,
-            pending_chunks: 0,
-            removed_bad: 0,
-            still_bad: 0,
-        }
-    }
-}
-
 #[api()]
 #[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index 55458de3..412e9f88 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -12,6 +12,8 @@ use lazy_static::lazy_static;
 
 use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};
 
+use pbs_datastore::{task_log, task_warn};
+use pbs_datastore::task::TaskState;
 use pbs_tools::format::HumanByte;
 use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
 
@@ -23,7 +25,6 @@ use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NA
 use super::index::*;
 use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore::{self, DataStoreConfig};
-use crate::task::TaskState;
 use crate::tools;
 use crate::api2::types::{Authid, GarbageCollectionStatus};
 use crate::server::UPID;
@@ -55,7 +56,7 @@ impl DataStore {
 
         if let Some(datastore) = map.get(name) {
             // Compare Config - if changed, create new Datastore object!
-            if datastore.chunk_store.base == path &&
+            if datastore.chunk_store.base() == path &&
                 datastore.verify_new == config.verify_new.unwrap_or(false)
             {
                 return Ok(datastore.clone());
@@ -487,7 +488,7 @@ impl DataStore {
             tools::fail_on_shutdown()?;
             let digest = index.index_digest(pos).unwrap();
             if !self.chunk_store.cond_touch_chunk(digest, false)? {
-                crate::task_warn!(
+                task_warn!(
                     worker,
                     "warning: unable to access non-existent chunk {}, required by {:?}",
                     proxmox::tools::digest_to_hex(digest),
@@ -558,7 +559,7 @@ impl DataStore {
 
             let percentage = (i + 1) * 100 / image_count;
             if percentage > last_percentage {
-                crate::task_log!(
+                task_log!(
                     worker,
                     "marked {}% ({} of {} index files)",
                     percentage,
@@ -570,7 +571,7 @@ impl DataStore {
         }
 
         if strange_paths_count > 0 {
-            crate::task_log!(
+            task_log!(
                 worker,
                 "found (and marked) {} index files outside of expected directory scheme",
                 strange_paths_count,
@@ -604,26 +605,27 @@ impl DataStore {
             let mut gc_status = GarbageCollectionStatus::default();
             gc_status.upid = Some(upid.to_string());
 
-            crate::task_log!(worker, "Start GC phase1 (mark used chunks)");
+            task_log!(worker, "Start GC phase1 (mark used chunks)");
 
             self.mark_used_chunks(&mut gc_status, worker)?;
 
-            crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
             self.chunk_store.sweep_unused_chunks(
                 oldest_writer,
                 phase1_start_time,
                 &mut gc_status,
                 worker,
+                crate::tools::fail_on_shutdown,
             )?;
 
-            crate::task_log!(
+            task_log!(
                 worker,
                 "Removed garbage: {}",
                 HumanByte::from(gc_status.removed_bytes),
             );
-            crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
+            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
             if gc_status.pending_bytes > 0 {
-                crate::task_log!(
+                task_log!(
                     worker,
                     "Pending removals: {} (in {} chunks)",
                     HumanByte::from(gc_status.pending_bytes),
@@ -631,14 +633,14 @@
                 );
             }
             if gc_status.removed_bad > 0 {
-                crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
             }
 
             if gc_status.still_bad > 0 {
-                crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
+                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
             }
 
-            crate::task_log!(
+            task_log!(
                 worker,
                 "Original data usage: {}",
                 HumanByte::from(gc_status.index_data_bytes),
@@ -646,7 +648,7 @@
 
             if gc_status.index_data_bytes > 0 {
                 let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
-                crate::task_log!(
+                task_log!(
                     worker,
                     "On-Disk usage: {} ({:.2}%)",
                     HumanByte::from(gc_status.disk_bytes),
@@ -654,7 +656,7 @@
                 );
             }
 
-            crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
+            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
 
             let deduplication_factor = if gc_status.disk_bytes > 0 {
                 (gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
@@ -662,11 +664,11 @@
                 1.0
             };
 
-            crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
+            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
 
             if gc_status.disk_chunks > 0 {
                 let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
-                crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
             }
 
             if let Ok(serialized) = serde_json::to_string(&gc_status) {
diff --git a/src/backup/mod.rs b/src/backup/mod.rs
index c0acc246..34d1c5ac 100644
--- a/src/backup/mod.rs
+++ b/src/backup/mod.rs
@@ -186,6 +186,8 @@ pub use pbs_datastore::checksum_reader;
 pub use pbs_datastore::checksum_reader::*;
 pub use pbs_datastore::checksum_writer;
 pub use pbs_datastore::checksum_writer::*;
+pub use pbs_datastore::chunk_store;
+pub use pbs_datastore::chunk_store::*;
 pub use pbs_datastore::chunker;
 pub use pbs_datastore::chunker::*;
 pub use pbs_datastore::crypt_config;
@@ -218,9 +220,6 @@ pub use chunk_stat::*;
 
 mod read_chunk;
 pub use read_chunk::*;
 
-mod chunk_store;
-pub use chunk_store::*;
-
 mod fixed_index;
 pub use fixed_index::*;
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 59aa25d0..57b1acf6 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -6,6 +6,8 @@ use std::time::Instant;
 
 use anyhow::{bail, format_err, Error};
 
+use pbs_datastore::task_log;
+use pbs_datastore::task::TaskState;
 use pbs_tools::fs::lock_dir_noblock_shared;
 
 use crate::{
@@ -25,8 +27,6 @@ use crate::{
         archive_type,
     },
     server::UPID,
-    task::TaskState,
-    task_log,
     tools::ParallelHandler,
 };
 
diff --git a/src/client/pull.rs b/src/client/pull.rs
index 19f91961..8db43f90 100644
--- a/src/client/pull.rs
+++ b/src/client/pull.rs
@@ -1,7 +1,5 @@
 //! Sync datastore from remote server
 
-use anyhow::{bail, format_err, Error};
-use serde_json::json;
 use std::collections::{HashMap, HashSet};
 use std::convert::TryFrom;
 use std::io::{Seek, SeekFrom};
@@ -9,15 +7,20 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
 
+use anyhow::{bail, format_err, Error};
+use serde_json::json;
+
+use proxmox::api::error::{HttpError, StatusCode};
+
+use pbs_datastore::task_log;
+
 use crate::{
     api2::types::*,
     backup::*,
     client::*,
     server::WorkerTask,
-    task_log,
     tools::{compute_file_csum, ParallelHandler},
 };
-use proxmox::api::error::{HttpError, StatusCode};
 
 // fixme: implement filters
 // fixme: delete vanished groups
diff --git a/src/lib.rs b/src/lib.rs
index 0af303a4..4815c414 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,8 +3,6 @@
 //! The [backup](backup/index.html) module contains some detailed information
 //! on the inner workings of the backup server regarding data storage.
 
-pub mod task;
-
 #[macro_use]
 pub mod tools;
 
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index ac56d167..248068ea 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -2,13 +2,13 @@ use anyhow::Error;
 
 use proxmox::try_block;
 
+use pbs_datastore::{task_log, task_warn};
+
 use crate::{
     api2::types::*,
     backup::{compute_prune_info, BackupInfo, DataStore, PruneOptions},
     server::jobstate::Job,
     server::WorkerTask,
-    task_log,
-    task_warn,
 };
 
 pub fn do_prune_job(
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 878fade5..ee9a4532 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -1,5 +1,7 @@
 use anyhow::{format_err, Error};
 
+use pbs_datastore::task_log;
+
 use crate::{
     server::WorkerTask,
     api2::types::*,
@@ -10,7 +12,6 @@ use crate::{
         verify_filter,
         verify_all_backups,
     },
-    task_log,
 };
 
 /// Runs a verification job.
diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs
index f60556ef..13578446 100644
--- a/src/server/worker_task.rs
+++ b/src/server/worker_task.rs
@@ -789,7 +789,7 @@ impl WorkerTask {
     }
 }
 
-impl crate::task::TaskState for WorkerTask {
+impl pbs_datastore::task::TaskState for WorkerTask {
     fn check_abort(&self) -> Result<(), Error> {
         self.fail_on_abort()
     }
diff --git a/src/tape/drive/mod.rs b/src/tape/drive/mod.rs
index 7c218a46..8010d576 100644
--- a/src/tape/drive/mod.rs
+++ b/src/tape/drive/mod.rs
@@ -26,9 +26,10 @@ use proxmox::{
     api::section_config::SectionConfigData,
 };
 
+use pbs_datastore::task_log;
+use pbs_datastore::task::TaskState;
+
 use crate::{
-    task_log,
-    task::TaskState,
     backup::{
         Fingerprint,
         KeyConfig,
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index a788cbeb..6f887c60 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -13,8 +13,9 @@ use anyhow::{bail, Error};
 
 use proxmox::tools::Uuid;
 
+use pbs_datastore::task_log;
+
 use crate::{
-    task_log,
     backup::{
         DataStore,
     },
diff --git a/src/task.rs b/src/task.rs
deleted file mode 100644
index 69498e20..00000000
--- a/src/task.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-pub use pbs_datastore::task::TaskState;
-
-#[macro_export]
-macro_rules! task_error {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_warn {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_log {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_debug {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_trace {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::TaskState::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
-    }};
-}
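
Usage note (not part of the patch): a minimal sketch of how a caller consumes the relocated pieces after this change - the task_log! macro now exported by pbs-datastore, GarbageCollectionStatus now in pbs-api-types, and the new fail_on_shutdown closure parameter of ChunkStore::sweep_unused_chunks(). It assumes pbs-datastore, pbs-api-types, anyhow and log are available as dependencies; StderrTask and demo() are hypothetical names used only for illustration, and the || Ok(()) closure merely stands in for crate::tools::fail_on_shutdown, which stays in the main crate.

    use anyhow::Error;

    use pbs_api_types::GarbageCollectionStatus;
    use pbs_datastore::ChunkStore;
    use pbs_datastore::task_log;
    use pbs_datastore::task::TaskState;

    /// Hypothetical TaskState implementor that just writes to stderr.
    struct StderrTask;

    impl TaskState for StderrTask {
        fn check_abort(&self) -> Result<(), Error> {
            Ok(()) // this toy task can never be aborted
        }

        fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
            eprintln!("{}: {}", level, message);
        }
    }

    fn demo(chunk_store: &ChunkStore) -> Result<(), Error> {
        let worker = StderrTask;

        // GarbageCollectionStatus now lives in pbs-api-types; Default yields all-zero counters.
        let mut gc_status = GarbageCollectionStatus::default();

        // task_log! is now imported from pbs-datastore instead of the main crate.
        task_log!(&worker, "Start GC phase2 (sweep unused chunks)");

        // sweep_unused_chunks() no longer calls the main crate's shutdown check itself;
        // the caller passes it in (datastore.rs passes crate::tools::fail_on_shutdown).
        chunk_store.sweep_unused_chunks(
            0,              // oldest_writer (placeholder value)
            0,              // phase1_start_time (placeholder value)
            &mut gc_status,
            &worker,
            || Ok(()),      // fail_on_shutdown stand-in
        )?;

        task_log!(&worker, "Removed chunks: {}", gc_status.removed_chunks);
        Ok(())
    }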