cleanup worker task logging

To avoid name conflicts with WorkerTaskContext:

- renamed WorkerTask::log to WorkerTask::log_message

Note: WorkerTask::log and WorkerTaskContext::log have different function signatures.

Also renamed WorkerTask::warn to WorkerTask::log_warning for
consistency.

Use the task_log!() and task_warn!() macros more often.
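
For illustration, a minimal sketch of the old and new call patterns (the helper function, its arguments, and the log messages are made up for this example; task_log!/task_warn! come from pbs_tools and WorkerTask from proxmox_rest_server, as in the diff below):

    use pbs_tools::{task_log, task_warn};
    use proxmox_rest_server::WorkerTask;

    // Hypothetical helper, only meant to show the call patterns side by side.
    fn log_examples(worker: &WorkerTask, store: &str, err: &str) {
        // Before this commit:
        //   worker.log(format!("sync datastore '{}' start", store));
        //   worker.warn(format!("unable to sync '{}' - {}", store, err));

        // Renamed methods:
        worker.log_message(format!("sync datastore '{}' start", store));
        worker.log_warning(format!("unable to sync '{}' - {}", store, err));

        // Preferred: the format-style macros:
        task_log!(worker, "sync datastore '{}' start", store);
        task_warn!(worker, "unable to sync '{}' - {}", store, err);
    }
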
Dietmar Maurer
2021-09-24 09:30:00 +02:00
parent c8449217dc
commit 1ec0d70d09
21 changed files with 210 additions and 183 deletions

@ -53,6 +53,7 @@ use pbs_datastore::prune::compute_prune_info;
use pbs_tools::blocking::WrappedReaderStream;
use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_tools::{task_log, task_warn};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::{WorkerTask, formatter};
@ -770,9 +771,9 @@ pub fn verify(
)?
};
if !failed_dirs.is_empty() {
worker.log("Failed to verify the following snapshots/groups:");
task_log!(worker, "Failed to verify the following snapshots/groups:");
for dir in failed_dirs {
worker.log(format!("\t{}", dir));
task_log!(worker, "\t{}", dir);
}
bail!("verification failed - please check the log for details");
}
@ -865,11 +866,11 @@ pub fn prune(
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
task_log!(worker, "No prune selection - keeping all files.");
} else {
worker.log(format!("retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options)));
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id));
task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
store, backup_type, backup_id);
}
for (info, mut keep) in prune_info {
@ -888,7 +889,7 @@ pub fn prune(
if keep { "keep" } else { "remove" },
);
worker.log(msg);
task_log!(worker, "{}", msg);
prune_result.push(json!({
"backup-type": group.backup_type(),
@ -899,11 +900,11 @@ pub fn prune(
if !(dry_run || keep) {
if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
worker.warn(
format!(
"failed to remove dir {:?}: {}",
info.backup_dir.relative_path(), err
)
task_warn!(
worker,
"failed to remove dir {:?}: {}",
info.backup_dir.relative_path(),
err,
);
}
}

@ -528,7 +528,7 @@ impl BackupEnvironment {
self.auth_id.to_string(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
worker.log_message("Automatically verifying newly added snapshot");
let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
@ -548,11 +548,11 @@ impl BackupEnvironment {
}
pub fn log<S: AsRef<str>>(&self, msg: S) {
self.worker.log(msg);
self.worker.log_message(msg);
}
pub fn debug<S: AsRef<str>>(&self, msg: S) {
if self.debug { self.worker.log(msg); }
if self.debug { self.worker.log_message(msg); }
}
pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {

@ -18,6 +18,7 @@ use proxmox_acme_rs::Account;
use pbs_api_types::{Authid, PRIV_SYS_MODIFY};
use pbs_tools::ops::ControlFlow;
use pbs_tools::{task_log, task_warn};
use crate::acme::AcmeClient;
use crate::api2::types::{AcmeAccountName, AcmeChallengeSchema, KnownAcmeDirectory};
@ -220,15 +221,16 @@ fn register_account(
move |worker| async move {
let mut client = AcmeClient::new(directory);
worker.log(format!("Registering ACME account '{}'...", &name));
task_log!(worker, "Registering ACME account '{}'...", &name);
let account =
do_register_account(&mut client, &name, tos_url.is_some(), contact, None).await?;
worker.log(format!(
task_log!(
worker,
"Registration successful, account URL: {}",
account.location
));
);
Ok(())
},
@ -331,10 +333,11 @@ pub fn deactivate_account(
Ok(_account) => (),
Err(err) if !force => return Err(err),
Err(err) => {
worker.warn(format!(
task_warn!(
worker,
"error deactivating account {}, proceedeing anyway - {}",
name, err,
));
);
}
}
crate::config::acme::mark_account_deactivated(&name)?;

@ -89,7 +89,7 @@ fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
}
fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
if !quiet { worker.log("starting apt-get update") }
if !quiet { worker.log_message("starting apt-get update") }
read_and_update_proxy_config()?;
@ -101,7 +101,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
.map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
if !quiet {
worker.log(String::from_utf8(output.stdout)?);
worker.log_message(String::from_utf8(output.stdout)?);
}
// TODO: improve run_command to allow outputting both, stderr and stdout
@ -110,7 +110,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
let msg = String::from_utf8(output.stderr)
.map(|m| if m.is_empty() { String::from("no error message") } else { m })
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
worker.warn(msg);
worker.log_warning(msg);
} else {
bail!("terminated by signal");
}

@ -13,7 +13,7 @@ use proxmox::list_subdirs_api_method;
use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY};
use pbs_buildcfg::configdir;
use pbs_tools::cert;
use pbs_tools::{task_log, task_warn, cert};
use crate::acme::AcmeClient;
use crate::api2::types::AcmeDomain;
@ -303,7 +303,7 @@ async fn order_certificate(
};
if domains.is_empty() {
worker.log("No domains configured to be ordered from an ACME server.");
task_log!(worker, "No domains configured to be ordered from an ACME server.");
return Ok(None);
}
@ -311,11 +311,11 @@ async fn order_certificate(
let mut acme = node_config.acme_client().await?;
worker.log("Placing ACME order");
task_log!(worker, "Placing ACME order");
let order = acme
.new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase()))
.await?;
worker.log(format!("Order URL: {}", order.location));
task_log!(worker, "Order URL: {}", order.location);
let identifiers: Vec<String> = order
.data
@ -327,7 +327,7 @@ async fn order_certificate(
.collect();
for auth_url in &order.data.authorizations {
worker.log(format!("Getting authorization details from '{}'", auth_url));
task_log!(worker, "Getting authorization details from '{}'", auth_url);
let mut auth = acme.get_authorization(&auth_url).await?;
let domain = match &mut auth.identifier {
@ -335,11 +335,11 @@ async fn order_certificate(
};
if auth.status == Status::Valid {
worker.log(format!("{} is already validated!", domain));
task_log!(worker, "{} is already validated!", domain);
continue;
}
worker.log(format!("The validation for {} is pending", domain));
task_log!(worker, "The validation for {} is pending", domain);
let domain_config: &AcmeDomain = get_domain_config(&domain)?;
let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone");
let mut plugin_cfg =
@ -347,7 +347,7 @@ async fn order_certificate(
format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain)
})?;
worker.log("Setting up validation plugin");
task_log!(worker, "Setting up validation plugin");
let validation_url = plugin_cfg
.setup(&mut acme, &auth, domain_config, Arc::clone(&worker))
.await?;
@ -358,17 +358,18 @@ async fn order_certificate(
.teardown(&mut acme, &auth, domain_config, Arc::clone(&worker))
.await
{
worker.warn(format!(
task_warn!(
worker,
"Failed to teardown plugin '{}' for domain '{}' - {}",
plugin_id, domain, err
));
);
}
let _: () = result?;
}
worker.log("All domains validated");
worker.log("Creating CSR");
task_log!(worker, "All domains validated");
task_log!(worker, "Creating CSR");
let csr = proxmox_acme_rs::util::Csr::generate(&identifiers, &Default::default())?;
let mut finalize_error_cnt = 0u8;
@ -381,7 +382,7 @@ async fn order_certificate(
match order.status {
Status::Pending => {
worker.log("still pending, trying to finalize anyway");
task_log!(worker, "still pending, trying to finalize anyway");
let finalize = order
.finalize
.as_deref()
@ -396,7 +397,7 @@ async fn order_certificate(
tokio::time::sleep(Duration::from_secs(5)).await;
}
Status::Ready => {
worker.log("order is ready, finalizing");
task_log!(worker, "order is ready, finalizing");
let finalize = order
.finalize
.as_deref()
@ -405,18 +406,18 @@ async fn order_certificate(
tokio::time::sleep(Duration::from_secs(5)).await;
}
Status::Processing => {
worker.log("still processing, trying again in 30 seconds");
task_log!(worker, "still processing, trying again in 30 seconds");
tokio::time::sleep(Duration::from_secs(30)).await;
}
Status::Valid => {
worker.log("valid");
task_log!(worker, "valid");
break;
}
other => bail!("order status: {:?}", other),
}
}
worker.log("Downloading certificate");
task_log!(worker, "Downloading certificate");
let certificate = acme
.get_certificate(
order
@ -438,10 +439,10 @@ async fn request_validation(
auth_url: &str,
validation_url: &str,
) -> Result<(), Error> {
worker.log("Triggering validation");
task_log!(worker, "Triggering validation");
acme.request_challenge_validation(&validation_url).await?;
worker.log("Sleeping for 5 seconds");
task_log!(worker, "Sleeping for 5 seconds");
tokio::time::sleep(Duration::from_secs(5)).await;
loop {
@ -450,7 +451,7 @@ async fn request_validation(
let auth = acme.get_authorization(&auth_url).await?;
match auth.status {
Status::Pending => {
worker.log("Status is still 'pending', trying again in 10 seconds");
task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
tokio::time::sleep(Duration::from_secs(10)).await;
}
Status::Valid => return Ok(()),
@ -567,11 +568,11 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error
auth_id,
true,
move |worker| async move {
worker.log("Loading ACME account");
task_log!(worker, "Loading ACME account");
let mut acme = node_config.acme_client().await?;
worker.log("Revoking old certificate");
task_log!(worker, "Revoking old certificate");
acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
worker.log("Deleting certificate and regenerating a self-signed one");
task_log!(worker, "Deleting certificate and regenerating a self-signed one");
delete_custom_certificate().await?;
Ok(())
},

@ -10,6 +10,7 @@ use pbs_api_types::{
DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
};
use pbs_tools::task_log;
use crate::tools::disks::{
DiskManage, FileSystemType, DiskUsageType,
@ -169,7 +170,7 @@ pub fn create_datastore_disk(
let upid_str = WorkerTask::new_thread(
"dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create datastore '{}' on disk {}", name, disk));
task_log!(worker, "create datastore '{}' on disk {}", name, disk);
let add_datastore = add_datastore.unwrap_or(false);
let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);

@ -16,6 +16,7 @@ use crate::tools::disks::{
get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
};
use proxmox_rest_server::WorkerTask;
use pbs_tools::task_log;
pub mod directory;
pub mod zfs;
@ -155,7 +156,7 @@ pub fn initialize_disk(
let upid_str = WorkerTask::new_thread(
"diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("initialize disk {}", disk));
task_log!(worker, "initialize disk {}", disk);
let disk_manager = DiskManage::new();
let disk_info = disk_manager.disk_by_name(&disk)?;

@ -13,6 +13,7 @@ use pbs_api_types::{
DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
};
use pbs_tools::task_log;
use crate::tools::disks::{
zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
@ -231,7 +232,7 @@ pub fn create_zpool(
let upid_str = WorkerTask::new_thread(
"zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
task_log!(worker, "create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text);
let mut command = std::process::Command::new("zpool");
@ -265,10 +266,10 @@ pub fn create_zpool(
}
}
worker.log(format!("# {:?}", command));
task_log!(worker, "# {:?}", command);
let output = pbs_tools::run_command(command, None)?;
worker.log(output);
task_log!(worker, "{}", output);
if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
let import_unit = format!("zfs-import@{}.service", proxmox::tools::systemd::escape_unit(&name, false));
@ -278,9 +279,9 @@ pub fn create_zpool(
if let Some(compression) = compression {
let mut command = std::process::Command::new("zfs");
command.args(&["set", &format!("compression={}", compression), &name]);
worker.log(format!("# {:?}", command));
task_log!(worker, "# {:?}", command);
let output = pbs_tools::run_command(command, None)?;
worker.log(output);
task_log!(worker, "{}", output);
}
if add_datastore {

@ -183,7 +183,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
let stdout_fut = async move {
let mut reader = BufReader::new(stdout).lines();
while let Some(line) = reader.next_line().await? {
worker_stdout.log(line);
worker_stdout.log_message(line);
}
Ok::<(), Error>(())
};
@ -192,7 +192,7 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
let stderr_fut = async move {
let mut reader = BufReader::new(stderr).lines();
while let Some(line) = reader.next_line().await? {
worker_stderr.warn(line);
worker_stderr.log_warning(line);
}
Ok::<(), Error>(())
};
@ -224,9 +224,9 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
}
if let Err(err) = child.kill().await {
worker.warn(format!("error killing termproxy: {}", err));
worker.log_warning(format!("error killing termproxy: {}", err));
} else if let Err(err) = child.wait().await {
worker.warn(format!("error awaiting termproxy: {}", err));
worker.log_warning(format!("error awaiting termproxy: {}", err));
}
}

@ -13,11 +13,12 @@ use pbs_api_types::{
DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
};
use pbs_tools::task_log;
use proxmox_rest_server::WorkerTask;
use pbs_config::CachedUserInfo;
use crate::server::{jobstate::Job, pull::pull_store};
use crate::backup::DataStore;
use pbs_config::CachedUserInfo;
pub fn check_pull_privs(
auth_id: &Authid,
@ -97,16 +98,21 @@ pub fn do_sync_job(
let sync_owner = sync_job.owner.unwrap_or_else(|| Authid::root_auth_id().clone());
let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
worker.log(format!("Starting datastore sync job '{}'", job_id));
task_log!(worker, "Starting datastore sync job '{}'", job_id);
if let Some(event_str) = schedule {
worker.log(format!("task triggered by schedule '{}'", event_str));
task_log!(worker, "task triggered by schedule '{}'", event_str);
}
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
task_log!(
worker,
"sync datastore '{}' from '{}/{}'",
sync_job.store,
sync_job.remote,
sync_job.remote_store,
);
pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, sync_owner).await?;
worker.log(format!("sync job '{}' end", &job_id));
task_log!(worker, "sync job '{}' end", &job_id);
Ok(())
};
@ -186,7 +192,7 @@ async fn pull (
// fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.to_string(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
task_log!(worker, "sync datastore '{}' start", store);
let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
let future = select!{
@ -196,7 +202,7 @@ async fn pull (
let _ = future?;
worker.log(format!("sync datastore '{}' end", store));
task_log!(worker, "sync datastore '{}' end", store);
Ok(())
})?;

@ -52,11 +52,11 @@ impl ReaderEnvironment {
}
pub fn log<S: AsRef<str>>(&self, msg: S) {
self.worker.log(msg);
self.worker.log_message(msg);
}
pub fn debug<S: AsRef<str>>(&self, msg: S) {
if self.debug { self.worker.log(msg); }
if self.debug { self.worker.log_message(msg); }
}

@ -35,7 +35,7 @@ use pbs_tape::{
sg_tape::tape_alert_flags_critical,
linux_list_drives::{lto_tape_device_list, lookup_device_identification, open_lto_tape_device},
};
use pbs_tools::task_log;
use pbs_tools::{task_log, task_warn};
use proxmox_rest_server::WorkerTask;
use crate::{
@ -548,7 +548,7 @@ fn write_media_label(
let media_id = if let Some(ref pool) = pool {
// assign media to pool by writing special media set label
worker.log(format!("Label media '{}' for pool '{}'", label.label_text, pool));
task_log!(worker, "Label media '{}' for pool '{}'", label.label_text, pool);
let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);
drive.write_media_set_label(&set, None)?;
@ -563,7 +563,7 @@ fn write_media_label(
media_id
} else {
worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
task_log!(worker, "Label media '{}' (no pool assignment)", label.label_text);
let media_id = MediaId { label, media_set_label: None };
@ -771,7 +771,7 @@ pub fn clean_drive(
move |worker, config| {
let (mut changer, _changer_name) = required_media_changer(&config, &drive)?;
worker.log("Starting drive clean");
task_log!(worker, "Starting drive clean");
changer.clean_drive()?;
@ -782,7 +782,7 @@ pub fn clean_drive(
// test for critical tape alert flags
if let Ok(alert_flags) = handle.tape_alert_flags() {
if !alert_flags.is_empty() {
worker.log(format!("TapeAlertFlags: {:?}", alert_flags));
task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
if tape_alert_flags_critical(alert_flags) {
bail!("found critical tape alert flags: {:?}", alert_flags);
}
@ -791,13 +791,13 @@ pub fn clean_drive(
// test wearout (max. 50 mounts)
if let Ok(volume_stats) = handle.volume_statistics() {
worker.log(format!("Volume mounts: {}", volume_stats.volume_mounts));
task_log!(worker, "Volume mounts: {}", volume_stats.volume_mounts);
let wearout = volume_stats.volume_mounts * 2; // (*100.0/50.0);
worker.log(format!("Cleaning tape wearout: {}%", wearout));
task_log!(worker, "Cleaning tape wearout: {}%", wearout);
}
}
worker.log("Drive cleaned successfully");
task_log!(worker, "Drive cleaned successfully");
Ok(())
},
@ -921,7 +921,7 @@ pub fn update_inventory(
let label_text_list = changer.online_media_label_texts()?;
if label_text_list.is_empty() {
worker.log("changer device does not list any media labels".to_string());
task_log!(worker, "changer device does not list any media labels");
}
let state_path = Path::new(TAPE_STATUS_DIR);
@ -932,36 +932,36 @@ pub fn update_inventory(
for label_text in label_text_list.iter() {
if label_text.starts_with("CLN") {
worker.log(format!("skip cleaning unit '{}'", label_text));
task_log!(worker, "skip cleaning unit '{}'", label_text);
continue;
}
let label_text = label_text.to_string();
if !read_all_labels.unwrap_or(false) && inventory.find_media_by_label_text(&label_text).is_some() {
worker.log(format!("media '{}' already inventoried", label_text));
task_log!(worker, "media '{}' already inventoried", label_text);
continue;
}
if let Err(err) = changer.load_media(&label_text) {
worker.warn(format!("unable to load media '{}' - {}", label_text, err));
task_warn!(worker, "unable to load media '{}' - {}", label_text, err);
continue;
}
let mut drive = open_drive(&config, &drive)?;
match drive.read_label() {
Err(err) => {
worker.warn(format!("unable to read label form media '{}' - {}", label_text, err));
task_warn!(worker, "unable to read label form media '{}' - {}", label_text, err);
}
Ok((None, _)) => {
worker.log(format!("media '{}' is empty", label_text));
task_log!(worker, "media '{}' is empty", label_text);
}
Ok((Some(media_id), _key_config)) => {
if label_text != media_id.label.label_text {
worker.warn(format!("label text mismatch ({} != {})", label_text, media_id.label.label_text));
task_warn!(worker, "label text mismatch ({} != {})", label_text, media_id.label.label_text);
continue;
}
worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
task_log!(worker, "inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid);
if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
let _pool_lock = lock_media_pool(state_path, pool)?;
@ -1057,14 +1057,14 @@ fn barcode_label_media_worker(
inventory.reload()?;
if inventory.find_media_by_label_text(&label_text).is_some() {
worker.log(format!("media '{}' already inventoried (already labeled)", label_text));
task_log!(worker, "media '{}' already inventoried (already labeled)", label_text);
continue;
}
worker.log(format!("checking/loading media '{}'", label_text));
task_log!(worker, "checking/loading media '{}'", label_text);
if let Err(err) = changer.load_media(&label_text) {
worker.warn(format!("unable to load media '{}' - {}", label_text, err));
task_warn!(worker, "unable to load media '{}' - {}", label_text, err);
continue;
}
@ -1073,13 +1073,13 @@ fn barcode_label_media_worker(
match drive.read_next_file() {
Ok(_reader) => {
worker.log(format!("media '{}' is not empty (format it first)", label_text));
task_log!(worker, "media '{}' is not empty (format it first)", label_text);
continue;
}
Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
Err(_err) => {
worker.warn(format!("media '{}' read error (maybe not empty - format it first)", label_text));
task_warn!(worker, "media '{}' read error (maybe not empty - format it first)", label_text);
continue;
}
}
@ -1249,15 +1249,17 @@ pub fn catalog_media(
let media_id = match drive.read_label()? {
(Some(media_id), key_config) => {
worker.log(format!(
task_log!(
worker,
"found media label: {}",
serde_json::to_string_pretty(&serde_json::to_value(&media_id)?)?
));
);
if key_config.is_some() {
worker.log(format!(
task_log!(
worker,
"encryption key config: {}",
serde_json::to_string_pretty(&serde_json::to_value(&key_config)?)?
));
);
}
media_id
},
@ -1270,7 +1272,7 @@ pub fn catalog_media(
let (_media_set_lock, media_set_uuid) = match media_id.media_set_label {
None => {
worker.log("media is empty");
task_log!(worker, "media is empty");
let _lock = lock_unassigned_media_pool(status_path)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.store(media_id.clone(), false)?;
@ -1278,7 +1280,7 @@ pub fn catalog_media(
}
Some(ref set) => {
if set.uuid.as_ref() == [0u8;16] { // media is empty
worker.log("media is empty");
task_log!(worker, "media is empty");
let _lock = lock_unassigned_media_pool(status_path)?;
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
inventory.store(media_id.clone(), false)?;