tape: backup - implement export-media-set option

Dietmar Maurer 2021-01-10 11:59:55 +01:00
parent 0057f0e580
commit edb90f6afa
3 changed files with 50 additions and 2 deletions

View File

@@ -56,6 +56,11 @@ use crate::{
                 type: bool,
                 optional: true,
             },
+            "export-media-set": {
+                description: "Export media set upon job completion.",
+                type: bool,
+                optional: true,
+            },
         },
     },
     returns: {
@@ -67,6 +72,7 @@ pub fn backup(
     store: String,
     pool: String,
     eject_media: Option<bool>,
+    export_media_set: Option<bool>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
@@ -84,6 +90,7 @@ pub fn backup(
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let eject_media = eject_media.unwrap_or(false);
+    let export_media_set = export_media_set.unwrap_or(false);

     let upid_str = WorkerTask::new_thread(
         "tape-backup",
@@ -91,7 +98,7 @@
         auth_id,
         to_stdout,
         move |worker| {
-            backup_worker(&worker, datastore, &pool_config, eject_media)?;
+            backup_worker(&worker, datastore, &pool_config, eject_media, export_media_set)?;
             Ok(())
         }
     )?;
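
For readers skimming the hunk above: WorkerTask::new_thread runs the backup in its own worker thread, and the `move` closure takes ownership of the already-resolved flags. A plain-std analogy (std::thread and the stub names below are illustrative, not the proxmox API) of why backup_worker() ends up receiving plain bools:

    use std::thread;

    // Stand-in for the real backup_worker(); it only shows the flags arriving
    // as plain bools inside the worker thread.
    fn backup_worker_stub(eject_media: bool, export_media_set: bool) {
        println!("worker: eject_media={}, export_media_set={}", eject_media, export_media_set);
    }

    fn main() {
        // Already resolved from Option<bool> before the worker is spawned.
        let eject_media = false;
        let export_media_set = true;

        // `move` transfers ownership of the two bools into the closure,
        // mirroring how the handler hands them to backup_worker().
        let handle = thread::spawn(move || {
            backup_worker_stub(eject_media, export_media_set);
        });
        handle.join().unwrap();
    }
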
@@ -108,6 +115,7 @@ fn backup_worker(
     datastore: Arc<DataStore>,
     pool_config: &MediaPoolConfig,
     eject_media: bool,
+    export_media_set: bool,
 ) -> Result<(), Error> {

     let status_path = Path::new(TAPE_STATUS_DIR);
@@ -142,7 +150,10 @@ fn backup_worker(

     pool_writer.commit()?;

-    if eject_media {
+    if export_media_set {
+        worker.log(format!("exporting current media set"));
+        pool_writer.export_media_set(worker)?;
+    } else if eject_media {
         worker.log(format!("ejection backup media"));
         pool_writer.eject_media()?;
     }
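
To spell out the semantics the hunks above establish: both options default to false when omitted, and if both are set, export-media-set takes precedence, so the plain eject is skipped. A minimal self-contained sketch in plain Rust (finish_action is an illustrative name, not a proxmox-backup function):

    // Mirrors the unwrap_or(false) defaults and the if/else-if ordering above.
    fn finish_action(eject_media: Option<bool>, export_media_set: Option<bool>) -> &'static str {
        let eject_media = eject_media.unwrap_or(false);
        let export_media_set = export_media_set.unwrap_or(false);
        if export_media_set {
            "export the whole media set to import/export slots"
        } else if eject_media {
            "eject the currently loaded tape"
        } else {
            "leave the tape loaded"
        }
    }

    fn main() {
        assert_eq!(finish_action(None, None), "leave the tape loaded");
        assert_eq!(finish_action(Some(true), Some(true)),
                   "export the whole media set to import/export slots");
    }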

View File

@@ -723,6 +723,11 @@ async fn clean_drive(
                 type: bool,
                 optional: true,
             },
+            "export-media-set": {
+                description: "Export media set upon job completion.",
+                type: bool,
+                optional: true,
+            },
         },
     },
 )]

View File

@@ -117,6 +117,7 @@ impl PoolWriter {
         let (drive_config, _digest) = crate::config::drive::config()?;

         if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
+            drop(status); // close drive
             changer.unload_media(None)?;
         } else {
             status.drive.eject_media()?;
@@ -125,6 +126,37 @@ impl PoolWriter {
         Ok(())
     }

+    /// Export current media set and drop PoolWriterState (close drive)
+    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+        let mut status = self.status.take();
+
+        let (drive_config, _digest) = crate::config::drive::config()?;
+
+        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
+            drop(status); // close drive
+
+            changer.unload_media(None)?;
+
+            for media_uuid in self.pool.current_media_list()? {
+                let media = self.pool.lookup_media(media_uuid)?;
+                let changer_id = media.changer_id();
+                if let Some(slot) = changer.export_media(changer_id)? {
+                    worker.log(format!("exported media '{}' to import/export slot {}", changer_id, slot));
+                } else {
+                    worker.warn(format!("export failed - media '{}' is not online", changer_id));
+                }
+            }
+
+        } else {
+            worker.log("standalone drive - ejecting media instead of export");
+            if let Some(mut status) = status {
+                status.drive.eject_media()?;
+            }
+        }
+
+        Ok(())
+    }
+
     /// commit changes to tape and catalog
     ///
     /// This is done automatically during a backupsession, but needs to
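
The new PoolWriter::export_media_set() relies on proxmox-backup's media pool and changer APIs, so it cannot run in isolation; as a rough, self-contained illustration of its error handling (MockChanger and the tape labels below are hypothetical stand-ins, not real types), the sketch mirrors the behaviour that a media-set member which is not in the library only produces a warning instead of aborting the export:

    // Toy model of the export loop: offer every member of the media set to the
    // changer; tapes that are offline are reported but do not stop the loop.
    struct MockChanger {
        online: Vec<&'static str>,
    }

    impl MockChanger {
        // Returns the import/export slot the tape was moved to, or None if the
        // tape is not currently in the library.
        fn export_media(&mut self, label: &str) -> Option<u64> {
            self.online.iter().position(|l| *l == label).map(|idx| (idx + 1) as u64)
        }
    }

    fn main() {
        let mut changer = MockChanger { online: vec!["tape-01", "tape-03"] };
        for &label in ["tape-01", "tape-02", "tape-03"].iter() {
            match changer.export_media(label) {
                Some(slot) => println!("exported media '{}' to import/export slot {}", label, slot),
                None => println!("export failed - media '{}' is not online", label),
            }
        }
    }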