tape: cleanup - rename CatalogBuilder to CatalogSet

Dietmar Maurer 2021-03-19 07:22:54 +01:00
parent f24cbee77d
commit 1037f2bc2d


@@ -42,14 +42,14 @@ use crate::{
 };
 
 /// Helper to build and query sets of catalogs
-pub struct CatalogBuilder {
+pub struct CatalogSet {
     // read only part
     media_set_catalog: MediaSetCatalog,
     // catalog to modify (latest in set)
     catalog: Option<MediaCatalog>,
 }
 
-impl CatalogBuilder {
+impl CatalogSet {
 
     /// Test if the catalog already contains a snapshot
     pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
@@ -150,7 +150,7 @@ impl NewChunksIterator {
     pub fn spawn(
         datastore: Arc<DataStore>,
         snapshot_reader: Arc<Mutex<SnapshotReader>>,
-        catalog_builder: Arc<Mutex<CatalogBuilder>>,
+        catalog_set: Arc<Mutex<CatalogSet>>,
     ) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
 
         let (tx, rx) = std::sync::mpsc::sync_channel(3);
@@ -180,7 +180,7 @@ impl NewChunksIterator {
                     continue;
                 }
 
-                if catalog_builder.lock().unwrap().contains_chunk(&datastore_name, &digest) {
+                if catalog_set.lock().unwrap().contains_chunk(&datastore_name, &digest) {
                     continue;
                 };
 
@@ -230,7 +230,7 @@ pub struct PoolWriter {
     pool: MediaPool,
     drive_name: String,
     status: Option<PoolWriterState>,
-    catalog_builder: Arc<Mutex<CatalogBuilder>>,
+    catalog_set: Arc<Mutex<CatalogSet>>,
     notify_email: Option<String>,
 }
 
@@ -271,13 +271,13 @@ impl PoolWriter {
             media_set_catalog.append_catalog(media_catalog)?;
         }
 
-        let catalog_builder = CatalogBuilder { media_set_catalog, catalog: None };
+        let catalog_set = CatalogSet { media_set_catalog, catalog: None };
 
         Ok(Self {
             pool,
             drive_name: drive_name.to_string(),
             status: None,
-            catalog_builder: Arc::new(Mutex::new(catalog_builder)),
+            catalog_set: Arc::new(Mutex::new(catalog_set)),
             notify_email,
         })
     }
@@ -293,7 +293,7 @@ impl PoolWriter {
     }
 
     pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
-        self.catalog_builder.lock().unwrap().contains_snapshot(store, snapshot)
+        self.catalog_set.lock().unwrap().contains_snapshot(store, snapshot)
     }
 
     /// Eject media and drop PoolWriterState (close drive)
@@ -362,13 +362,13 @@ impl PoolWriter {
         if let Some(PoolWriterState {ref mut drive, .. }) = self.status {
             drive.sync()?; // sync all data to the tape
         }
-        self.catalog_builder.lock().unwrap().commit()?; // then commit the catalog
+        self.catalog_set.lock().unwrap().commit()?; // then commit the catalog
         Ok(())
     }
 
     /// Load a writable media into the drive
     pub fn load_writable_media(&mut self, worker: &WorkerTask) -> Result<Uuid, Error> {
-        let last_media_uuid = match self.catalog_builder.lock().unwrap().catalog {
+        let last_media_uuid = match self.catalog_set.lock().unwrap().catalog {
             Some(ref catalog) => Some(catalog.uuid().clone()),
             None => None,
         };
@@ -419,7 +419,7 @@ impl PoolWriter {
             media.id(),
         )?;
 
-        self.catalog_builder.lock().unwrap().append_catalog(catalog)?;
+        self.catalog_set.lock().unwrap().append_catalog(catalog)?;
 
         let media_set = media.media_set_label().clone().unwrap();
 
@@ -482,9 +482,9 @@ impl PoolWriter {
             bail!("got strange file position number from drive ({})", current_file_number);
         }
 
-        let catalog_builder = self.catalog_builder.lock().unwrap();
-        let catalog = match catalog_builder.catalog {
+        let catalog_set = self.catalog_set.lock().unwrap();
+        let catalog = match catalog_set.catalog {
             None => bail!("append_catalog_archive failed: no catalog - internal error"),
             Some(ref catalog) => catalog,
         };
@@ -614,7 +614,7 @@ impl PoolWriter {
 
         match tape_write_snapshot_archive(writer.as_mut(), snapshot_reader)? {
             Some(content_uuid) => {
-                self.catalog_builder.lock().unwrap().register_snapshot(
+                self.catalog_set.lock().unwrap().register_snapshot(
                     content_uuid,
                     current_file_number,
                     &snapshot_reader.datastore_name().to_string(),
@@ -688,7 +688,7 @@ impl PoolWriter {
         let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
 
         // register chunks in media_catalog
-        self.catalog_builder.lock().unwrap()
+        self.catalog_set.lock().unwrap()
             .register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;
 
         if leom || request_sync {
@@ -706,7 +706,7 @@ impl PoolWriter {
         NewChunksIterator::spawn(
             datastore,
             snapshot_reader,
-            Arc::clone(&self.catalog_builder),
+            Arc::clone(&self.catalog_set),
         )
     }
 }
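
The shape this rename settles on: one CatalogSet shared between PoolWriter and the NewChunksIterator reader thread through an Arc<Mutex<..>>. Below is a minimal, self-contained sketch of that sharing pattern, not the real proxmox-backup code; MediaSetCatalog and MediaCatalog are hypothetical stubs reduced to snapshot-name sets, and the store/snapshot keying is simplified for illustration.

use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the real catalog types, reduced to name sets.
#[derive(Default)]
struct MediaSetCatalog {
    snapshots: HashSet<String>,
}

#[derive(Default)]
struct MediaCatalog {
    snapshots: HashSet<String>,
}

// Same shape as in the diff: a read-only set of finished catalogs plus
// one mutable catalog for the medium currently being written.
struct CatalogSet {
    media_set_catalog: MediaSetCatalog,
    catalog: Option<MediaCatalog>,
}

impl CatalogSet {
    // Same signature as in the diff; lookup simplified to string keys.
    fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
        let key = format!("{}:{}", store, snapshot);
        if let Some(ref catalog) = self.catalog {
            if catalog.snapshots.contains(&key) {
                return true;
            }
        }
        self.media_set_catalog.snapshots.contains(&key)
    }
}

fn main() {
    // PoolWriter and NewChunksIterator each hold a clone of the same
    // Arc<Mutex<CatalogSet>>, mirroring the fields in the diff.
    let catalog_set = Arc::new(Mutex::new(CatalogSet {
        media_set_catalog: MediaSetCatalog::default(),
        catalog: Some(MediaCatalog::default()),
    }));

    let for_reader = Arc::clone(&catalog_set);
    let reader = std::thread::spawn(move || {
        // The reader thread skips snapshots the catalog set already knows.
        !for_reader.lock().unwrap().contains_snapshot("datastore1", "vm/100/example")
    });

    assert!(reader.join().unwrap()); // nothing cataloged yet, so it must be written
}

One reading of the rename: the type is queried concurrently as a set of catalogs (contains_snapshot, contains_chunk) at least as much as it is built, so CatalogSet describes the role better than CatalogBuilder.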