Revert "tape: also abort backup/restore on server shutdown"

This reverts commit 9bd81bb384.

Turns out this is not really a good idea.
Dietmar Maurer 2021-03-02 08:00:10 +01:00
parent 9bd81bb384
commit 1d14c31658
3 changed files with 0 additions and 10 deletions
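
For orientation, all ten deletions below remove the same call: a server-shutdown check that ran next to the per-task abort check inside the tape backup/restore loops. A minimal, self-contained sketch of that pattern, with hypothetical stand-ins for worker.check_abort() and crate::tools::fail_on_shutdown() (the real helpers live elsewhere in the proxmox-backup crate):

use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical stand-ins for the flags behind the two checks.
static TASK_ABORT_REQUESTED: AtomicBool = AtomicBool::new(false);
static SERVER_SHUTDOWN_REQUESTED: AtomicBool = AtomicBool::new(false);

fn check_abort() -> Result<(), String> {
    if TASK_ABORT_REQUESTED.load(Ordering::SeqCst) {
        return Err("task aborted by user".into());
    }
    Ok(())
}

#[allow(dead_code)] // only kept here to show the check the revert removes
fn fail_on_shutdown() -> Result<(), String> {
    if SERVER_SHUTDOWN_REQUESTED.load(Ordering::SeqCst) {
        return Err("server shutdown requested".into());
    }
    Ok(())
}

fn tape_write_loop(mut work_left: u32) -> Result<(), String> {
    loop {
        check_abort()?;          // kept: a user abort still stops the task
        // fail_on_shutdown()?;  // removed by this revert: these loops no
        //                       // longer bail out on a server shutdown
        if work_left == 0 {
            break;
        }
        // ... append the next chunk/snapshot archive to tape ...
        work_left -= 1;
    }
    Ok(())
}

fn main() {
    tape_write_loop(3).unwrap();
}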


@@ -366,7 +366,6 @@ pub fn backup_snapshot(
     loop {
         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         // test is we have remaining chunks
         if chunk_iter.peek().is_none() {
@@ -376,7 +375,6 @@ pub fn backup_snapshot(
         let uuid = pool_writer.load_writable_media(worker)?;

         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
@@ -386,12 +384,10 @@ pub fn backup_snapshot(
     }

     worker.check_abort()?;
-    crate::tools::fail_on_shutdown()?;

     let uuid = pool_writer.load_writable_media(worker)?;

     worker.check_abort()?;
-    crate::tools::fail_on_shutdown()?;

     let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
@@ -400,7 +396,6 @@ pub fn backup_snapshot(
         pool_writer.set_media_status_full(&uuid)?;

         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         pool_writer.load_writable_media(worker)?;
         let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;


@@ -378,7 +378,6 @@ fn restore_chunk_archive<'a>(
     while let Some((digest, blob)) = decoder.next_chunk()? {
         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         if let Some(datastore) = datastore {
             let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
@@ -478,7 +477,6 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
     loop {
         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         let entry = match decoder.next() {
             None => break,
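
A side note on the restore loop above: the context lines suggest cond_touch_chunk(&digest, false) reports whether a chunk is already present in the datastore without treating a missing chunk as an error, so chunks that already exist are not written again. A rough, self-contained sketch of that skip-if-present step, using a hypothetical in-memory store in place of the real datastore:

use std::collections::HashSet;

type ChunkDigest = [u8; 32];

// Hypothetical in-memory stand-in for the real chunk store: it only
// remembers which chunk digests it has already seen.
struct MemStore {
    known: HashSet<ChunkDigest>,
}

impl MemStore {
    // Loosely mirrors the idea behind cond_touch_chunk(&digest, false):
    // report whether the chunk exists instead of failing when it does not.
    fn cond_touch_chunk(&self, digest: &ChunkDigest) -> bool {
        self.known.contains(digest)
    }

    fn insert_chunk(&mut self, digest: ChunkDigest, _data: &[u8]) {
        self.known.insert(digest);
    }
}

// Restore one chunk read from tape: skip the write if the store
// already has a chunk with this digest.
fn restore_chunk(store: &mut MemStore, digest: ChunkDigest, data: &[u8]) {
    if !store.cond_touch_chunk(&digest) {
        store.insert_chunk(digest, data);
    }
}

fn main() {
    let mut store = MemStore { known: HashSet::new() };
    restore_chunk(&mut store, [0u8; 32], b"chunk data"); // written
    restore_chunk(&mut store, [0u8; 32], b"chunk data"); // skipped, already present
}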


@@ -386,7 +386,6 @@ pub fn request_and_load_media(
     loop {
         worker.check_abort()?;
-        crate::tools::fail_on_shutdown()?;

         let mut handle = match drive_config.open() {
             Ok(handle) => handle,
@@ -398,7 +397,6 @@ pub fn request_and_load_media(
             }
             for _ in 0..50 { // delay 5 seconds
                 worker.check_abort()?;
-                crate::tools::fail_on_shutdown()?;
                 std::thread::sleep(std::time::Duration::from_millis(100));
             }
             continue;
@@ -443,7 +441,6 @@ pub fn request_and_load_media(
             // eprintln!("read label failed - test again in 5 secs");
             for _ in 0..50 { // delay 5 seconds
                 worker.check_abort()?;
-                crate::tools::fail_on_shutdown()?;
                 std::thread::sleep(std::time::Duration::from_millis(100));
             }
         }
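
The two delay loops in the last hunks wait roughly five seconds (50 x 100 ms) before retrying to open the drive or re-read the label, checking for an abort on every iteration so the task stays responsive while it waits. A small self-contained sketch of that polling pattern, with a caller-supplied closure standing in for worker.check_abort():

use std::{thread, time::Duration};

// Wait for about `seconds` seconds while staying responsive to aborts:
// instead of one long sleep, re-run the abort check every 100 ms, the same
// 50 x 100 ms pattern the loops above use around std::thread::sleep().
fn abortable_delay<E>(
    seconds: u64,
    check_abort: impl Fn() -> Result<(), E>,
) -> Result<(), E> {
    for _ in 0..(seconds * 10) {
        check_abort()?; // return early as soon as an abort is requested
        thread::sleep(Duration::from_millis(100));
    }
    Ok(())
}

fn main() {
    // A check that never signals an abort: this simply waits ~1 second.
    abortable_delay(1, || Ok::<(), std::io::Error>(())).unwrap();
}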