tape: cleanup TapeJob implementation - uses AllOf Schema with SectionConfig
commit 5830e5620d
parent 46d53e3e90
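For orientation, here is a minimal, self-contained sketch of what the flattening introduced by this commit means in practice. This is not code from the repository: the struct definitions are trimmed down (no `Updater` derive, no `#[api]` attributes) and the example values are invented; only the field names, the `#[serde(flatten)]` layout and the `store:pool:drive` job-id format are taken from the diff below.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
struct TapeBackupJobSetup {
    store: String,
    pool: String,
    drive: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    eject_media: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    export_media_set: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    latest_only: Option<bool>,
}

#[derive(Serialize, Deserialize)]
struct TapeBackupJobConfig {
    id: String,
    // The setup fields stay at the top level of the serialized form,
    // so existing job entries and API payloads keep their shape.
    #[serde(flatten)]
    setup: TapeBackupJobSetup,
}

fn main() -> Result<(), serde_json::Error> {
    // Invented example payload; the keys mirror the schema in the diff.
    let json = r#"{
        "id": "job1",
        "store": "store1",
        "pool": "daily",
        "drive": "lto0",
        "eject-media": true
    }"#;
    let cfg: TapeBackupJobConfig = serde_json::from_str(json)?;

    // Job id as built by the workers in the diff: "store:pool:drive"
    // (scheduled jobs additionally append the job name).
    let job_id = format!("{}:{}:{}", cfg.setup.store, cfg.setup.pool, cfg.setup.drive);
    assert_eq!(job_id, "store1:daily:lto0");

    // Unset boolean options count as "off", as in backup_worker().
    assert!(cfg.setup.eject_media.unwrap_or(false));
    assert!(!cfg.setup.latest_only.unwrap_or(false));

    println!("{}", serde_json::to_string_pretty(&cfg)?);
    Ok(())
}
```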
@@ -19,6 +19,7 @@ use crate::{
         self,
         tape_job::{
             TapeBackupJobConfig,
+            TapeBackupJobSetup,
             TapeBackupJobStatus,
         },
     },
@@ -36,9 +37,6 @@ use crate::{
     },
     api2::types::{
         Authid,
-        DATASTORE_SCHEMA,
-        MEDIA_POOL_NAME_SCHEMA,
-        DRIVE_NAME_SCHEMA,
         UPID_SCHEMA,
         JOB_ID_SCHEMA,
         MediaPoolConfig,
@@ -109,28 +107,28 @@ pub fn list_tape_backup_jobs(
 
 pub fn do_tape_backup_job(
     mut job: Job,
-    tape_job: TapeBackupJobConfig,
+    setup: TapeBackupJobSetup,
     auth_id: &Authid,
     schedule: Option<String>,
 ) -> Result<String, Error> {
 
     let job_id = format!("{}:{}:{}:{}",
-        tape_job.store,
-        tape_job.pool,
-        tape_job.drive,
+        setup.store,
+        setup.pool,
+        setup.drive,
         job.jobname());
 
     let worker_type = job.jobtype().to_string();
 
-    let datastore = DataStore::lookup_datastore(&tape_job.store)?;
+    let datastore = DataStore::lookup_datastore(&setup.store)?;
 
     let (config, _digest) = config::media_pool::config()?;
-    let pool_config: MediaPoolConfig = config.lookup("pool", &tape_job.pool)?;
+    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
 
     let (drive_config, _digest) = config::drive::config()?;
 
     // early check/lock before starting worker
-    let drive_lock = lock_tape_device(&drive_config, &tape_job.drive)?;
+    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
 
     let upid_str = WorkerTask::new_thread(
         &worker_type,
@@ -140,7 +138,7 @@ pub fn do_tape_backup_job(
         move |worker| {
             let _drive_lock = drive_lock; // keep lock guard
 
-            set_tape_device_state(&tape_job.drive, &worker.upid().to_string())?;
+            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
             job.start(&worker.upid().to_string())?;
 
             task_log!(worker,"Starting tape backup job '{}'", job_id);
@@ -151,11 +149,8 @@ pub fn do_tape_backup_job(
             let job_result = backup_worker(
                 &worker,
                 datastore,
-                &tape_job.drive,
                 &pool_config,
-                tape_job.eject_media.unwrap_or(false),
-                tape_job.export_media_set.unwrap_or(false),
-                tape_job.latest_only.unwrap_or(false),
+                &setup,
             );
 
             let status = worker.create_state(&job_result);
@@ -168,10 +163,10 @@ pub fn do_tape_backup_job(
                 );
             }
 
-            if let Err(err) = set_tape_device_state(&tape_job.drive, "") {
+            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                 eprintln!(
                     "could not unset drive state for {}: {}",
-                    tape_job.drive,
+                    setup.drive,
                     err
                 );
             }
@@ -204,7 +199,7 @@ pub fn run_tape_backup_job(
 
     let job = Job::new("tape-backup-job", &id)?;
 
-    let upid_str = do_tape_backup_job(job, backup_job, &auth_id, None)?;
+    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;
 
     Ok(upid_str)
 }
@@ -212,29 +207,9 @@ pub fn run_tape_backup_job(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            pool: {
-                schema: MEDIA_POOL_NAME_SCHEMA,
-            },
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-            },
-            "eject-media": {
-                description: "Eject media upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "export-media-set": {
-                description: "Export media set upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "latest-only": {
-                description: "Backup latest snapshots only.",
-                type: bool,
-                optional: true,
+            setup: {
+                type: TapeBackupJobSetup,
+                flatten: true,
             },
         },
     },
@@ -244,34 +219,25 @@ pub fn run_tape_backup_job(
 )]
 /// Backup datastore to tape media pool
 pub fn backup(
-    store: String,
-    pool: String,
-    drive: String,
-    eject_media: Option<bool>,
-    export_media_set: Option<bool>,
-    latest_only: Option<bool>,
+    setup: TapeBackupJobSetup,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let datastore = DataStore::lookup_datastore(&store)?;
+    let datastore = DataStore::lookup_datastore(&setup.store)?;
 
     let (config, _digest) = config::media_pool::config()?;
-    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
+    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
 
     let (drive_config, _digest) = config::drive::config()?;
 
     // early check/lock before starting worker
-    let drive_lock = lock_tape_device(&drive_config, &drive)?;
+    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
 
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
-    let eject_media = eject_media.unwrap_or(false);
-    let export_media_set = export_media_set.unwrap_or(false);
-    let latest_only = latest_only.unwrap_or(false);
-
-    let job_id = format!("{}:{}:{}", store, pool, drive);
+    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);
 
     let upid_str = WorkerTask::new_thread(
         "tape-backup",
@@ -280,19 +246,16 @@ pub fn backup(
         to_stdout,
         move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
-            set_tape_device_state(&drive, &worker.upid().to_string())?;
+            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
            backup_worker(
                 &worker,
                 datastore,
-                &drive,
                 &pool_config,
-                eject_media,
-                export_media_set,
-                latest_only,
+                &setup,
             )?;
 
             // ignore errors
-            let _ = set_tape_device_state(&drive, "");
+            let _ = set_tape_device_state(&setup.drive, "");
             Ok(())
         }
     )?;
@@ -303,11 +266,8 @@ pub fn backup(
 fn backup_worker(
     worker: &WorkerTask,
     datastore: Arc<DataStore>,
-    drive: &str,
     pool_config: &MediaPoolConfig,
-    eject_media: bool,
-    export_media_set: bool,
-    latest_only: bool,
+    setup: &TapeBackupJobSetup,
 ) -> Result<(), Error> {
 
     let status_path = Path::new(TAPE_STATUS_DIR);
@@ -315,20 +275,22 @@ fn backup_worker(
     let _lock = MediaPool::lock(status_path, &pool_config.name)?;
 
     task_log!(worker, "update media online status");
-    let changer_name = update_media_online_status(drive)?;
+    let changer_name = update_media_online_status(&setup.drive)?;
 
     let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
 
-    let mut pool_writer = PoolWriter::new(pool, drive)?;
+    let mut pool_writer = PoolWriter::new(pool, &setup.drive)?;
 
     let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
 
     group_list.sort_unstable();
 
+    let latest_only = setup.latest_only.unwrap_or(false);
+
     if latest_only {
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }
 
     for group in group_list {
         let mut snapshot_list = group.list_backups(&datastore.base_path())?;
 
@@ -355,9 +317,9 @@ fn backup_worker(
 
     pool_writer.commit()?;
 
-    if export_media_set {
+    if setup.export_media_set.unwrap_or(false) {
         pool_writer.export_media_set(worker)?;
-    } else if eject_media {
+    } else if setup.eject_media.unwrap_or(false) {
         pool_writer.eject_media(worker)?;
     }
 
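Taken together, the hunks above make every entry point in the tape backup API, the one-shot `backup()` handler, `run_tape_backup_job()` and `do_tape_backup_job()`, consume the shared `TapeBackupJobSetup` instead of a growing list of store/pool/drive/flag parameters, and `backup_worker()` now resolves the optional booleans itself via `unwrap_or(false)`. Because the setup type is flattened into the API schema (`flatten: true`), the parameter names exposed over the API (store, pool, drive, eject-media, export-media-set, latest-only) should remain unchanged on the wire.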
@@ -6,7 +6,6 @@ use proxmox::{
     schema::{
         Schema,
         ObjectSchemaType,
-        SchemaPropertyEntry,
         ApiStringFormat,
     },
     router::{
@@ -586,7 +586,7 @@ async fn schedule_tape_backup_jobs() {
            Ok(job) => job,
            Err(_) => continue, // could not get lock
        };
        if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
-        if let Err(err) = do_tape_backup_job(job, job_config, &auth_id, Some(event_str)) {
+        if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
            eprintln!("unable to start tape bvackup job {} - {}", &job_id, err);
        }
    };
@@ -38,6 +38,7 @@ use proxmox_backup::{
         datastore::complete_datastore_name,
         drive::complete_drive_name,
         media_pool::complete_pool_name,
+        tape_job::TapeBackupJobSetup,
     },
     tape::{
         drive::{
@@ -790,27 +791,11 @@ async fn clean_drive(mut param: Value) -> Result<(), Error> {
 }
 
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            pool: {
-                schema: MEDIA_POOL_NAME_SCHEMA,
-            },
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            "eject-media": {
-                description: "Eject media upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "export-media-set": {
-                description: "Export media set upon job completion.",
-                type: bool,
-                optional: true,
+            setup: {
+                type: TapeBackupJobSetup,
+                flatten: true,
             },
             "output-format": {
                 schema: OUTPUT_FORMAT,
@@ -926,6 +911,7 @@ fn main() {
             "backup",
             CliCommand::new(&API_METHOD_BACKUP)
                 .arg_param(&["store", "pool"])
+                .completion_cb("drive", complete_drive_name)
                 .completion_cb("store", complete_datastore_name)
                 .completion_cb("pool", complete_pool_name)
         )
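On the proxmox-tape CLI side, the backup command now takes the same flattened `TapeBackupJobSetup` parameters and additionally gains shell completion for the drive argument.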
@@ -31,9 +31,6 @@ lazy_static! {
 
 #[api(
     properties: {
-        id: {
-            schema: JOB_ID_SCHEMA,
-        },
         store: {
             schema: DATASTORE_SCHEMA,
         },
@@ -58,6 +55,31 @@ lazy_static! {
             type: bool,
             optional: true,
         },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Updater,Serialize,Deserialize,Clone)]
+/// Tape Backup Job Setup
+pub struct TapeBackupJobSetup {
+    pub store: String,
+    pub pool: String,
+    pub drive: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub eject_media: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub export_media_set: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub latest_only: Option<bool>,
+}
+
+#[api(
+    properties: {
+        id: {
+            schema: JOB_ID_SCHEMA,
+        },
+        setup: {
+            type: TapeBackupJobSetup,
+        },
         comment: {
             optional: true,
             schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -74,15 +96,8 @@ lazy_static! {
 pub struct TapeBackupJobConfig {
     #[updater(fixed)]
     pub id: String,
-    pub store: String,
-    pub pool: String,
-    pub drive: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub eject_media: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub export_media_set: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub latest_only: Option<bool>,
+    #[serde(flatten)]
+    pub setup: TapeBackupJobSetup,
     #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
     #[serde(skip_serializing_if="Option::is_none")]
@@ -111,7 +126,7 @@ pub struct TapeBackupJobStatus {
 
 fn init() -> SectionConfig {
     let obj_schema = match TapeBackupJobConfig::API_SCHEMA {
-        Schema::Object(ref obj_schema) => obj_schema,
+        Schema::AllOf(ref allof_schema) => allof_schema,
         _ => unreachable!(),
     };
 
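With `TapeBackupJobSetup` flattened into `TapeBackupJobConfig`, the derived `TapeBackupJobConfig::API_SCHEMA` becomes an AllOf schema (the job's own properties such as id and comment combined with the flattened setup properties) rather than a plain Object schema, which is why `init()` now matches `Schema::AllOf` when building the SectionConfig plugin. The retained `ObjectSchemaType` import together with the dropped `SchemaPropertyEntry` suggests the AllOf variant exposes the object-like interface SectionConfig needs, so no manual property handling remains; this reading is inferred from the hunks themselves, not verified against the proxmox schema crate.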