use failure::*;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;

use serde_json::Value;

use crate::api_schema::router::{RpcEnvironment, RpcEnvironmentType};
use crate::server::WorkerTask;
use crate::backup::*;
use crate::server::formatter::*;

use hyper::{Body, Response};

struct UploadStatistic {
    count: u64,
    size: u64,
    compressed_size: u64,
    duplicates: u64,
}

impl UploadStatistic {
    fn new() -> Self {
        Self {
            count: 0,
            size: 0,
            compressed_size: 0,
            duplicates: 0,
        }
    }
}
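
/// State of one open dynamic index writer.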
struct DynamicWriterState {
    name: String,
    index: DynamicIndexWriter,
    offset: u64,
    chunk_count: u64,
    upload_stat: UploadStatistic,
}
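
/// State of one open fixed index writer.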
struct FixedWriterState {
    name: String,
    index: FixedIndexWriter,
    size: usize,
    chunk_size: u32,
    chunk_count: u64,
    upload_stat: UploadStatistic,
}
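
/// Backup session state shared between all API handlers (kept behind an `Arc<Mutex<..>>`).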
struct SharedBackupState {
    finished: bool,
    uid_counter: usize,
    file_counter: usize, // successfully uploaded files
    dynamic_writers: HashMap<usize, DynamicWriterState>,
    fixed_writers: HashMap<usize, FixedWriterState>,
    known_chunks: HashMap<[u8;32], u32>,
}

impl SharedBackupState {

    // Raise error if finished flag is set
    fn ensure_unfinished(&self) -> Result<(), Error> {
        if self.finished {
            bail!("backup already marked as finished.");
        }
        Ok(())
    }

    // Get a unique integer ID
    pub fn next_uid(&mut self) -> usize {
        self.uid_counter += 1;
        self.uid_counter
    }
}

/// `RpcEnvironment` implementation for backup service
#[derive(Clone)]
pub struct BackupEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: HashMap<String, Value>,
    user: String,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
    pub worker: Arc<WorkerTask>,
    pub datastore: Arc<DataStore>,
    pub backup_dir: BackupDir,
    pub last_backup: Option<BackupInfo>,
    state: Arc<Mutex<SharedBackupState>>,
}

impl BackupEnvironment {

    pub fn new(
        env_type: RpcEnvironmentType,
        user: String,
        worker: Arc<WorkerTask>,
        datastore: Arc<DataStore>,
        backup_dir: BackupDir,
    ) -> Self {

        let state = SharedBackupState {
            finished: false,
            uid_counter: 0,
            file_counter: 0,
            dynamic_writers: HashMap::new(),
            fixed_writers: HashMap::new(),
            known_chunks: HashMap::new(),
        };

        Self {
            result_attributes: HashMap::new(),
            env_type,
            user,
            worker,
            datastore,
            debug: false,
            formatter: &JSON_FORMATTER,
            backup_dir,
            last_backup: None,
            state: Arc::new(Mutex::new(state)),
        }
    }

    /// Register a chunk with its associated length.
    ///
    /// We do not fully trust clients, so a client may only use registered
    /// chunks. Please use this method to register chunks from previous backups.
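    ///
    /// A minimal sketch of the intended use (illustrative only; `digest` and
    /// `length` would come from the index files of a previous backup):
    ///
    /// ```ignore
    /// env.register_chunk(digest, length)?;
    /// ```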
    pub fn register_chunk(&self, digest: [u8; 32], length: u32) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        state.known_chunks.insert(digest, length);

        Ok(())
    }

    /// Register fixed length chunks after upload.
    ///
    /// Like `register_chunk()`, but additionally records statistics for
    /// the fixed index writer.
    pub fn register_fixed_chunk(
        &self,
        wid: usize,
        digest: [u8; 32],
        size: u32,
        compressed_size: u32,
        is_duplicate: bool,
    ) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        if size != data.chunk_size {
            bail!("fixed writer '{}' - got unexpected chunk size ({} != {})", data.name, size, data.chunk_size);
        }

        // record statistics
        data.upload_stat.count += 1;
        data.upload_stat.size += size as u64;
        data.upload_stat.compressed_size += compressed_size as u64;
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
        state.known_chunks.insert(digest, size);

        Ok(())
    }

    /// Register dynamic length chunks after upload.
    ///
    /// Like `register_chunk()`, but additionally records statistics for
    /// the dynamic index writer.
    pub fn register_dynamic_chunk(
        &self,
        wid: usize,
        digest: [u8; 32],
        size: u32,
        compressed_size: u32,
        is_duplicate: bool,
    ) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        // record statistics
        data.upload_stat.count += 1;
        data.upload_stat.size += size as u64;
        data.upload_stat.compressed_size += compressed_size as u64;
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
        state.known_chunks.insert(digest, size);

        Ok(())
    }
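
    /// Return the length of a chunk if it was previously registered.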
    pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option<u32> {
        let state = self.state.lock().unwrap();

        match state.known_chunks.get(digest) {
            Some(len) => Some(*len),
            None => None,
        }
    }

    /// Store the writer with a unique ID
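    ///
    /// A minimal sketch of the writer lifecycle, assuming `env` is the
    /// `BackupEnvironment`, `index` an open `DynamicIndexWriter`, and a
    /// single chunk of `chunk_size` bytes with digest `digest` was uploaded
    /// (all names are illustrative):
    ///
    /// ```ignore
    /// let wid = env.register_dynamic_writer(index, "catalog.didx".to_string())?;
    /// env.dynamic_writer_append_chunk(wid, 0, chunk_size, &digest)?;
    /// env.dynamic_writer_close(wid, 1, chunk_size as u64)?;
    /// ```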
    pub fn register_dynamic_writer(&self, index: DynamicIndexWriter, name: String) -> Result<usize, Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let uid = state.next_uid();

        state.dynamic_writers.insert(uid, DynamicWriterState {
            index, name, offset: 0, chunk_count: 0, upload_stat: UploadStatistic::new(),
        });

        Ok(uid)
    }

    /// Store the writer with a unique ID
    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let uid = state.next_uid();

        state.fixed_writers.insert(uid, FixedWriterState {
            index, name, chunk_count: 0, size, chunk_size, upload_stat: UploadStatistic::new(),
        });

        Ok(uid)
    }

    /// Append chunk to dynamic writer
    pub fn dynamic_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        if data.offset != offset {
            bail!("dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
                  data.name, data.offset, offset);
        }

        data.offset += size as u64;
        data.chunk_count += 1;
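
        // data.offset was already advanced above, so this records the
        // end offset of the chunk (which is what the dynamic index stores)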
        data.index.add_chunk(data.offset, digest)?;

        Ok(())
    }

    /// Append chunk to fixed writer
    pub fn fixed_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        data.chunk_count += 1;

        if size != data.chunk_size {
            bail!("fixed writer '{}' - got unexpected chunk size ({} != {})", data.name, size, data.chunk_size);
        }
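
        // map the byte offset to its slot in the fixed index - plain integer
        // division, since all chunks here are checked to have the same size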
        let pos = (offset as usize) / (data.chunk_size as usize);
        data.index.add_digest(pos, digest)?;

        Ok(())
    }

    fn log_upload_stat(&self, archive_name: &str, size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
        self.log(format!("Upload statistics for '{}'", archive_name));
        self.log(format!("Size: {}", size));
        self.log(format!("Chunk count: {}", chunk_count));

        if size == 0 || chunk_count == 0 {
            return;
        }
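
        // the percentages below use integer arithmetic and therefore round down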
        self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));

        let client_side_duplicates = chunk_count - upload_stat.count;
        let server_side_duplicates = upload_stat.duplicates;

        if (client_side_duplicates + server_side_duplicates) > 0 {
            let per = (client_side_duplicates + server_side_duplicates)*100/chunk_count;
            self.log(format!("Duplicates: {}+{} ({}%)", client_side_duplicates, server_side_duplicates, per));
        }

        if upload_stat.size > 0 {
            self.log(format!("Compression: {}%", (upload_stat.compressed_size*100)/upload_stat.size));
        }
    }

    /// Close dynamic writer
    pub fn dynamic_writer_close(&self, wid: usize, chunk_count: u64, size: u64) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.remove(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        if data.chunk_count != chunk_count {
            bail!("dynamic writer '{}' close failed - unexpected chunk count ({} != {})", data.name, data.chunk_count, chunk_count);
        }

        if data.offset != size {
            bail!("dynamic writer '{}' close failed - unexpected file size ({} != {})", data.name, data.offset, size);
        }

        data.index.close()?;

        self.log_upload_stat(&data.name, size, chunk_count, &data.upload_stat);

        state.file_counter += 1;

        Ok(())
    }

    /// Close fixed writer
    pub fn fixed_writer_close(&self, wid: usize, chunk_count: u64, size: u64) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.remove(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        if data.chunk_count != chunk_count {
            bail!("fixed writer '{}' close failed - received wrong number of chunks ({} != {})", data.name, data.chunk_count, chunk_count);
        }

        let expected_count = data.index.index_length();

        if chunk_count != (expected_count as u64) {
            bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
        }

        if size != (data.size as u64) {
            bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
        }

        data.index.close()?;

        self.log_upload_stat(&data.name, size, chunk_count, &data.upload_stat);

        state.file_counter += 1;

        Ok(())
    }

    /// Mark backup as finished
    pub fn finish_backup(&self) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        state.finished = true;

        // test if all writers are correctly closed
        if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
            bail!("found open index writer - unable to finish backup");
        }

        if state.file_counter == 0 {
            bail!("backup does not contain valid files (file count == 0)");
        }

        Ok(())
    }
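
    /// Log a message to the worker task log.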
    pub fn log<S: AsRef<str>>(&self, msg: S) {
        self.worker.log(msg);
    }
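
    /// Log a message, but only if debug logging is enabled.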
    pub fn debug<S: AsRef<str>>(&self, msg: S) {
        if self.debug { self.worker.log(msg); }
    }
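
    /// Format a result as an HTTP response, using the configured output formatter.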
    pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {
        match result {
            Ok(data) => (self.formatter.format_data)(data, self),
            Err(err) => (self.formatter.format_error)(err),
        }
    }

    /// Raise error if finished flag is not set
    pub fn ensure_finished(&self) -> Result<(), Error> {
        let state = self.state.lock().unwrap();
        if !state.finished {
            bail!("backup ended but finished flag is not set.");
        }
        Ok(())
    }

    /// Remove complete backup
    pub fn remove_backup(&self) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();
        state.finished = true;

        self.datastore.remove_backup_dir(&self.backup_dir)?;

        Ok(())
    }
}

impl RpcEnvironment for BackupEnvironment {

    fn set_result_attrib(&mut self, name: &str, value: Value) {
        self.result_attributes.insert(name.into(), value);
    }

    fn get_result_attrib(&self, name: &str) -> Option<&Value> {
        self.result_attributes.get(name)
    }

    fn env_type(&self) -> RpcEnvironmentType {
        self.env_type
    }

    fn set_user(&mut self, _user: Option<String>) {
        panic!("unable to change user");
    }

    fn get_user(&self) -> Option<String> {
        Some(self.user.clone())
    }
}
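
// Helpers to downcast a generic `RpcEnvironment` back into the concrete
// `BackupEnvironment`; this panics if the environment has a different type.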
impl AsRef<BackupEnvironment> for dyn RpcEnvironment {
    fn as_ref(&self) -> &BackupEnvironment {
        self.as_any().downcast_ref::<BackupEnvironment>().unwrap()
    }
}

impl AsRef<BackupEnvironment> for Box<dyn RpcEnvironment> {
    fn as_ref(&self) -> &BackupEnvironment {
        self.as_any().downcast_ref::<BackupEnvironment>().unwrap()
    }
}