src/api2/admin/datastore.rs: implement API to return last GC status

Dietmar Maurer 2019-04-11 12:04:25 +02:00
parent 8d76e8b773
commit f2b99c34f7
3 changed files with 21 additions and 4 deletions


@@ -323,10 +323,13 @@ fn garbage_collection_status(
     let store = param["store"].as_str().unwrap();
+    let datastore = DataStore::lookup_datastore(&store)?;
     println!("Garbage collection status on store {}", store);
-    Ok(json!(null))
+    let status = datastore.last_gc_status();
+    Ok(serde_json::to_value(&status)?)
 }
 pub fn api_method_garbage_collection_status() -> ApiMethod {
@@ -391,7 +394,7 @@ pub fn router() -> Router {
                {"subdir": "gc" },
                {"subdir": "groups" },
                {"subdir": "snapshots" },
-               {"subdir": "status" },
+               //{"subdir": "status" },
                {"subdir": "prune" },
            ])),
            ObjectSchema::new("Directory index.")
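Pieced together from the two hunks above, the handler now looks roughly like the sketch below. This is not verbatim repo code: the parameter list and the surrounding types (Value, ApiMethod, RpcEnvironment, Error, DataStore) are assumed from the rest of the API module, which this diff does not show.

// Sketch only: reassembled from the diff above; the signature is an assumption.
fn garbage_collection_status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let store = param["store"].as_str().unwrap();

    // Look up the datastore so its cached GC status can be read.
    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Garbage collection status on store {}", store);

    // Return the last recorded GC status instead of the old json!(null) placeholder.
    let status = datastore.last_gc_status();
    Ok(serde_json::to_value(&status)?)
}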


@@ -4,12 +4,15 @@ use std::path::{Path, PathBuf};
 use std::io::{Read, Write};
 use std::sync::{Arc, Mutex};
 use std::os::unix::io::AsRawFd;
+use serde_derive::Serialize;
 use openssl::sha;
 use crate::tools;
+#[derive(Clone, Serialize)]
 pub struct GarbageCollectionStatus {
+    pub upid: Option<String>,
     pub used_bytes: usize,
     pub used_chunks: usize,
     pub disk_bytes: usize,
@@ -19,6 +22,7 @@ pub struct GarbageCollectionStatus {
 impl Default for GarbageCollectionStatus {
     fn default() -> Self {
         GarbageCollectionStatus {
+            upid: None,
             used_bytes: 0,
             used_chunks: 0,
             disk_bytes: 0,
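With the new Serialize derive, the status struct can be handed straight to serde_json in the API handler. A minimal, self-contained sketch of that round trip follows; the field set is abridged to what this diff shows, and the UPID string is just a placeholder, not a real task id.

// Sketch, assuming serde_derive and serde_json as dependencies.
use serde_derive::Serialize;

#[derive(Clone, Serialize)]
pub struct GarbageCollectionStatus {
    pub upid: Option<String>,
    pub used_bytes: usize,
    pub used_chunks: usize,
    pub disk_bytes: usize,
}

fn main() -> Result<(), serde_json::Error> {
    let status = GarbageCollectionStatus {
        upid: Some("UPID:example".to_string()), // placeholder, not a real UPID
        used_bytes: 0,
        used_chunks: 0,
        disk_bytes: 0,
    };
    // The same conversion the API handler performs via serde_json::to_value.
    println!("{}", serde_json::to_value(&status)?);
    Ok(())
}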


@@ -26,6 +26,7 @@ lazy_static!{
 pub struct DataStore {
     chunk_store: Arc<ChunkStore>,
     gc_mutex: Mutex<bool>,
+    last_gc_status: Mutex<GarbageCollectionStatus>,
 }
 impl DataStore {
@@ -65,9 +66,12 @@ impl DataStore {
         let chunk_store = ChunkStore::open(store_name, path)?;
+        let gc_status = GarbageCollectionStatus::default();
         Ok(Self {
             chunk_store: Arc::new(chunk_store),
             gc_mutex: Mutex::new(false),
+            last_gc_status: Mutex::new(gc_status),
         })
     }
@@ -225,7 +229,11 @@ impl DataStore {
         }
         Ok(())
     }
+    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
+        self.last_gc_status.lock().unwrap().clone()
+    }
     pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
@@ -236,7 +244,7 @@ impl DataStore {
            let oldest_writer = self.chunk_store.oldest_writer();
            let mut gc_status = GarbageCollectionStatus::default();
-           gc_status.used_bytes = 0;
+           gc_status.upid = Some(worker.to_string());
            worker.log("Start GC phase1 (mark chunks)");
@@ -250,6 +258,8 @@ impl DataStore {
            worker.log(&format!("Disk bytes: {}", gc_status.disk_bytes));
            worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
+           *self.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }