diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index f0f66f47..b29ae0d7 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -323,10 +323,13 @@ fn garbage_collection_status(
 
     let store = param["store"].as_str().unwrap();
 
+    let datastore = DataStore::lookup_datastore(&store)?;
+
     println!("Garbage collection status on store {}", store);
 
-    Ok(json!(null))
+    let status = datastore.last_gc_status();
 
+    Ok(serde_json::to_value(&status)?)
 }
 
 pub fn api_method_garbage_collection_status() -> ApiMethod {
@@ -391,7 +394,7 @@ pub fn router() -> Router {
                 {"subdir": "gc" },
                 {"subdir": "groups" },
                 {"subdir": "snapshots" },
-                {"subdir": "status" },
+                //{"subdir": "status" },
                 {"subdir": "prune" },
             ])),
             ObjectSchema::new("Directory index.")
diff --git a/src/backup/chunk_store.rs b/src/backup/chunk_store.rs
index 03c43198..33a50dde 100644
--- a/src/backup/chunk_store.rs
+++ b/src/backup/chunk_store.rs
@@ -4,12 +4,15 @@ use std::path::{Path, PathBuf};
 use std::io::{Read, Write};
 use std::sync::{Arc, Mutex};
 use std::os::unix::io::AsRawFd;
+use serde_derive::Serialize;
 
 use openssl::sha;
 
 use crate::tools;
 
+#[derive(Clone, Serialize)]
 pub struct GarbageCollectionStatus {
+    pub upid: Option<String>,
     pub used_bytes: usize,
     pub used_chunks: usize,
     pub disk_bytes: usize,
@@ -19,6 +22,7 @@ pub struct GarbageCollectionStatus {
 impl Default for GarbageCollectionStatus {
     fn default() -> Self {
         GarbageCollectionStatus {
+            upid: None,
             used_bytes: 0,
             used_chunks: 0,
             disk_bytes: 0,
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index fa7126c6..b24ae412 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -26,6 +26,7 @@ lazy_static!{
 pub struct DataStore {
     chunk_store: Arc<ChunkStore>,
     gc_mutex: Mutex<bool>,
+    last_gc_status: Mutex<GarbageCollectionStatus>,
 }
 
 impl DataStore {
@@ -65,9 +66,12 @@ impl DataStore {
 
         let chunk_store = ChunkStore::open(store_name, path)?;
 
+        let gc_status = GarbageCollectionStatus::default();
+
         Ok(Self {
             chunk_store: Arc::new(chunk_store),
             gc_mutex: Mutex::new(false),
+            last_gc_status: Mutex::new(gc_status),
         })
     }
 
@@ -225,7 +229,11 @@ impl DataStore {
 
         }
 
         Ok(())
-    }
+    }
+
+    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
+        self.last_gc_status.lock().unwrap().clone()
+    }
 
     pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
@@ -236,7 +244,7 @@ impl DataStore {
 
         let oldest_writer = self.chunk_store.oldest_writer();
 
         let mut gc_status = GarbageCollectionStatus::default();
-        gc_status.used_bytes = 0;
+        gc_status.upid = Some(worker.to_string());
 
         worker.log("Start GC phase1 (mark chunks)");
@@ -250,6 +258,8 @@ impl DataStore {
             worker.log(&format!("Disk bytes: {}", gc_status.disk_bytes));
             worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
 
+            *self.last_gc_status.lock().unwrap() = gc_status;
+
         } else {
             bail!("Start GC failed - (already running/locked)");
         }