src/backup/dynamic_index.rs: Add LruCache for chunks.
Improves non-sequential reads of chunks, e.g. as issued via FUSE.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
commit 536683e73b (parent 35ddf0b419)
src/backup/dynamic_index.rs

@@ -266,6 +266,19 @@ pub struct BufferedDynamicReader<S> {
     buffered_chunk_idx: usize,
     buffered_chunk_start: u64,
     read_offset: u64,
+    lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
 }
 
+struct ChunkCacher<'a, S> {
+    store: &'a mut S,
+    index: &'a DynamicIndexReader,
+}
+
+impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
+    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, failure::Error> {
+        let (start, end, digest) = self.index.chunk_info(index)?;
+        self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
+    }
+}
+
 impl<S: ReadChunk> BufferedDynamicReader<S> {
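The Cacher trait implemented above is the cache's miss handler: LruCache::access returns the cached value when the key is present and otherwise asks the cacher's fetch to produce it, with Ok(None) meaning the value cannot be produced at all. The standalone sketch below illustrates that contract with a plain HashMap in place of the crate's LRU implementation; the names (SimpleCache, the String error type) are illustrative only and not part of the patch.

use std::collections::HashMap;

// Illustrative stand-in for the Cacher contract (LRU eviction omitted).
trait Cacher<K, V> {
    fn fetch(&mut self, key: K) -> Result<Option<V>, String>;
}

struct SimpleCache<K, V> {
    map: HashMap<K, V>,
}

impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> SimpleCache<K, V> {
    fn new() -> Self {
        Self { map: HashMap::new() }
    }

    // On a hit, return the cached value; on a miss, let the cacher fetch it
    // and store the result. Ok(None) from the cacher is passed through.
    fn access<C: Cacher<K, V>>(&mut self, key: K, cacher: &mut C) -> Result<Option<&V>, String> {
        if !self.map.contains_key(&key) {
            match cacher.fetch(key)? {
                Some(value) => { self.map.insert(key, value); }
                None => return Ok(None),
            }
        }
        Ok(self.map.get(&key))
    }
}

In the patch, buffer_chunk maps the None case to an error via format_err!, since every valid chunk index is expected to be fetchable.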
@@ -279,6 +292,7 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
             buffered_chunk_idx: 0,
             buffered_chunk_start: 0,
             read_offset: 0,
+            lru_cache: crate::tools::lru_cache::LruCache::new(32),
         }
     }
 
@@ -287,27 +301,29 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
     }
 
     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-
-        let index = &self.index;
-        let (start, end, digest) = index.chunk_info(idx)?;
-
-        // fixme: avoid copy
-        let data = self.store.read_chunk(&digest)?;
+        let (start, end, data) = self.lru_cache.access(
+            idx,
+            &mut ChunkCacher {
+                store: &mut self.store,
+                index: &self.index,
+            },
+        )?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
 
-        if (end - start) != data.len() as u64 {
+        if (*end - *start) != data.len() as u64 {
             bail!(
                 "read chunk with wrong size ({} != {}",
-                (end - start),
+                (*end - *start),
                 data.len()
             );
         }
 
+        // fixme: avoid copy
         self.read_buffer.clear();
         self.read_buffer.extend_from_slice(&data);
 
         self.buffered_chunk_idx = idx;
-        self.buffered_chunk_start = start as u64;
+        self.buffered_chunk_start = *start;
         //println!("BUFFER {} {}", self.buffered_chunk_start, end);
         Ok(())
     }
 
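With this change, buffer_chunk no longer reads and decompresses a chunk on every call: lru_cache.access hands back references into the cached (start, end, data) tuple, which is why the size check now dereferences *start and *end and why the data is still copied into read_buffer afterwards. The separate ChunkCacher struct, borrowing only store and index, presumably exists so that the access call can mutably borrow self.lru_cache while the miss path still reaches the chunk store; passing &mut self into the cacher would conflict with that borrow. A minimal sketch of the same split-borrow idea, using hypothetical types (Reader, Store) that are not part of the patch:

use std::collections::HashMap;

// Hypothetical stand-ins, not the proxmox-backup types.
struct Store;

impl Store {
    fn read_chunk(&mut self, idx: usize) -> Vec<u8> {
        vec![idx as u8; 4] // dummy chunk data
    }
}

struct Reader {
    cache: HashMap<usize, Vec<u8>>,
    store: Store,
}

impl Reader {
    fn buffer_chunk(&mut self, idx: usize) -> &[u8] {
        // Borrow disjoint fields separately: `self.cache` mutably for the
        // lookup/insert, `self.store` mutably inside the miss path. A helper
        // struct in the style of ChunkCacher bundles such field borrows so a
        // cache's access method can call back into them without re-borrowing
        // the whole reader.
        let store = &mut self.store;
        self.cache
            .entry(idx)
            .or_insert_with(|| store.read_chunk(idx))
    }
}

Since each cache entry holds a fully decompressed chunk, the capacity of 32 chosen in the constructor bounds the extra memory to roughly 32 decompressed chunks.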
src/tools/lru_cache.rs

@@ -100,6 +100,8 @@ pub struct LruCache<K, V> {
     _marker: PhantomData<Box<CacheNode<K, V>>>,
 }
 
+unsafe impl<K, V> Send for LruCache<K, V> {}
+
 impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
     /// Create LRU cache instance which holds up to `capacity` nodes at once.
     pub fn new(capacity: usize) -> Self {
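The PhantomData<Box<CacheNode<K, V>>> marker suggests the cache reaches its nodes through raw pointers, so the compiler does not derive Send automatically; the unsafe impl asserts that handing the whole cache to another thread is sound, presumably so that a reader holding an LruCache can still be moved into a worker thread (e.g. for FUSE). Note that the patch's impl carries no Send bounds on K and V; the standalone illustration below (hypothetical RawBox type, not part of the patch) adds a T: Send bound to stay conservative.

// Illustration only: a struct holding a raw pointer is not automatically
// Send, even if it uniquely owns the pointed-to allocation.
struct RawBox<T> {
    ptr: *mut T,
}

impl<T> RawBox<T> {
    fn new(value: T) -> Self {
        Self { ptr: Box::into_raw(Box::new(value)) }
    }
}

impl<T> Drop for RawBox<T> {
    fn drop(&mut self) {
        // SAFETY: `ptr` came from Box::into_raw and is freed exactly once.
        unsafe { drop(Box::from_raw(self.ptr)) };
    }
}

// SAFETY: RawBox owns its allocation exclusively, so transferring it to
// another thread is fine as long as T itself is Send.
unsafe impl<T: Send> Send for RawBox<T> {}

fn main() {
    let b = RawBox::new(42u64);
    // Without the unsafe impl above, this move into a new thread would not
    // compile, because *mut T makes RawBox !Send by default.
    std::thread::spawn(move || {
        let _b = b;
    })
    .join()
    .unwrap();
}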