floresta_chain/pruned_utreexo/kv_chainstore.rs

use bitcoin::consensus::deserialize;
use bitcoin::consensus::serialize;
use bitcoin::BlockHash;
use floresta_common::prelude::*;
use kv::Batch;
use kv::Bucket;
use kv::Config;
use kv::Integer;
use kv::Store;
use spin::RwLock;

use super::chain_state::BestChain;
use super::ChainStore;
use crate::DiskBlockHeader;
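
/// A `ChainStore` implementation backed by the `kv` crate. Header and height-index
/// writes are buffered in in-memory caches and only persisted to disk when `flush`
/// is called.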
pub struct KvChainStore<'a> {
_store: Store,
headers: Bucket<'a, Vec<u8>, Vec<u8>>,
index: Bucket<'a, Integer, Vec<u8>>,
meta: Bucket<'a, &'a str, Vec<u8>>,
roots: Bucket<'a, &'a str, Vec<u8>>,
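    // Headers written since the last flush, keyed by block hash.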
headers_cache: RwLock<HashMap<BlockHash, DiskBlockHeader>>,
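    // Height-to-hash index entries written since the last flush.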
index_cache: RwLock<HashMap<u32, BlockHash>>,
}
impl<'a> KvChainStore<'a> {
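    /// Opens (or creates) the chain store under `<datadir>/chain_data`.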
pub fn new(datadir: String) -> Result<KvChainStore<'a>, kv::Error> {
let cfg = Config::new(datadir + "/chain_data").cache_capacity(100_000_000);
let store = Store::new(cfg)?;
Ok(KvChainStore {
headers: store.bucket(Some("headers"))?,
index: store.bucket(Some("index"))?,
roots: store.bucket(Some("roots"))?,
meta: store.bucket(None)?,
_store: store,
headers_cache: RwLock::new(HashMap::new()),
index_cache: RwLock::new(HashMap::new()),
})
}
}
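
// `ChainStore` implementation backed by the kv buckets above. Note that `save_header`
// and `update_block_index` only touch the in-memory caches; `flush` persists them.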
impl ChainStore for KvChainStore<'_> {
type Error = kv::Error;
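
    // The kv backend performs no integrity checks; this is a no-op.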
fn check_integrity(&self) -> Result<(), Self::Error> {
Ok(())
}
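
    // Loads the roots blob saved for `height` under the key `roots_{height}`, if any.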
fn load_roots_for_block(&mut self, height: u32) -> Result<Option<Vec<u8>>, Self::Error> {
let key = format!("roots_{height}");
if let Some(roots) = self.roots.get(&key.as_str())? {
return Ok(Some(roots));
}
Ok(None)
}
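
    // Stores the roots blob for `height` under the key `roots_{height}`.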
fn save_roots_for_block(&mut self, roots: Vec<u8>, height: u32) -> Result<(), Self::Error> {
let key = format!("roots_{height}");
self.roots.set(&key.as_str(), &roots)?;
Ok(())
}
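
    // Loads the `BestChain` record saved under the "height" metadata key.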
fn load_height(&self) -> Result<Option<BestChain>, Self::Error> {
if let Some(b) = self.meta.get(&"height")? {
let height = deserialize(&b).expect("infallible: came from `serialize(height)`");
return Ok(Some(height));
}
Ok(None)
}
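
    // Serializes the `BestChain` record and stores it under the "height" metadata key.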
fn save_height(&mut self, height: &BestChain) -> Result<(), Self::Error> {
let height = serialize(height);
self.meta.set(&"height", &height)?;
Ok(())
}
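
    // Checks the in-memory header cache first, then falls back to the headers bucket.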
fn get_header(&self, block_hash: &BlockHash) -> Result<Option<DiskBlockHeader>, Self::Error> {
match self.headers_cache.read().get(block_hash) {
Some(header) => Ok(Some(*header)),
None => {
let block_hash = serialize(&block_hash);
Ok(self
.headers
.get(&block_hash)?
.and_then(|b| deserialize(&b).ok()))
}
}
}
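
    // Resolves `height` to a block hash, then loads the corresponding header.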
fn get_header_by_height(&self, height: u32) -> Result<Option<DiskBlockHeader>, Self::Error> {
let hash = self.get_block_hash(height)?;
match hash {
Some(hash) => self.get_header(&hash),
None => Ok(None),
}
}
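
    // Writes the cached headers and index entries to their buckets as two batches,
    // clears both caches, and flushes every bucket so the data hits disk.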
fn flush(&mut self) -> Result<(), Self::Error> {
let mut batch = Batch::new();
for header in self.headers_cache.read().iter() {
let ser_header = serialize(header.1);
let block_hash = serialize(&header.1.block_hash());
batch.set(&block_hash, &ser_header)?;
}
self.headers.batch(batch)?;
self.headers_cache.write().clear();
let mut batch = Batch::new();
for (height, hash) in self.index_cache.read().iter() {
let ser_hash = serialize(hash);
batch.set(&Integer::from(*height), &ser_hash)?;
}
self.index.batch(batch)?;
self.index_cache.write().clear();
self.headers.flush()?;
self.index.flush()?;
self.roots.flush()?;
self.meta.flush()?;
Ok(())
}
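
    // Only updates the in-memory cache; the header is persisted on the next `flush`.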
fn save_header(&mut self, header: &DiskBlockHeader) -> Result<(), Self::Error> {
self.headers_cache
.write()
.insert(header.block_hash(), *header);
Ok(())
}
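
    // Checks the in-memory index cache first, then falls back to the index bucket.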
fn get_block_hash(&self, height: u32) -> Result<Option<BlockHash>, Self::Error> {
match self.index_cache.read().get(&height).cloned() {
Some(hash) => Ok(Some(hash)),
None => Ok(self
.index
.get(&Integer::from(height))?
.and_then(|b| deserialize(&b).ok())),
}
}
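
    // Only updates the in-memory cache; the mapping is persisted on the next `flush`.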
fn update_block_index(&mut self, height: u32, hash: BlockHash) -> Result<(), Self::Error> {
self.index_cache.write().insert(height, hash);
Ok(())
}
}