synor/crates/synor-storage/src/stores.rs
Gulshan Yadav 5c643af64c fix: resolve all clippy warnings for CI
Fix all Rust clippy warnings that were causing CI failures when built
with RUSTFLAGS=-Dwarnings. Changes include:

- Replace derivable_impls with derive macros for BlockBody, Network, etc.
- Use div_ceil() instead of manual implementation
- Fix should_implement_trait by renaming from_str to parse
- Add type aliases for type_complexity warnings
- Use or_default(), is_some_and(), is_multiple_of() where appropriate
- Remove needless borrows and redundant closures
- Fix manual_strip with strip_prefix()
- Add allow attributes for intentional patterns (too_many_arguments,
  needless_range_loop in cryptographic code, assertions_on_constants)
- Remove unused imports, mut bindings, and dead code in tests
2026-01-08 05:58:22 +05:30

851 lines
26 KiB
Rust

//! Individual store implementations for Synor blockchain data.
//!
//! Each store provides typed access to a specific category of blockchain data.
use crate::{cf, db::Database, keys, DbError};
use borsh::{BorshDeserialize, BorshSerialize};
use std::sync::Arc;
use synor_types::{BlockHeader, BlockId, Hash256, Transaction, TransactionId};
/// Trait for stores that support batch operations.
///
/// Implementors expose the name of the column family they write to so a
/// batch writer can route operations to the right family.
pub trait BatchStore {
    /// Returns the column family name.
    fn cf_name(&self) -> &'static str;
}
/// Store for block headers.
///
/// Headers are keyed by their block hash in the `HEADERS` column family;
/// a height-to-hash index is kept in the `METADATA` column family.
pub struct HeaderStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl HeaderStore {
/// Creates a new header store.
pub fn new(db: Arc<Database>) -> Self {
HeaderStore { db }
}
/// Gets a header by block hash.
pub fn get(&self, hash: &Hash256) -> Result<Option<BlockHeader>, DbError> {
match self.db.get(cf::HEADERS, hash.as_bytes())? {
Some(bytes) => {
let header = BlockHeader::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(Some(header))
}
None => Ok(None),
}
}
/// Stores a header.
pub fn put(&self, header: &BlockHeader) -> Result<(), DbError> {
let hash = header.block_id();
let bytes = borsh::to_vec(header).map_err(|e| DbError::Serialization(e.to_string()))?;
self.db.put(cf::HEADERS, hash.as_bytes(), &bytes)
}
/// Deletes a header.
pub fn delete(&self, hash: &Hash256) -> Result<(), DbError> {
self.db.delete(cf::HEADERS, hash.as_bytes())
}
/// Checks if a header exists.
pub fn exists(&self, hash: &Hash256) -> Result<bool, DbError> {
self.db.exists(cf::HEADERS, hash.as_bytes())
}
/// Gets multiple headers by hash.
pub fn multi_get(&self, hashes: &[Hash256]) -> Result<Vec<Option<BlockHeader>>, DbError> {
let keys: Vec<&[u8]> = hashes.iter().map(|h| h.as_bytes() as &[u8]).collect();
let results = self.db.multi_get(cf::HEADERS, &keys)?;
results
.into_iter()
.map(|opt| {
opt.map(|bytes| {
BlockHeader::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))
})
.transpose()
})
.collect()
}
/// Gets the header at a specific height (using height index).
pub fn get_by_height(&self, height: u64) -> Result<Option<BlockHeader>, DbError> {
let key = keys::prefixed_key(keys::HEIGHT_TO_HASH, &keys::encode_u64(height));
match self.db.get(cf::METADATA, &key)? {
Some(hash_bytes) => {
if hash_bytes.len() != 32 {
return Err(DbError::Deserialization("Invalid hash length".to_string()));
}
let mut hash_arr = [0u8; 32];
hash_arr.copy_from_slice(&hash_bytes);
let hash = Hash256::from_bytes(hash_arr);
self.get(&hash)
}
None => Ok(None),
}
}
/// Indexes a header by height.
pub fn index_by_height(&self, height: u64, hash: &Hash256) -> Result<(), DbError> {
let key = keys::prefixed_key(keys::HEIGHT_TO_HASH, &keys::encode_u64(height));
self.db.put(cf::METADATA, &key, hash.as_bytes())
}
}
impl BatchStore for HeaderStore {
    /// Headers live in the `HEADERS` column family.
    fn cf_name(&self) -> &'static str {
        cf::HEADERS
    }
}
/// Store for full blocks.
///
/// Stores only the [`BlockBody`] (transaction id list); the header is kept
/// separately in [`HeaderStore`].
pub struct BlockStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
/// Serializable block body (transactions only, header stored separately).
///
/// Derives `Debug` and `Default` for consistency with the other stored
/// types in this module (`StoredUtxo`, `StoredRelations`, ...); `Default`
/// yields an empty body.
#[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)]
pub struct BlockBody {
    /// Transaction IDs in the block.
    pub transaction_ids: Vec<TransactionId>,
}
impl BlockStore {
    /// Creates a new block store.
    pub fn new(db: Arc<Database>) -> Self {
        Self { db }
    }

    /// Gets a block body by hash.
    pub fn get(&self, hash: &Hash256) -> Result<Option<BlockBody>, DbError> {
        self.db
            .get(cf::BLOCKS, hash.as_bytes())?
            .map(|bytes| {
                BlockBody::try_from_slice(&bytes)
                    .map_err(|e| DbError::Deserialization(e.to_string()))
            })
            .transpose()
    }

    /// Stores a block body under the given hash.
    pub fn put(&self, hash: &Hash256, body: &BlockBody) -> Result<(), DbError> {
        let encoded = borsh::to_vec(body).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::BLOCKS, hash.as_bytes(), &encoded)
    }

    /// Deletes a block body.
    pub fn delete(&self, hash: &Hash256) -> Result<(), DbError> {
        self.db.delete(cf::BLOCKS, hash.as_bytes())
    }

    /// Checks if a block exists.
    pub fn exists(&self, hash: &Hash256) -> Result<bool, DbError> {
        self.db.exists(cf::BLOCKS, hash.as_bytes())
    }
}
impl BatchStore for BlockStore {
    /// Block bodies live in the `BLOCKS` column family.
    fn cf_name(&self) -> &'static str {
        cf::BLOCKS
    }
}
/// Store for transactions.
///
/// Transactions are keyed by their txid in the `TRANSACTIONS` column family.
pub struct TransactionStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl TransactionStore {
    /// Creates a new transaction store.
    pub fn new(db: Arc<Database>) -> Self {
        Self { db }
    }

    /// Decodes a Borsh-encoded transaction, mapping failures to `DbError`.
    fn decode(bytes: &[u8]) -> Result<Transaction, DbError> {
        Transaction::try_from_slice(bytes).map_err(|e| DbError::Deserialization(e.to_string()))
    }

    /// Gets a transaction by ID.
    pub fn get(&self, txid: &TransactionId) -> Result<Option<Transaction>, DbError> {
        self.db
            .get(cf::TRANSACTIONS, txid.as_bytes())?
            .map(|bytes| Self::decode(&bytes))
            .transpose()
    }

    /// Stores a transaction, keyed by its txid.
    pub fn put(&self, tx: &Transaction) -> Result<(), DbError> {
        let encoded = borsh::to_vec(tx).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::TRANSACTIONS, tx.txid().as_bytes(), &encoded)
    }

    /// Deletes a transaction.
    pub fn delete(&self, txid: &TransactionId) -> Result<(), DbError> {
        self.db.delete(cf::TRANSACTIONS, txid.as_bytes())
    }

    /// Checks if a transaction exists.
    pub fn exists(&self, txid: &TransactionId) -> Result<bool, DbError> {
        self.db.exists(cf::TRANSACTIONS, txid.as_bytes())
    }

    /// Gets multiple transactions by ID; each slot is `None` when missing.
    pub fn multi_get(&self, txids: &[TransactionId]) -> Result<Vec<Option<Transaction>>, DbError> {
        let keys: Vec<&[u8]> = txids.iter().map(|t| t.as_bytes() as &[u8]).collect();
        self.db
            .multi_get(cf::TRANSACTIONS, &keys)?
            .into_iter()
            .map(|slot| slot.as_deref().map(Self::decode).transpose())
            .collect()
    }
}
impl BatchStore for TransactionStore {
    /// Transactions live in the `TRANSACTIONS` column family.
    fn cf_name(&self) -> &'static str {
        cf::TRANSACTIONS
    }
}
/// UTXO entry for storage.
///
/// Persisted value for an unspent transaction output. The outpoint
/// (txid + output index) is encoded in the database key, not in this struct.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct StoredUtxo {
    /// Output amount (units defined by the chain — not interpreted here).
    pub amount: u64,
    /// Script pubkey (address hash).
    pub script_pubkey: Vec<u8>,
    /// Block DAA score when created.
    pub block_daa_score: u64,
    /// Whether this is a coinbase output.
    pub is_coinbase: bool,
}
/// Store for UTXOs.
///
/// Keys are `txid (32 bytes) || output index (4 bytes, big-endian)`, so all
/// outputs of a transaction share the txid as a key prefix.
pub struct UtxoStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl UtxoStore {
    /// Creates a new UTXO store.
    pub fn new(db: Arc<Database>) -> Self {
        UtxoStore { db }
    }

    /// Creates a key for a UTXO: 32-byte txid followed by the output index
    /// as 4 big-endian bytes (big-endian keeps indices ordered and makes the
    /// txid a stable key prefix).
    fn make_key(txid: &TransactionId, index: u32) -> Vec<u8> {
        let mut key = Vec::with_capacity(36);
        key.extend_from_slice(txid.as_bytes());
        key.extend_from_slice(&index.to_be_bytes());
        key
    }

    /// Gets a UTXO by outpoint.
    pub fn get(&self, txid: &TransactionId, index: u32) -> Result<Option<StoredUtxo>, DbError> {
        let key = Self::make_key(txid, index);
        match self.db.get(cf::UTXOS, &key)? {
            Some(bytes) => {
                let utxo = StoredUtxo::try_from_slice(&bytes)
                    .map_err(|e| DbError::Deserialization(e.to_string()))?;
                Ok(Some(utxo))
            }
            None => Ok(None),
        }
    }

    /// Stores a UTXO.
    pub fn put(&self, txid: &TransactionId, index: u32, utxo: &StoredUtxo) -> Result<(), DbError> {
        let key = Self::make_key(txid, index);
        let bytes = borsh::to_vec(utxo).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::UTXOS, &key, &bytes)
    }

    /// Deletes a UTXO (marks as spent).
    pub fn delete(&self, txid: &TransactionId, index: u32) -> Result<(), DbError> {
        let key = Self::make_key(txid, index);
        self.db.delete(cf::UTXOS, &key)
    }

    /// Checks if a UTXO exists (is unspent).
    pub fn exists(&self, txid: &TransactionId, index: u32) -> Result<bool, DbError> {
        let key = Self::make_key(txid, index);
        self.db.exists(cf::UTXOS, &key)
    }

    /// Gets all UTXOs for a transaction via a prefix scan on the txid.
    pub fn get_by_tx(&self, txid: &TransactionId) -> Result<Vec<(u32, StoredUtxo)>, DbError> {
        let mut utxos = Vec::new();
        for (key, value) in self.db.prefix_iter(cf::UTXOS, txid.as_bytes())? {
            // Skip anything that is not exactly txid (32) + index (4).
            if key.len() != 36 {
                continue;
            }
            // The length check above guarantees the 4-byte conversion succeeds.
            let index = u32::from_be_bytes(key[32..36].try_into().expect("4-byte index slice"));
            let utxo = StoredUtxo::try_from_slice(&value)
                .map_err(|e| DbError::Deserialization(e.to_string()))?;
            utxos.push((index, utxo));
        }
        Ok(utxos)
    }

    /// Counts total UTXOs (for statistics).
    ///
    /// Performs a full scan of the column family, so this is O(n).
    pub fn count(&self) -> Result<usize, DbError> {
        // Iterator::count replaces the manual counting loop (clippy idiom).
        Ok(self.db.iter(cf::UTXOS)?.count())
    }
}
impl BatchStore for UtxoStore {
    /// UTXOs live in the `UTXOS` column family.
    fn cf_name(&self) -> &'static str {
        cf::UTXOS
    }
}
/// Stored relations for DAG.
///
/// Tracks both directions of the parent/child edges so either side can be
/// looked up without scanning. `Default` is derived (both fields are `Vec`)
/// so an empty relation set can be built without spelling out the fields.
#[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)]
pub struct StoredRelations {
    /// Parent block IDs.
    pub parents: Vec<BlockId>,
    /// Child block IDs.
    pub children: Vec<BlockId>,
}
/// Store for DAG relations.
///
/// Relations are keyed by block ID in the `RELATIONS` column family.
pub struct RelationsStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl RelationsStore {
    /// Creates a new relations store.
    pub fn new(db: Arc<Database>) -> Self {
        RelationsStore { db }
    }

    /// Gets relations for a block.
    pub fn get(&self, block_id: &BlockId) -> Result<Option<StoredRelations>, DbError> {
        match self.db.get(cf::RELATIONS, block_id.as_bytes())? {
            Some(bytes) => {
                let relations = StoredRelations::try_from_slice(&bytes)
                    .map_err(|e| DbError::Deserialization(e.to_string()))?;
                Ok(Some(relations))
            }
            None => Ok(None),
        }
    }

    /// Stores relations for a block, overwriting any existing entry.
    pub fn put(&self, block_id: &BlockId, relations: &StoredRelations) -> Result<(), DbError> {
        let bytes = borsh::to_vec(relations).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::RELATIONS, block_id.as_bytes(), &bytes)
    }

    /// Gets parents of a block (empty if no relations are stored).
    pub fn get_parents(&self, block_id: &BlockId) -> Result<Vec<BlockId>, DbError> {
        Ok(self.get(block_id)?.map(|r| r.parents).unwrap_or_default())
    }

    /// Gets children of a block (empty if no relations are stored).
    pub fn get_children(&self, block_id: &BlockId) -> Result<Vec<BlockId>, DbError> {
        Ok(self.get(block_id)?.map(|r| r.children).unwrap_or_default())
    }

    /// Adds a child to a block's relations, creating an empty entry if none
    /// exists; the write is skipped when the child is already recorded.
    ///
    /// NOTE(review): this is a read-modify-write with no locking; concurrent
    /// callers could lose an update — confirm callers serialize access.
    pub fn add_child(&self, parent_id: &BlockId, child_id: BlockId) -> Result<(), DbError> {
        // Build the fallback lazily so nothing is constructed on the common
        // path where relations already exist (clippy: or_fun_call).
        let mut relations = self.get(parent_id)?.unwrap_or_else(|| StoredRelations {
            parents: Vec::new(),
            children: Vec::new(),
        });
        if !relations.children.contains(&child_id) {
            relations.children.push(child_id);
            self.put(parent_id, &relations)?;
        }
        Ok(())
    }
}
impl BatchStore for RelationsStore {
    /// Relations live in the `RELATIONS` column family.
    fn cf_name(&self) -> &'static str {
        cf::RELATIONS
    }
}
/// Stored GHOSTDAG data.
///
/// Per-block GHOSTDAG output as persisted to disk, keyed by block ID.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct StoredGhostdagData {
    /// Blue score of this block.
    pub blue_score: u64,
    /// Selected parent block ID.
    pub selected_parent: BlockId,
    /// Blocks in the blue merge set.
    pub merge_set_blues: Vec<BlockId>,
    /// Blocks in the red merge set.
    pub merge_set_reds: Vec<BlockId>,
    /// Blues anticone sizes for each blue in merge set (block ID paired
    /// with its anticone size).
    pub blues_anticone_sizes: Vec<(BlockId, u64)>,
}
/// Store for GHOSTDAG data.
///
/// Data is keyed by block ID in the `GHOSTDAG` column family.
pub struct GhostdagStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl GhostdagStore {
    /// Creates a new GHOSTDAG store.
    pub fn new(db: Arc<Database>) -> Self {
        Self { db }
    }

    /// Gets GHOSTDAG data for a block.
    pub fn get(&self, block_id: &BlockId) -> Result<Option<StoredGhostdagData>, DbError> {
        self.db
            .get(cf::GHOSTDAG, block_id.as_bytes())?
            .map(|bytes| {
                StoredGhostdagData::try_from_slice(&bytes)
                    .map_err(|e| DbError::Deserialization(e.to_string()))
            })
            .transpose()
    }

    /// Stores GHOSTDAG data for a block.
    pub fn put(&self, block_id: &BlockId, data: &StoredGhostdagData) -> Result<(), DbError> {
        let encoded = borsh::to_vec(data).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::GHOSTDAG, block_id.as_bytes(), &encoded)
    }

    /// Gets the blue score of a block, if GHOSTDAG data is stored for it.
    pub fn get_blue_score(&self, block_id: &BlockId) -> Result<Option<u64>, DbError> {
        Ok(self.get(block_id)?.map(|data| data.blue_score))
    }

    /// Gets the selected parent of a block, if GHOSTDAG data is stored for it.
    pub fn get_selected_parent(&self, block_id: &BlockId) -> Result<Option<BlockId>, DbError> {
        Ok(self.get(block_id)?.map(|data| data.selected_parent))
    }
}
impl BatchStore for GhostdagStore {
    /// GHOSTDAG data lives in the `GHOSTDAG` column family.
    fn cf_name(&self) -> &'static str {
        cf::GHOSTDAG
    }
}
/// Store for chain metadata.
///
/// Holds singleton values (tips, pruning point, chain state, genesis) under
/// well-known keys in the `METADATA` column family.
pub struct MetadataStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
/// Key constants for metadata.
impl MetadataStore {
    /// Current DAG tip set (Borsh-encoded `Vec<BlockId>`).
    const KEY_TIPS: &'static [u8] = b"tips";
    /// Current pruning point (Borsh-encoded `BlockId`).
    const KEY_PRUNING_POINT: &'static [u8] = b"pruning_point";
    /// Aggregate chain statistics (Borsh-encoded `ChainState`).
    const KEY_CHAIN_STATE: &'static [u8] = b"chain_state";
    /// Genesis block ID (Borsh-encoded `BlockId`).
    const KEY_GENESIS: &'static [u8] = b"genesis";
    /// Virtual state key — not accessed in this file; presumably used
    /// elsewhere in the crate (verify before removing).
    const KEY_VIRTUAL_STATE: &'static [u8] = b"virtual_state";
}
/// Chain state metadata.
///
/// Aggregate counters persisted as a single value in the metadata column
/// family (see `KEY_CHAIN_STATE`).
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct ChainState {
    /// Highest blue score in the DAG.
    pub max_blue_score: u64,
    /// Total number of blocks.
    pub total_blocks: u64,
    /// Current DAA score.
    pub daa_score: u64,
    /// Current difficulty bits.
    pub difficulty_bits: u32,
    /// Total work (as bytes; encoding is defined by the producer and not
    /// interpreted by this store).
    pub total_work: Vec<u8>,
}
impl MetadataStore {
/// Creates a new metadata store.
pub fn new(db: Arc<Database>) -> Self {
MetadataStore { db }
}
/// Gets the current DAG tips.
pub fn get_tips(&self) -> Result<Vec<BlockId>, DbError> {
match self.db.get(cf::METADATA, Self::KEY_TIPS)? {
Some(bytes) => {
let tips = Vec::<BlockId>::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(tips)
}
None => Ok(Vec::new()),
}
}
/// Sets the current DAG tips.
pub fn set_tips(&self, tips: &[BlockId]) -> Result<(), DbError> {
let bytes = borsh::to_vec(tips).map_err(|e| DbError::Serialization(e.to_string()))?;
self.db.put(cf::METADATA, Self::KEY_TIPS, &bytes)
}
/// Gets the pruning point.
pub fn get_pruning_point(&self) -> Result<Option<BlockId>, DbError> {
match self.db.get(cf::METADATA, Self::KEY_PRUNING_POINT)? {
Some(bytes) => {
let point = BlockId::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(Some(point))
}
None => Ok(None),
}
}
/// Sets the pruning point.
pub fn set_pruning_point(&self, point: &BlockId) -> Result<(), DbError> {
let bytes = borsh::to_vec(point).map_err(|e| DbError::Serialization(e.to_string()))?;
self.db.put(cf::METADATA, Self::KEY_PRUNING_POINT, &bytes)
}
/// Gets the chain state.
pub fn get_chain_state(&self) -> Result<Option<ChainState>, DbError> {
match self.db.get(cf::METADATA, Self::KEY_CHAIN_STATE)? {
Some(bytes) => {
let state = ChainState::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(Some(state))
}
None => Ok(None),
}
}
/// Sets the chain state.
pub fn set_chain_state(&self, state: &ChainState) -> Result<(), DbError> {
let bytes = borsh::to_vec(state).map_err(|e| DbError::Serialization(e.to_string()))?;
self.db.put(cf::METADATA, Self::KEY_CHAIN_STATE, &bytes)
}
/// Gets the genesis block ID.
pub fn get_genesis(&self) -> Result<Option<BlockId>, DbError> {
match self.db.get(cf::METADATA, Self::KEY_GENESIS)? {
Some(bytes) => {
let genesis = BlockId::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(Some(genesis))
}
None => Ok(None),
}
}
/// Sets the genesis block ID.
pub fn set_genesis(&self, genesis: &BlockId) -> Result<(), DbError> {
let bytes = borsh::to_vec(genesis).map_err(|e| DbError::Serialization(e.to_string()))?;
self.db.put(cf::METADATA, Self::KEY_GENESIS, &bytes)
}
/// Gets a generic metadata value.
pub fn get_raw(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
self.db.get(cf::METADATA, key)
}
/// Sets a generic metadata value.
pub fn put_raw(&self, key: &[u8], value: &[u8]) -> Result<(), DbError> {
self.db.put(cf::METADATA, key, value)
}
}
impl BatchStore for MetadataStore {
    /// Metadata lives in the `METADATA` column family.
    fn cf_name(&self) -> &'static str {
        cf::METADATA
    }
}
/// Stored contract data.
///
/// Persisted record for a deployed contract; stored under `code_hash`.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct StoredContract {
    /// WASM bytecode.
    pub code: Vec<u8>,
    /// Code hash (contract ID); also serves as the database key.
    pub code_hash: [u8; 32],
    /// Deployer address.
    pub deployer: Vec<u8>,
    /// Deployment timestamp.
    pub deployed_at: u64,
    /// Deployment block height.
    pub deployed_height: u64,
}
/// Store for smart contract bytecode and metadata.
///
/// Contracts are keyed by their code hash in the `CONTRACTS` column family.
pub struct ContractStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl ContractStore {
    /// Creates a new contract store.
    pub fn new(db: Arc<Database>) -> Self {
        ContractStore { db }
    }

    /// Gets contract bytecode by contract ID (code hash).
    ///
    /// Delegates to [`Self::get`] so the fetch/decode logic lives in one
    /// place instead of being duplicated.
    pub fn get_code(&self, contract_id: &[u8; 32]) -> Result<Option<Vec<u8>>, DbError> {
        Ok(self.get(contract_id)?.map(|contract| contract.code))
    }

    /// Gets full contract info by contract ID.
    pub fn get(&self, contract_id: &[u8; 32]) -> Result<Option<StoredContract>, DbError> {
        match self.db.get(cf::CONTRACTS, contract_id)? {
            Some(bytes) => {
                let contract = StoredContract::try_from_slice(&bytes)
                    .map_err(|e| DbError::Deserialization(e.to_string()))?;
                Ok(Some(contract))
            }
            None => Ok(None),
        }
    }

    /// Stores a contract, keyed by its code hash.
    pub fn put(&self, contract: &StoredContract) -> Result<(), DbError> {
        let bytes = borsh::to_vec(contract).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::CONTRACTS, &contract.code_hash, &bytes)
    }

    /// Checks if a contract exists.
    pub fn exists(&self, contract_id: &[u8; 32]) -> Result<bool, DbError> {
        self.db.exists(cf::CONTRACTS, contract_id)
    }

    /// Deletes a contract.
    pub fn delete(&self, contract_id: &[u8; 32]) -> Result<(), DbError> {
        self.db.delete(cf::CONTRACTS, contract_id)
    }
}
impl BatchStore for ContractStore {
    /// Contracts live in the `CONTRACTS` column family.
    fn cf_name(&self) -> &'static str {
        cf::CONTRACTS
    }
}
/// Store for smart contract state (key-value per contract).
///
/// Keys are `contract_id (32 bytes) || storage_key (32 bytes)`, so all
/// entries of one contract share its ID as a key prefix.
pub struct ContractStateStore {
    // Shared handle to the underlying key-value database.
    db: Arc<Database>,
}
impl ContractStateStore {
    /// Creates a new contract state store.
    pub fn new(db: Arc<Database>) -> Self {
        ContractStateStore { db }
    }

    /// Creates a storage key: 32-byte contract ID followed by the 32-byte
    /// storage key, making the contract ID a stable key prefix.
    fn make_key(contract_id: &[u8; 32], storage_key: &[u8; 32]) -> Vec<u8> {
        let mut key = Vec::with_capacity(64);
        key.extend_from_slice(contract_id);
        key.extend_from_slice(storage_key);
        key
    }

    /// Gets a storage value.
    pub fn get(
        &self,
        contract_id: &[u8; 32],
        storage_key: &[u8; 32],
    ) -> Result<Option<Vec<u8>>, DbError> {
        let key = Self::make_key(contract_id, storage_key);
        self.db.get(cf::CONTRACT_STATE, &key)
    }

    /// Sets a storage value.
    pub fn set(
        &self,
        contract_id: &[u8; 32],
        storage_key: &[u8; 32],
        value: &[u8],
    ) -> Result<(), DbError> {
        let key = Self::make_key(contract_id, storage_key);
        self.db.put(cf::CONTRACT_STATE, &key, value)
    }

    /// Deletes a storage value.
    pub fn delete(&self, contract_id: &[u8; 32], storage_key: &[u8; 32]) -> Result<(), DbError> {
        let key = Self::make_key(contract_id, storage_key);
        self.db.delete(cf::CONTRACT_STATE, &key)
    }

    /// Gets all storage entries for a contract via a prefix scan.
    #[allow(clippy::type_complexity)]
    pub fn get_all(&self, contract_id: &[u8; 32]) -> Result<Vec<([u8; 32], Vec<u8>)>, DbError> {
        let mut entries = Vec::new();
        for (key, value) in self.db.prefix_iter(cf::CONTRACT_STATE, contract_id)? {
            // Only well-formed keys (contract 32 + storage 32) are returned.
            if key.len() == 64 {
                let mut storage_key = [0u8; 32];
                storage_key.copy_from_slice(&key[32..64]);
                entries.push((storage_key, value.to_vec()));
            }
        }
        Ok(entries)
    }

    /// Clears all storage for a contract.
    pub fn clear_contract(&self, contract_id: &[u8; 32]) -> Result<(), DbError> {
        // Collect the keys up front so we never delete from the column
        // family while a prefix iterator over it is still live; deleting
        // during iteration depends on iterator snapshot semantics we should
        // not rely on here.
        let doomed: Vec<_> = self
            .db
            .prefix_iter(cf::CONTRACT_STATE, contract_id)?
            .map(|(key, _)| key)
            .collect();
        for key in doomed {
            self.db.delete(cf::CONTRACT_STATE, &key)?;
        }
        Ok(())
    }
}
impl BatchStore for ContractStateStore {
    /// Contract state lives in the `CONTRACT_STATE` column family.
    fn cf_name(&self) -> &'static str {
        cf::CONTRACT_STATE
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::DatabaseConfig;
use tempfile::tempdir;
/// Opens a fresh database in a temporary directory.
///
/// The `TempDir` guard is deliberately leaked: dropping it at the end of
/// this function (as the previous version did) deletes the directory while
/// the returned `Database` still has it open, which only worked by accident
/// of OS file-handle semantics. Leaked test dirs live under the system temp
/// directory and are cleaned up by the platform.
fn create_test_db() -> Arc<Database> {
    let dir = tempdir().unwrap();
    let config = DatabaseConfig::for_testing();
    let db = Arc::new(Database::open(dir.path(), &config).unwrap());
    std::mem::forget(dir);
    db
}
#[test]
fn test_utxo_store() {
    let store = UtxoStore::new(create_test_db());
    let txid = TransactionId::from_bytes([1u8; 32]);
    let entry = StoredUtxo {
        amount: 1000,
        script_pubkey: vec![0x00, 0x14, 0xab],
        block_daa_score: 100,
        is_coinbase: false,
    };

    // Round-trip: write the entry, then read it back.
    store.put(&txid, 0, &entry).unwrap();
    let fetched = store.get(&txid, 0).unwrap().unwrap();
    assert_eq!(fetched.amount, 1000);
    assert_eq!(fetched.block_daa_score, 100);

    // Only outpoint index 0 was written.
    assert!(store.exists(&txid, 0).unwrap());
    assert!(!store.exists(&txid, 1).unwrap());

    // Deleting the outpoint marks it spent.
    store.delete(&txid, 0).unwrap();
    assert!(!store.exists(&txid, 0).unwrap());
}
#[test]
fn test_relations_store() {
    let store = RelationsStore::new(create_test_db());
    let block_id = BlockId::from_bytes([1u8; 32]);

    // Seed the block with two parents and no children.
    let initial = StoredRelations {
        parents: vec![BlockId::from_bytes([2u8; 32]), BlockId::from_bytes([3u8; 32])],
        children: Vec::new(),
    };
    store.put(&block_id, &initial).unwrap();
    assert_eq!(store.get_parents(&block_id).unwrap().len(), 2);

    // Adding a child updates the stored relations.
    let child = BlockId::from_bytes([4u8; 32]);
    store.add_child(&block_id, child).unwrap();
    let children = store.get_children(&block_id).unwrap();
    assert_eq!(children.len(), 1);
    assert_eq!(children[0], child);
}
#[test]
fn test_ghostdag_store() {
    let store = GhostdagStore::new(create_test_db());
    let block_id = BlockId::from_bytes([1u8; 32]);
    let selected_parent = BlockId::from_bytes([2u8; 32]);

    // Persist GHOSTDAG data for a single block.
    let data = StoredGhostdagData {
        blue_score: 100,
        selected_parent,
        merge_set_blues: vec![BlockId::from_bytes([3u8; 32])],
        merge_set_reds: vec![],
        blues_anticone_sizes: vec![],
    };
    store.put(&block_id, &data).unwrap();

    // The typed accessors read individual fields back out.
    assert_eq!(store.get_blue_score(&block_id).unwrap(), Some(100));
    assert_eq!(
        store.get_selected_parent(&block_id).unwrap(),
        Some(selected_parent)
    );
}
#[test]
fn test_metadata_store() {
    let store = MetadataStore::new(create_test_db());

    // Tips round-trip.
    let tips = vec![BlockId::from_bytes([1u8; 32]), BlockId::from_bytes([2u8; 32])];
    store.set_tips(&tips).unwrap();
    assert_eq!(store.get_tips().unwrap().len(), 2);

    // Genesis round-trip.
    let genesis = BlockId::from_bytes([0u8; 32]);
    store.set_genesis(&genesis).unwrap();
    assert_eq!(store.get_genesis().unwrap().unwrap(), genesis);

    // Chain state round-trip.
    let state = ChainState {
        max_blue_score: 1000,
        total_blocks: 500,
        daa_score: 1000,
        difficulty_bits: 0x1d00ffff,
        total_work: vec![0x00, 0x01, 0x02],
    };
    store.set_chain_state(&state).unwrap();
    assert_eq!(store.get_chain_state().unwrap().unwrap().max_blue_score, 1000);
}
}