// File: synor/crates/synor-storage/benches/storage_bench.rs
// From commit 48949ebb3f (Gulshan Yadav) — "Initial commit: Synor blockchain monorepo".
// Commit message: a complete blockchain implementation featuring:
//   - synord: full node with GHOSTDAG consensus
//   - explorer-web: modern React blockchain explorer with 3D DAG visualization
//   - CLI wallet and tools
//   - smart contract SDK and example contracts (DEX, NFT, token)
//   - WASM crypto library for browser/mobile
// Committed 2026-01-08 05:22:17 +05:30 — 853 lines, 24 KiB, Rust.

//! Storage benchmarks for Synor blockchain.
//!
//! Benchmarks:
//! - Block header read/write
//! - UTXO lookup/insert/delete
//! - Batch operations
//! - Iterator performance
//! - Cache operations
//!
//! Run with: cargo bench -p synor-storage --bench storage_bench
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::sync::Arc;
use synor_storage::{
cache::{CacheConfig, LruCache, StorageCache},
cf, Database, DatabaseConfig,
stores::{
ChainState, GhostdagStore, HeaderStore, MetadataStore, RelationsStore,
StoredGhostdagData, StoredRelations, StoredUtxo, UtxoStore,
},
};
use synor_types::{BlockHeader, BlockId, BlueScore, Hash256, Timestamp, TransactionId};
use tempfile::TempDir;
// ==================== Test Data Helpers ====================
/// Builds a 32-byte hash deterministically derived from `n`.
///
/// The first 8 bytes carry `n` little-endian and the final 8 bytes carry
/// `n` big-endian, so distinct inputs always produce distinct digests.
fn make_hash(n: u64) -> Hash256 {
    let mut buf = [0u8; 32];
    buf[..8].copy_from_slice(&n.to_le_bytes());
    buf[24..].copy_from_slice(&n.to_be_bytes());
    Hash256::from_bytes(buf)
}
/// Deterministic block ID for `n` — same byte layout as `make_hash`.
fn make_block_id(n: u64) -> BlockId {
    make_hash(n)
}
/// Deterministic transaction ID for `n`, re-using the hash layout.
fn make_txid(n: u64) -> TransactionId {
    let digest = make_hash(n);
    TransactionId::from_bytes(*digest.as_bytes())
}
/// Constructs a synthetic block header whose every field is derived from
/// `n`, so headers are fully reproducible across benchmark runs.
fn make_header(n: u64) -> BlockHeader {
    // Single parent pointing at the previous index (saturating at genesis).
    let parent = make_hash(n.saturating_sub(1));
    BlockHeader {
        version: 1,
        parents: vec![parent],
        merkle_root: make_hash(n * 1000),
        accepted_id_merkle_root: make_hash(n * 1001),
        utxo_commitment: make_hash(n * 1002),
        timestamp: Timestamp::from_millis(1_700_000_000_000 + n * 100),
        bits: 0x1d00ffff,
        nonce: n,
        daa_score: n * 10,
        blue_score: BlueScore::new(n * 10),
        blue_work: make_hash(n * 1003),
        pruning_point: make_hash(0),
    }
}
/// Creates a test UTXO entry derived from `n`.
///
/// The script is a fixed 25-byte pattern: two header bytes (`0x00 0x14`)
/// followed by 23 copies of the low byte of `n`.
fn make_utxo(n: u64) -> StoredUtxo {
    // Build the 25-byte script in one pass instead of a manual push loop:
    // reserve, write the two header bytes, then fill to length 25.
    let mut script_pubkey = Vec::with_capacity(25);
    script_pubkey.extend_from_slice(&[0x00, 0x14]);
    script_pubkey.resize(25, (n & 0xFF) as u8);
    StoredUtxo {
        amount: 1_000_000_000 + n * 1000,
        script_pubkey,
        block_daa_score: n * 10,
        // Every 10th UTXO is flagged coinbase so both paths get exercised.
        is_coinbase: n % 10 == 0,
    }
}
/// Builds deterministic GHOSTDAG metadata for block `n`: five blue
/// predecessors, two red blocks, and three anticone-size entries.
fn make_ghostdag_data(n: u64) -> StoredGhostdagData {
    StoredGhostdagData {
        blue_score: n * 10,
        selected_parent: make_block_id(n.saturating_sub(1)),
        merge_set_blues: (1..=5).map(|i| make_block_id(n.saturating_sub(i))).collect(),
        merge_set_reds: (100..=101).map(|off| make_block_id(n + off)).collect(),
        blues_anticone_sizes: (1..=3).map(|i| (make_block_id(n.saturating_sub(i)), i)).collect(),
    }
}
/// Builds deterministic DAG relations for block `n`: three predecessors
/// (saturating at genesis) and two successors.
fn make_relations(n: u64) -> StoredRelations {
    StoredRelations {
        parents: (1..4).map(|i| make_block_id(n.saturating_sub(i))).collect(),
        children: (1..3).map(|i| make_block_id(n + i)).collect(),
    }
}
/// Opens a fresh database inside a brand-new temporary directory.
///
/// The `TempDir` guard is returned alongside the handle so the caller
/// decides when the on-disk files are removed.
fn setup_db() -> (TempDir, Arc<Database>) {
    let dir = TempDir::new().expect("Failed to create temp dir");
    let db = Database::open(dir.path(), &DatabaseConfig::for_testing())
        .expect("Failed to open database");
    (dir, Arc::new(db))
}
/// Fills a header store with `count` deterministic headers (indices 0..count).
fn populate_headers(db: &Arc<Database>, count: usize) -> HeaderStore {
    let store = HeaderStore::new(Arc::clone(db));
    (0..count as u64).for_each(|i| store.put(&make_header(i)).unwrap());
    store
}
/// Fills a UTXO store with `count` deterministic entries, all at output index 0.
fn populate_utxos(db: &Arc<Database>, count: usize) -> UtxoStore {
    let store = UtxoStore::new(Arc::clone(db));
    for i in 0..count as u64 {
        store.put(&make_txid(i), 0, &make_utxo(i)).unwrap();
    }
    store
}
// ==================== Block Header Read/Write ====================
/// Benchmarks a single header write against a fresh database per sample.
fn header_write_single(c: &mut Criterion) {
    let mut group = c.benchmark_group("header_write");
    group.throughput(Throughput::Elements(1));
    group.bench_function("single", |b| {
        b.iter_batched(
            // New database each sample so results are not skewed by
            // accumulated state from earlier writes.
            || {
                let (dir, db) = setup_db();
                (dir, HeaderStore::new(db))
            },
            |(_dir, store)| {
                let header = make_header(12345);
                black_box(store.put(&header))
            },
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
}
fn header_write_batch(c: &mut Criterion) {
let mut group = c.benchmark_group("header_write_batch");
for count in [10, 50, 100, 500] {
let headers: Vec<BlockHeader> = (0..count).map(|i| make_header(i as u64)).collect();
group.throughput(Throughput::Elements(count as u64));
group.bench_with_input(
BenchmarkId::from_parameter(count),
&headers,
|b, hdrs| {
b.iter_batched(
|| {
let (dir, db) = setup_db();
let store = HeaderStore::new(db);
(dir, store)
},
|(_dir, store)| {
for header in hdrs {
store.put(header).unwrap();
}
},
criterion::BatchSize::SmallInput,
)
},
);
}
group.finish();
}
/// Benchmarks one point lookup in a 1000-header database.
fn header_read_single(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = populate_headers(&db, 1000);
    // Probe the middle of the populated range.
    let target = make_header(500).block_id();
    let mut group = c.benchmark_group("header_read");
    group.throughput(Throughput::Elements(1));
    group.bench_function("single", |b| b.iter(|| black_box(store.get(&target))));
    group.finish();
    drop(dir);
}
/// Benchmarks header lookups as the database grows by two orders of magnitude.
fn header_read_varying_db_sizes(c: &mut Criterion) {
    let mut group = c.benchmark_group("header_read_db_size");
    for db_size in [100, 1000, 10000] {
        let (dir, db) = setup_db();
        let store = populate_headers(&db, db_size);
        // Always read from the middle of the key range.
        let target = make_header((db_size / 2) as u64).block_id();
        group.bench_with_input(BenchmarkId::from_parameter(db_size), &target, |b, hash| {
            b.iter(|| black_box(store.get(hash)))
        });
        drop(dir);
    }
    group.finish();
}
/// Benchmarks batched multi-key header lookups of increasing size.
fn header_multi_get(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = populate_headers(&db, 1000);
    let mut group = c.benchmark_group("header_multi_get");
    for count in [5, 10, 50, 100] {
        // Keys spread every 10th header across the populated range.
        let hashes: Vec<Hash256> = (0..count)
            .map(|i| make_header((i * 10) as u64).block_id())
            .collect();
        group.throughput(Throughput::Elements(count as u64));
        group.bench_with_input(BenchmarkId::from_parameter(count), &hashes, |b, h| {
            b.iter(|| black_box(store.multi_get(h)))
        });
    }
    group.finish();
    drop(dir);
}
/// Benchmarks existence checks for both a present and an absent header key.
fn header_exists_check(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = populate_headers(&db, 1000);
    let mut group = c.benchmark_group("header_exists");
    // Key that is definitely in the store.
    let present = make_header(500).block_id();
    group.bench_function("exists_true", |b| b.iter(|| black_box(store.exists(&present))));
    // Key outside the populated range.
    let absent = make_hash(99999);
    group.bench_function("exists_false", |b| b.iter(|| black_box(store.exists(&absent))));
    group.finish();
    drop(dir);
}
// ==================== UTXO Lookup/Insert/Delete ====================
/// Benchmarks a single UTXO insert against a fresh database per sample.
fn utxo_insert(c: &mut Criterion) {
    let mut group = c.benchmark_group("utxo_insert");
    group.throughput(Throughput::Elements(1));
    group.bench_function("single", |b| {
        b.iter_batched(
            || {
                let (dir, db) = setup_db();
                (dir, UtxoStore::new(db))
            },
            |(dir, store)| {
                let txid = make_txid(12345);
                let utxo = make_utxo(12345);
                let result = store.put(&txid, 0, &utxo);
                // FIX: the old code called `drop(dir)` here, timing the
                // TempDir's recursive directory removal inside the measured
                // routine. Criterion drops the routine's return value outside
                // the timing window, so hand the guards back instead.
                (dir, store, black_box(result))
            },
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
}
/// Benchmarks a single UTXO point lookup across growing database sizes.
fn utxo_lookup(c: &mut Criterion) {
    let mut group = c.benchmark_group("utxo_lookup");
    for db_size in [100, 1000, 10000, 100000] {
        let (dir, db) = setup_db();
        let store = populate_utxos(&db, db_size);
        // Probe the middle of the populated range.
        let target = make_txid((db_size / 2) as u64);
        group.bench_with_input(BenchmarkId::new("size", db_size), &target, |b, txid| {
            b.iter(|| black_box(store.get(txid, 0)))
        });
        drop(dir);
    }
    group.finish();
}
/// Benchmarks deleting one UTXO from databases pre-populated to two sizes.
fn utxo_delete(c: &mut Criterion) {
    let mut group = c.benchmark_group("utxo_delete");
    group.throughput(Throughput::Elements(1));
    for db_size in [1000, 10000] {
        group.bench_with_input(BenchmarkId::new("size", db_size), &db_size, |b, &size| {
            b.iter_batched(
                || {
                    let (dir, db) = setup_db();
                    let store = populate_utxos(&db, size);
                    let target_txid = make_txid((size / 2) as u64);
                    (dir, store, target_txid)
                },
                |(dir, store, txid)| {
                    let result = store.delete(&txid, 0);
                    // FIX: previously `drop(dir)` ran inside the routine, so
                    // temp-directory removal was included in the measured
                    // delete time. Returning the guards lets criterion drop
                    // them outside the timing window.
                    (dir, store, black_box(result))
                },
                criterion::BatchSize::SmallInput,
            )
        });
    }
    group.finish();
}
/// Benchmarks UTXO existence checks for a live and a missing outpoint.
fn utxo_exists(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = populate_utxos(&db, 10000);
    let mut group = c.benchmark_group("utxo_exists");
    // Outpoint known to be present.
    let live = make_txid(5000);
    group.bench_function("exists_true", |b| b.iter(|| black_box(store.exists(&live, 0))));
    // Outpoint outside the populated range (simulates a spent output).
    let gone = make_txid(99999);
    group.bench_function("exists_false", |b| b.iter(|| black_box(store.exists(&gone, 0))));
    group.finish();
    drop(dir);
}
/// Benchmarks fetching every output that belongs to a single transaction.
fn utxo_get_by_tx(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = UtxoStore::new(db.clone());
    // One transaction carrying ten outputs.
    let txid = make_txid(1);
    for output_index in 0..10 {
        store.put(&txid, output_index, &make_utxo(output_index as u64)).unwrap();
    }
    let mut group = c.benchmark_group("utxo_get_by_tx");
    group.bench_function("10_outputs", |b| b.iter(|| black_box(store.get_by_tx(&txid))));
    group.finish();
    drop(dir);
}
// ==================== Batch Operations ====================
fn batch_write_mixed(c: &mut Criterion) {
let mut group = c.benchmark_group("batch_write");
for batch_size in [10, 50, 100, 500] {
group.throughput(Throughput::Elements(batch_size as u64));
group.bench_with_input(
BenchmarkId::new("mixed", batch_size),
&batch_size,
|b, &size| {
b.iter_batched(
|| {
let (dir, db) = setup_db();
(dir, db)
},
|(_dir, db)| {
let mut batch = db.batch();
for i in 0..size {
let key = make_hash(i as u64);
let value = vec![0xABu8; 100];
batch.put(cf::HEADERS, key.as_bytes(), &value).unwrap();
}
black_box(db.write_batch(batch))
},
criterion::BatchSize::SmallInput,
)
},
);
}
group.finish();
}
fn batch_headers_and_utxos(c: &mut Criterion) {
let mut group = c.benchmark_group("batch_mixed_cf");
for item_count in [10, 50, 100] {
let headers: Vec<Vec<u8>> = (0..item_count)
.map(|i| borsh::to_vec(&make_header(i as u64)).unwrap())
.collect();
let utxos: Vec<Vec<u8>> = (0..item_count)
.map(|i| borsh::to_vec(&make_utxo(i as u64)).unwrap())
.collect();
group.throughput(Throughput::Elements((item_count * 2) as u64));
group.bench_with_input(
BenchmarkId::from_parameter(item_count),
&(headers, utxos),
|b, (hdrs, utxs)| {
b.iter_batched(
|| {
let (dir, db) = setup_db();
(dir, db)
},
|(_dir, db)| {
let mut batch = db.batch();
for (i, h) in hdrs.iter().enumerate() {
let key = make_hash(i as u64);
batch.put(cf::HEADERS, key.as_bytes(), h).unwrap();
}
for (i, u) in utxs.iter().enumerate() {
let mut key = Vec::with_capacity(36);
key.extend_from_slice(make_txid(i as u64).as_bytes());
key.extend_from_slice(&0u32.to_be_bytes());
batch.put(cf::UTXOS, &key, u).unwrap();
}
black_box(db.write_batch(batch))
},
criterion::BatchSize::SmallInput,
)
},
);
}
group.finish();
}
// ==================== Iterator Performance ====================
/// Benchmarks a full scan of the headers column family at several sizes.
fn iterator_full_scan(c: &mut Criterion) {
    let mut group = c.benchmark_group("iterator_scan");
    for db_size in [100, 1000, 5000] {
        let (dir, db) = setup_db();
        let _store = populate_headers(&db, db_size);
        group.throughput(Throughput::Elements(db_size as u64));
        group.bench_with_input(BenchmarkId::from_parameter(db_size), &db, |b, database| {
            // Consume the iterator and count entries to force a full scan.
            b.iter(|| black_box(database.iter(cf::HEADERS).unwrap().count()))
        });
        drop(dir);
    }
    group.finish();
}
/// Benchmarks prefix iteration: all outputs of one transaction in a store
/// holding 100 transactions with 10 outputs each.
fn iterator_prefix_scan(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = UtxoStore::new(db.clone());
    for tx_num in 0..100u64 {
        let txid = make_txid(tx_num);
        for output_idx in 0..10 {
            let utxo = make_utxo(tx_num * 10 + output_idx as u64);
            store.put(&txid, output_idx, &utxo).unwrap();
        }
    }
    let mut group = c.benchmark_group("iterator_prefix");
    // Scan only the outputs belonging to transaction 50.
    let target = make_txid(50);
    group.bench_function("single_tx_utxos", |b| {
        b.iter(|| black_box(store.get_by_tx(&target)))
    });
    group.finish();
    drop(dir);
}
// ==================== GHOSTDAG Store ====================
/// Benchmarks GHOSTDAG metadata reads and writes on a 1000-entry store.
fn ghostdag_operations(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = GhostdagStore::new(db.clone());
    for i in 0..1000 {
        store.put(&make_block_id(i), &make_ghostdag_data(i)).unwrap();
    }
    let mut group = c.benchmark_group("ghostdag_store");
    let target = make_block_id(500);
    // Point reads: full record vs. individual field accessors.
    group.bench_function("get", |b| b.iter(|| black_box(store.get(&target))));
    group.bench_function("get_blue_score", |b| {
        b.iter(|| black_box(store.get_blue_score(&target)))
    });
    group.bench_function("get_selected_parent", |b| {
        b.iter(|| black_box(store.get_selected_parent(&target)))
    });
    // Writes: always the same key, so the store's size stays constant.
    group.bench_function("put", |b| {
        b.iter_batched(
            || (make_block_id(99999), make_ghostdag_data(99999)),
            |(id, data)| black_box(store.put(&id, &data)),
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
    drop(dir);
}
// ==================== Relations Store ====================
/// Benchmarks parent/child relation reads and incremental child insertion.
fn relations_operations(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = RelationsStore::new(db.clone());
    for i in 0..1000 {
        store.put(&make_block_id(i), &make_relations(i)).unwrap();
    }
    let mut group = c.benchmark_group("relations_store");
    let target = make_block_id(500);
    group.bench_function("get", |b| b.iter(|| black_box(store.get(&target))));
    group.bench_function("get_parents", |b| b.iter(|| black_box(store.get_parents(&target))));
    group.bench_function("get_children", |b| b.iter(|| black_box(store.get_children(&target))));
    // NOTE(review): each sample adds the same child to block 999; if
    // `add_child` does not deduplicate, the child list grows across samples
    // and later iterations measure larger records — confirm against the
    // store implementation.
    group.bench_function("add_child", |b| {
        b.iter_batched(
            || make_block_id(999),
            |parent| black_box(store.add_child(&parent, make_block_id(1001))),
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
    drop(dir);
}
// ==================== Metadata Store ====================
/// Benchmarks metadata accessors: tips, genesis, and chain-state round trips.
fn metadata_operations(c: &mut Criterion) {
    let (dir, db) = setup_db();
    let store = MetadataStore::new(db.clone());
    // Seed tips and genesis so the read benches hit real data.
    let tips = vec![make_block_id(100), make_block_id(101), make_block_id(102)];
    store.set_tips(&tips).unwrap();
    store.set_genesis(&make_block_id(0)).unwrap();
    let mut group = c.benchmark_group("metadata_store");
    group.bench_function("get_tips", |b| b.iter(|| black_box(store.get_tips())));
    let replacement_tips = vec![make_block_id(200), make_block_id(201)];
    group.bench_function("set_tips", |b| {
        b.iter(|| black_box(store.set_tips(&replacement_tips)))
    });
    group.bench_function("get_genesis", |b| b.iter(|| black_box(store.get_genesis())));
    group.bench_function("get_chain_state", |b| {
        b.iter(|| black_box(store.get_chain_state()))
    });
    // Representative chain-state snapshot for the write benchmark.
    let state = ChainState {
        max_blue_score: 10000,
        total_blocks: 5000,
        daa_score: 10000,
        difficulty_bits: 0x1d00ffff,
        total_work: vec![0x00, 0x01, 0x02, 0x03],
    };
    group.bench_function("set_chain_state", |b| {
        b.iter(|| black_box(store.set_chain_state(&state)))
    });
    group.finish();
    drop(dir);
}
// ==================== Cache Operations ====================
/// Benchmarks the raw LRU cache: fill-to-capacity inserts and hit-path gets.
fn lru_cache_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("lru_cache");
    // Insert: filling an empty cache up to exactly its capacity.
    for capacity in [100, 1000, 10000] {
        group.bench_with_input(BenchmarkId::new("insert", capacity), &capacity, |b, &cap| {
            b.iter_batched(
                || LruCache::<u64, u64>::new(cap),
                |mut cache| {
                    (0..cap as u64).for_each(|i| cache.insert(i, i * 2));
                    black_box(cache)
                },
                criterion::BatchSize::SmallInput,
            )
        });
    }
    // Get: lookup of a key known to be present in a cache filled exactly
    // to capacity (no eviction has occurred at this point).
    for capacity in [100, 1000, 10000] {
        group.bench_with_input(BenchmarkId::new("get_hit", capacity), &capacity, |b, &cap| {
            b.iter_batched(
                || {
                    let mut cache = LruCache::<u64, u64>::new(cap);
                    for i in 0..cap as u64 {
                        cache.insert(i, i * 2);
                    }
                    (cache, (cap / 2) as u64)
                },
                |(mut cache, key)| black_box(cache.get(&key)),
                criterion::BatchSize::SmallInput,
            )
        });
    }
    group.finish();
}
/// Benchmarks the composite storage cache: UTXO hit/miss lookups, inserts,
/// and cheap introspection calls.
fn storage_cache_operations(c: &mut Criterion) {
    let cache = StorageCache::new(CacheConfig {
        header_cache_size: 1000,
        block_cache_size: 100,
        tx_cache_size: 5000,
        utxo_cache_size: 10000,
        ghostdag_cache_size: 5000,
        relations_cache_size: 5000,
    });
    // Warm the UTXO cache with 500 entries.
    for i in 0..500 {
        cache.insert_utxo(make_txid(i), 0, make_utxo(i));
    }
    let mut group = c.benchmark_group("storage_cache");
    // Lookup of a warmed entry.
    let hit = make_txid(250);
    group.bench_function("utxo_get_hit", |b| b.iter(|| black_box(cache.get_utxo(&hit, 0))));
    // Lookup of a key that was never inserted.
    let miss = make_txid(99999);
    group.bench_function("utxo_get_miss", |b| b.iter(|| black_box(cache.get_utxo(&miss, 0))));
    // Insert with a fresh key per iteration so the insert path (rather
    // than an overwrite of a single slot) is exercised.
    group.bench_function("utxo_insert", |b| {
        let mut next_key = 10000u64;
        b.iter(|| {
            cache.insert_utxo(make_txid(next_key), 0, make_utxo(next_key));
            next_key += 1;
        })
    });
    group.bench_function("stats", |b| b.iter(|| black_box(cache.stats())));
    group.bench_function("total_entries", |b| b.iter(|| black_box(cache.total_entries())));
    group.finish();
}
// ==================== Database Configuration ====================
/// Benchmarks config construction and a cold database open.
fn database_creation(c: &mut Criterion) {
    let mut group = c.benchmark_group("database_creation");
    // Config constructors — expected to be near-free; these give a baseline.
    group.bench_function("config_default", |b| {
        b.iter(|| black_box(DatabaseConfig::default()))
    });
    group.bench_function("config_ssd", |b| b.iter(|| black_box(DatabaseConfig::ssd())));
    group.bench_function("config_low_memory", |b| {
        b.iter(|| black_box(DatabaseConfig::low_memory()))
    });
    // Opening against a brand-new temporary directory each sample.
    group.bench_function("open_database", |b| {
        b.iter_batched(
            || TempDir::new().unwrap(),
            |dir| {
                let config = DatabaseConfig::for_testing();
                black_box(Database::open(dir.path(), &config))
            },
            criterion::BatchSize::SmallInput,
        )
    });
    group.finish();
}
// ==================== Criterion Groups ====================
// Header store benchmarks: single/batch writes, reads at several DB sizes,
// multi-key gets, and existence checks.
criterion_group!(
header_benches,
header_write_single,
header_write_batch,
header_read_single,
header_read_varying_db_sizes,
header_multi_get,
header_exists_check,
);
// UTXO store benchmarks: insert, lookup, delete, existence, per-tx fetch.
criterion_group!(
utxo_benches,
utxo_insert,
utxo_lookup,
utxo_delete,
utxo_exists,
utxo_get_by_tx,
);
// Atomic write-batch benchmarks, single-CF and mixed-CF.
criterion_group!(
batch_benches,
batch_write_mixed,
batch_headers_and_utxos,
);
// Iterator benchmarks: full column-family scan and prefix scan.
criterion_group!(
iterator_benches,
iterator_full_scan,
iterator_prefix_scan,
);
// Specialized store benchmarks: GHOSTDAG data, DAG relations, metadata.
criterion_group!(
store_benches,
ghostdag_operations,
relations_operations,
metadata_operations,
);
// In-memory cache benchmarks: raw LRU and the composite storage cache.
criterion_group!(
cache_benches,
lru_cache_operations,
storage_cache_operations,
);
// Database configuration and cold-open benchmarks.
criterion_group!(
db_benches,
database_creation,
);
// Entry point: runs every group above.
criterion_main!(
header_benches,
utxo_benches,
batch_benches,
iterator_benches,
store_benches,
cache_benches,
db_benches
);