synor/crates/synor-dag/src/dagknight.rs
Gulshan Yadav 4983193f63 feat(dag): add Phase 13 M1 - DAGKnight 32/100 BPS support
- Add BlockRateConfig enum with Standard (10 BPS), Enhanced (32 BPS),
  and Maximum (100 BPS) presets
- Add AdaptiveKBounds with scaled k ranges per block rate:
  - Standard: k 8-64, default 18
  - Enhanced: k 16-128, default 32
  - Maximum: k 50-255, default 64
- Add DagKnightManager::with_config() constructor for block rate selection
- Update adaptive k calculation to use configurable bounds
- Add NetworkConfig module in synor-consensus with:
  - BpsMode enum and NetworkConfig struct
  - DAA window, finality depth, pruning depth scaling
  - BPS comparison table generator
- Add comprehensive tests for all block rate configurations
2026-01-19 20:10:05 +05:30

726 lines
26 KiB
Rust

//! DAGKnight adaptive consensus protocol.
//!
//! DAGKnight is an evolution of GHOSTDAG that eliminates fixed network delay
//! assumptions. Instead of using a static k parameter, DAGKnight adapts based
//! on observed network conditions.
//!
//! # Key Improvements Over GHOSTDAG
//!
//! 1. **Adaptive K Parameter**: Adjusts based on measured network latency
//! 2. **Probabilistic Confirmation**: Provides confidence-based finality estimates
//! 3. **No Fixed Delay Assumption**: Learns actual network behavior
//! 4. **Faster Confirmation**: Converges faster under good network conditions
//!
//! # Block Rate Configurations
//!
//! DAGKnight supports multiple block rates via `BlockRateConfig`:
//! - **Standard (10 BPS)**: 100ms blocks, k range 8-64 (default GHOSTDAG)
//! - **Enhanced (32 BPS)**: ~31ms blocks, k range 16-128 (Phase 13 upgrade)
//! - **Maximum (100 BPS)**: 10ms blocks, k range 50-255 (stretch goal)
//!
//! Higher block rates require better network conditions and scale k bounds
//! proportionally to maintain security under increased parallel block creation.
//!
//! # Algorithm Overview
//!
//! DAGKnight maintains the core GHOSTDAG blue set selection but adds:
//! - Network latency tracking via `LatencyTracker`
//! - Dynamic k calculation based on observed anticone growth
//! - Probabilistic confirmation time estimation
//!
//! # References
//!
//! - DAGKnight Paper (2022): "DAGKnight: A Parameterless GHOSTDAG"
//! - Kaspa 2025 Roadmap: Implementation plans for production use
use std::sync::Arc;
use std::time::Duration;
use parking_lot::RwLock;
use crate::{
dag::BlockDag,
ghostdag::{GhostdagData, GhostdagError, GhostdagManager},
latency::{LatencyStats, LatencyTracker},
reachability::ReachabilityStore,
BlockId, BlockRateConfig, BlueScore, GHOSTDAG_K,
};
/// Number of latency samples required before adapting k.
/// Below this threshold `update_adaptive_k` is a no-op and the
/// configuration's `default_k` stays in effect.
const MIN_SAMPLES_FOR_ADAPTATION: usize = 100;

/// Safety margin multiplier for k calculation.
/// Higher values = more conservative (safer but lower throughput).
const SAFETY_MARGIN: f64 = 1.5;
/// K parameter bounds for different block rates.
/// Higher block rates need higher k to accommodate network latency.
#[derive(Clone, Copy, Debug)]
pub struct AdaptiveKBounds {
    /// Minimum k (security lower bound).
    pub min_k: u8,
    /// Maximum k (performance upper bound).
    pub max_k: u8,
    /// Default k when insufficient data.
    pub default_k: u8,
}

impl AdaptiveKBounds {
    /// Creates bounds for the given block rate configuration.
    ///
    /// Bounds scale with block rate so that the k-cluster can still absorb
    /// the larger anticones produced by more parallel block creation.
    pub const fn for_block_rate(config: BlockRateConfig) -> Self {
        match config {
            // Standard 10 BPS: k 8-64, default is the crate-wide GHOSTDAG_K.
            BlockRateConfig::Standard => Self {
                min_k: 8,
                max_k: 64,
                default_k: GHOSTDAG_K,
            },
            // Enhanced 32 BPS: k 16-128, default 32
            // Scaled: min and max doubled relative to Standard.
            BlockRateConfig::Enhanced => Self {
                min_k: 16,
                max_k: 128,
                default_k: 32,
            },
            // Maximum 100 BPS: k 50-255, default 64
            // Requires extremely low latency (data center grade).
            BlockRateConfig::Maximum => Self {
                min_k: 50,
                max_k: 255, // u8 max
                default_k: 64,
            },
        }
    }
}
/// Confirmation confidence levels.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ConfirmationConfidence {
    /// ~68% confidence (1 sigma).
    Low,
    /// ~95% confidence (2 sigma).
    Medium,
    /// ~99.7% confidence (3 sigma).
    High,
    /// ~99.99% confidence (4 sigma).
    VeryHigh,
}

impl ConfirmationConfidence {
    /// Maps this confidence level to its standard-normal sigma multiplier
    /// (the number of standard deviations the estimate must cover).
    fn sigma_multiplier(&self) -> f64 {
        match self {
            Self::Low => 1.0,
            Self::Medium => 2.0,
            Self::High => 3.0,
            Self::VeryHigh => 4.0,
        }
    }
}
/// Confirmation status for a block, as returned by
/// `DagKnightManager::estimate_confirmation_time`.
#[derive(Clone, Debug)]
pub struct ConfirmationStatus {
    /// Block being queried.
    pub block_id: BlockId,
    /// Current blue score depth from virtual tip
    /// (virtual tip blue score minus this block's blue score, saturating).
    pub depth: u64,
    /// Estimated time to reach requested confidence
    /// (zero when the required depth is already reached).
    pub estimated_time: Duration,
    /// Current confidence level achieved, in [0, 1].
    pub current_confidence: f64,
    /// Whether the block is considered final (depth >= finality depth
    /// for the active block rate configuration).
    pub is_final: bool,
}
/// DAGKnight manager extending GHOSTDAG with adaptive consensus.
///
/// Wraps a `GhostdagManager` and layers latency tracking plus a dynamically
/// adjusted k parameter on top of the fixed-k GHOSTDAG protocol.
pub struct DagKnightManager {
    /// Underlying GHOSTDAG manager (blue set / blue score authority).
    ghostdag: Arc<GhostdagManager>,
    /// The DAG structure.
    dag: Arc<BlockDag>,
    /// Reachability queries.
    reachability: Arc<ReachabilityStore>,
    /// Network latency tracker fed by `add_block`.
    latency_tracker: Arc<LatencyTracker>,
    /// Current adaptive k value (starts at `k_bounds.default_k`).
    adaptive_k: RwLock<u8>,
    /// Block rate configuration.
    block_rate_config: BlockRateConfig,
    /// Block rate (blocks per second). May differ from
    /// `block_rate_config.bps()` when built via `with_block_rate`.
    block_rate_bps: f64,
    /// Adaptive k bounds for this configuration.
    k_bounds: AdaptiveKBounds,
}
impl DagKnightManager {
    /// Creates a new DAGKnight manager with standard 10 BPS configuration.
    pub fn new(
        dag: Arc<BlockDag>,
        reachability: Arc<ReachabilityStore>,
    ) -> Self {
        Self::with_config(dag, reachability, BlockRateConfig::Standard)
    }

    /// Creates a DAGKnight manager with the specified block rate configuration.
    ///
    /// # Block Rate Configurations
    ///
    /// - `Standard` (10 BPS): Default configuration, suitable for most networks
    /// - `Enhanced` (32 BPS): High-throughput mode, requires ~50ms P95 latency
    /// - `Maximum` (100 BPS): Ultra-high throughput, requires data center conditions
    pub fn with_config(
        dag: Arc<BlockDag>,
        reachability: Arc<ReachabilityStore>,
        config: BlockRateConfig,
    ) -> Self {
        let ghostdag = Arc::new(GhostdagManager::new(dag.clone(), reachability.clone()));
        let latency_tracker = Arc::new(LatencyTracker::new());
        let k_bounds = AdaptiveKBounds::for_block_rate(config);
        Self {
            ghostdag,
            dag,
            reachability,
            latency_tracker,
            adaptive_k: RwLock::new(k_bounds.default_k),
            block_rate_config: config,
            block_rate_bps: config.bps(),
            k_bounds,
        }
    }

    /// Creates a DAGKnight manager with custom block rate (for testing/advanced use).
    ///
    /// Prefer `with_config()` for standard configurations. This method allows
    /// custom BPS values; k bounds are taken from the nearest preset
    /// (Standard <= 15 BPS < Enhanced <= 50 BPS < Maximum).
    pub fn with_block_rate(
        dag: Arc<BlockDag>,
        reachability: Arc<ReachabilityStore>,
        block_rate_bps: f64,
    ) -> Self {
        let ghostdag = Arc::new(GhostdagManager::new(dag.clone(), reachability.clone()));
        let latency_tracker = Arc::new(LatencyTracker::new());
        // Determine closest config for k bounds
        let config = if block_rate_bps <= 15.0 {
            BlockRateConfig::Standard
        } else if block_rate_bps <= 50.0 {
            BlockRateConfig::Enhanced
        } else {
            BlockRateConfig::Maximum
        };
        let k_bounds = AdaptiveKBounds::for_block_rate(config);
        Self {
            ghostdag,
            dag,
            reachability,
            latency_tracker,
            adaptive_k: RwLock::new(k_bounds.default_k),
            block_rate_config: config,
            // Keep the caller's exact BPS; only the bounds come from the preset.
            block_rate_bps,
            k_bounds,
        }
    }

    /// Creates a DAGKnight manager wrapping an existing GHOSTDAG manager.
    pub fn from_ghostdag(
        ghostdag: Arc<GhostdagManager>,
        dag: Arc<BlockDag>,
        reachability: Arc<ReachabilityStore>,
    ) -> Self {
        Self::from_ghostdag_with_config(ghostdag, dag, reachability, BlockRateConfig::Standard)
    }

    /// Creates a DAGKnight manager wrapping an existing GHOSTDAG manager with config.
    pub fn from_ghostdag_with_config(
        ghostdag: Arc<GhostdagManager>,
        dag: Arc<BlockDag>,
        reachability: Arc<ReachabilityStore>,
        config: BlockRateConfig,
    ) -> Self {
        let k_bounds = AdaptiveKBounds::for_block_rate(config);
        Self {
            ghostdag,
            dag,
            reachability,
            latency_tracker: Arc::new(LatencyTracker::new()),
            adaptive_k: RwLock::new(k_bounds.default_k),
            block_rate_config: config,
            block_rate_bps: config.bps(),
            k_bounds,
        }
    }

    /// Processes a new block with latency tracking.
    ///
    /// This method:
    /// 1. Records the block observation in the latency tracker
    /// 2. Delegates to GHOSTDAG for blue set calculation
    /// 3. Updates the adaptive k parameter every 50 samples
    ///
    /// # Errors
    ///
    /// Propagates any `GhostdagError` from the underlying GHOSTDAG manager.
    pub fn add_block(
        &self,
        block_id: BlockId,
        parents: &[BlockId],
        block_time_ms: u64,
    ) -> Result<GhostdagData, GhostdagError> {
        // Calculate anticone size for this block
        let anticone_size = self.calculate_anticone_size(&block_id, parents);
        // Record observation in latency tracker
        self.latency_tracker.record_block(block_id, block_time_ms, anticone_size);
        // Process with underlying GHOSTDAG
        let data = self.ghostdag.add_block(block_id, parents)?;
        // Periodically update adaptive k (every 50 samples) to amortize cost
        if self.latency_tracker.sample_count() % 50 == 0 {
            self.update_adaptive_k();
        }
        Ok(data)
    }

    /// Calculates an estimate of the anticone size for a new block.
    ///
    /// The anticone is the set of blocks that are neither ancestors nor
    /// descendants. For a new block we approximate it by counting current
    /// tips that are not parents and not in the past of any parent.
    fn calculate_anticone_size(&self, block_id: &BlockId, parents: &[BlockId]) -> usize {
        let tips = self.dag.tips();
        let mut anticone_count = 0;
        for tip in tips {
            if tip != *block_id && !parents.contains(&tip) {
                // Check if tip is in the past of any parent; reachability
                // errors are treated as "not an ancestor" (best effort).
                let in_past = parents.iter().any(|p| {
                    self.reachability
                        .is_ancestor(p, &tip)
                        .unwrap_or(false)
                });
                if !in_past {
                    anticone_count += 1;
                }
            }
        }
        anticone_count
    }

    /// Updates the adaptive k parameter based on observed latency.
    ///
    /// The adaptive k formula is:
    /// k = ceil(block_rate * network_delay * safety_margin)
    ///
    /// This ensures that even with network delays, honest miners
    /// can create blocks that fit within the k-cluster. Higher block
    /// rates (32/100 BPS) use scaled k bounds to maintain security.
    fn update_adaptive_k(&self) {
        let stats = self.latency_tracker.get_stats();
        // Don't adapt until we have enough samples
        if stats.sample_count < MIN_SAMPLES_FOR_ADAPTATION {
            return;
        }
        // Calculate k based on P95 delay (conservative)
        let delay_secs = stats.p95_delay_ms / 1000.0;
        let calculated_k = (self.block_rate_bps * delay_secs * SAFETY_MARGIN).ceil() as u16;
        // Clamp in u16 space BEFORE narrowing to u8: `calculated_k as u8`
        // silently truncates values above 255 (e.g. 100 BPS at 3s P95 gives
        // k = 450, which would wrap to 194), producing an arbitrary k instead
        // of saturating at max_k.
        let new_k =
            calculated_k.clamp(self.k_bounds.min_k as u16, self.k_bounds.max_k as u16) as u8;
        // Update only if significantly different (avoid jitter)
        let current_k = *self.adaptive_k.read();
        if (new_k as i16 - current_k as i16).abs() >= 2 {
            *self.adaptive_k.write() = new_k;
        }
    }

    /// Gets the current adaptive k parameter.
    pub fn adaptive_k(&self) -> u8 {
        *self.adaptive_k.read()
    }

    /// Gets the current latency statistics.
    pub fn latency_stats(&self) -> LatencyStats {
        self.latency_tracker.get_stats()
    }

    /// Estimates confirmation time for a block at a given confidence level.
    ///
    /// DAGKnight provides probabilistic confirmation based on:
    /// 1. Current depth (blue score difference from the virtual tip)
    /// 2. Observed network latency (mean and standard deviation)
    /// 3. Requested confidence level (sigma multiplier)
    ///
    /// # Errors
    ///
    /// Returns a `GhostdagError` if GHOSTDAG data for the block or the
    /// virtual tip cannot be retrieved.
    pub fn estimate_confirmation_time(
        &self,
        block_id: &BlockId,
        confidence: ConfirmationConfidence,
    ) -> Result<ConfirmationStatus, GhostdagError> {
        let block_data = self.ghostdag.get_data(block_id)?;
        let tip_data = self.get_virtual_tip_data()?;
        // Depth is the blue score difference
        let depth = tip_data.blue_score.saturating_sub(block_data.blue_score);
        // Get latency stats
        let stats = self.latency_tracker.get_stats();
        // Calculate required depth for requested confidence.
        // Confirmation requires depth proportional to network delay variance.
        let sigma = stats.std_dev_ms / 1000.0; // Convert to seconds
        let mean_delay = stats.mean_delay_ms / 1000.0;
        let sigma_multiplier = confidence.sigma_multiplier();
        // Required depth scales with variance and confidence level
        let required_depth =
            (self.block_rate_bps * (mean_delay + sigma * sigma_multiplier)).ceil() as u64;
        // Current confidence based on actual depth
        let current_confidence = if depth >= required_depth {
            self.calculate_confidence(depth, mean_delay, sigma)
        } else {
            // Interpolate confidence based on depth progress (capped at 0.95
            // so an unconfirmed block never reads as fully confident).
            (depth as f64 / required_depth as f64) * 0.95
        };
        // Time to reach required depth at the configured block rate
        let blocks_needed = required_depth.saturating_sub(depth);
        let time_per_block_ms = 1000.0 / self.block_rate_bps;
        let estimated_time =
            Duration::from_millis((blocks_needed as f64 * time_per_block_ms) as u64);
        // Block is final if depth exceeds finality threshold for this block rate
        let is_final = depth >= self.finality_depth();
        Ok(ConfirmationStatus {
            block_id: *block_id,
            depth,
            estimated_time,
            current_confidence,
            is_final,
        })
    }

    /// Calculates confidence level based on depth and network conditions.
    fn calculate_confidence(&self, depth: u64, mean_delay: f64, sigma: f64) -> f64 {
        // Using simplified normal CDF approximation.
        // Confidence increases with depth relative to expected delay variance.
        let depth_secs = depth as f64 / self.block_rate_bps;
        // sigma.max(0.001) guards against division by zero when variance is ~0
        let z_score = (depth_secs - mean_delay) / sigma.max(0.001);
        // Approximate CDF using the logistic function (1.7 is the standard
        // logistic-to-normal scaling constant).
        1.0 / (1.0 + (-1.7 * z_score).exp())
    }

    /// Gets the GHOSTDAG data for the virtual tip (highest blue score block).
    ///
    /// Ties on blue score keep the first tip in `dag.tips()` order.
    fn get_virtual_tip_data(&self) -> Result<GhostdagData, GhostdagError> {
        let tips = self.dag.tips();
        // NOTE(review): assumes `tips()` is never empty (genesis should always
        // be a tip) — `tips[0]` would panic otherwise; confirm this invariant
        // holds in `BlockDag`.
        let mut best_tip = tips[0];
        let mut best_score = self.ghostdag.get_blue_score(&tips[0]).unwrap_or(0);
        for tip in tips.iter().skip(1) {
            let score = self.ghostdag.get_blue_score(tip).unwrap_or(0);
            if score > best_score {
                best_score = score;
                best_tip = *tip;
            }
        }
        self.ghostdag.get_data(&best_tip)
    }

    /// Gets the underlying GHOSTDAG manager.
    pub fn ghostdag(&self) -> &Arc<GhostdagManager> {
        &self.ghostdag
    }

    /// Gets the latency tracker.
    pub fn latency_tracker(&self) -> &Arc<LatencyTracker> {
        &self.latency_tracker
    }

    /// Gets the blue score for a block (delegates to GHOSTDAG).
    pub fn get_blue_score(&self, block_id: &BlockId) -> Result<BlueScore, GhostdagError> {
        self.ghostdag.get_blue_score(block_id)
    }

    /// Gets the GHOSTDAG data for a block.
    pub fn get_data(&self, block_id: &BlockId) -> Result<GhostdagData, GhostdagError> {
        self.ghostdag.get_data(block_id)
    }

    /// Checks if a block is in the blue set.
    pub fn is_blue(&self, block_id: &BlockId) -> bool {
        self.ghostdag.is_blue(block_id)
    }

    /// Returns the selected chain from a block to genesis.
    pub fn get_selected_chain(&self, from: &BlockId) -> Result<Vec<BlockId>, GhostdagError> {
        self.ghostdag.get_selected_chain(from)
    }

    /// Resets the latency tracker (e.g., after network reconfiguration)
    /// and restores k to the configuration default.
    pub fn reset_latency_tracking(&self) {
        self.latency_tracker.reset();
        *self.adaptive_k.write() = self.k_bounds.default_k;
    }

    /// Gets the current block rate configuration.
    pub fn block_rate_config(&self) -> BlockRateConfig {
        self.block_rate_config
    }

    /// Gets the adaptive k bounds for this configuration.
    pub fn k_bounds(&self) -> AdaptiveKBounds {
        self.k_bounds
    }

    /// Gets the block rate in blocks per second.
    pub fn block_rate_bps(&self) -> f64 {
        self.block_rate_bps
    }

    /// Gets the finality depth for this configuration.
    pub fn finality_depth(&self) -> u64 {
        self.block_rate_config.finality_depth()
    }

    /// Gets the merge depth for this configuration.
    pub fn merge_depth(&self) -> u64 {
        self.block_rate_config.merge_depth()
    }

    /// Gets the pruning depth for this configuration.
    pub fn pruning_depth(&self) -> u64 {
        self.block_rate_config.pruning_depth()
    }
}
// Manual Debug: summarizes configuration plus live adaptive-k and latency
// state (derive is not possible because LatencyTracker stats are computed).
impl std::fmt::Debug for DagKnightManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let stats = self.latency_tracker.get_stats();
        f.debug_struct("DagKnightManager")
            .field("block_rate_config", &self.block_rate_config)
            .field("block_rate_bps", &self.block_rate_bps)
            .field("adaptive_k", &*self.adaptive_k.read())
            // k bounds rendered as "min-max" for compactness
            .field("k_bounds", &format!("{}-{}", self.k_bounds.min_k, self.k_bounds.max_k))
            .field("mean_delay_ms", &stats.mean_delay_ms)
            .field("sample_count", &stats.sample_count)
            .finish()
    }
}
/// Calculates the optimal k for a given network delay and block rate.
///
/// This is a utility function for network analysis. The k bounds are
/// automatically scaled based on the block rate configuration inferred
/// from `block_rate_bps` (Standard <= 15 BPS < Enhanced <= 50 BPS < Maximum).
pub fn calculate_optimal_k(network_delay_ms: f64, block_rate_bps: f64) -> u8 {
    // Determine the appropriate k bounds for this block rate
    let config = if block_rate_bps <= 15.0 {
        BlockRateConfig::Standard
    } else if block_rate_bps <= 50.0 {
        BlockRateConfig::Enhanced
    } else {
        BlockRateConfig::Maximum
    };
    let bounds = AdaptiveKBounds::for_block_rate(config);
    let delay_secs = network_delay_ms / 1000.0;
    let k = (block_rate_bps * delay_secs * SAFETY_MARGIN).ceil() as u16;
    // Clamp in u16 space BEFORE narrowing: `k as u8` silently truncates
    // values above 255 (e.g. 100 BPS at 3s delay gives k = 450, which would
    // wrap to 194) instead of saturating at max_k.
    k.clamp(bounds.min_k as u16, bounds.max_k as u16) as u8
}
/// Calculates the optimal k for a specific block rate configuration.
///
/// Like [`calculate_optimal_k`], but uses the given configuration's BPS
/// and k bounds directly instead of inferring them from a raw block rate.
pub fn calculate_optimal_k_for_config(
    network_delay_ms: f64,
    config: BlockRateConfig,
) -> u8 {
    let bounds = AdaptiveKBounds::for_block_rate(config);
    let delay_secs = network_delay_ms / 1000.0;
    let k = (config.bps() * delay_secs * SAFETY_MARGIN).ceil() as u16;
    // Clamp in u16 space BEFORE narrowing: `k as u8` silently truncates
    // values above 255 instead of saturating at max_k.
    k.clamp(bounds.min_k as u16, bounds.max_k as u16) as u8
}
/// Estimates throughput (TPS) for given network conditions.
///
/// Throughput depends on the block rate and the transaction capacity per
/// block, discounted by an estimated orphan fraction.
pub fn estimate_throughput(
    block_rate_bps: f64,
    avg_tx_per_block: u64,
    network_delay_ms: f64,
) -> f64 {
    // Expected fraction of blocks orphaned while propagating: delay in
    // seconds times block rate, capped at 50% so the estimate never falls
    // below half the nominal rate.
    let orphan_fraction = (network_delay_ms / 1000.0 * block_rate_bps).min(0.5);
    let usable_bps = block_rate_bps * (1.0 - orphan_fraction);
    usable_bps * avg_tx_per_block as f64
}
#[cfg(test)]
mod tests {
    use super::*;
    use synor_types::Hash256;

    /// Builds a deterministic 32-byte block id whose first byte is `n`.
    fn make_block_id(n: u8) -> BlockId {
        let mut bytes = [0u8; 32];
        bytes[0] = n;
        Hash256::from_bytes(bytes)
    }

    /// Creates a genesis-only DAG with a default (Standard) manager.
    fn setup_test_dag() -> (Arc<BlockDag>, Arc<ReachabilityStore>, DagKnightManager) {
        let genesis = make_block_id(0);
        let dag = Arc::new(BlockDag::new(genesis, 0));
        let reachability = Arc::new(ReachabilityStore::new(genesis));
        let dagknight = DagKnightManager::new(dag.clone(), reachability.clone());
        (dag, reachability, dagknight)
    }

    /// Creates a genesis-only DAG with a manager for the given block rate config.
    fn setup_test_dag_with_config(config: BlockRateConfig) -> (Arc<BlockDag>, Arc<ReachabilityStore>, DagKnightManager) {
        let genesis = make_block_id(0);
        let dag = Arc::new(BlockDag::new(genesis, 0));
        let reachability = Arc::new(ReachabilityStore::new(genesis));
        let dagknight = DagKnightManager::with_config(dag.clone(), reachability.clone(), config);
        (dag, reachability, dagknight)
    }

    #[test]
    fn test_initial_k_standard() {
        let (_, _, dagknight) = setup_test_dag();
        let bounds = AdaptiveKBounds::for_block_rate(BlockRateConfig::Standard);
        assert_eq!(dagknight.adaptive_k(), bounds.default_k);
        assert_eq!(dagknight.block_rate_bps(), 10.0);
    }

    #[test]
    fn test_initial_k_enhanced() {
        let (_, _, dagknight) = setup_test_dag_with_config(BlockRateConfig::Enhanced);
        let bounds = AdaptiveKBounds::for_block_rate(BlockRateConfig::Enhanced);
        assert_eq!(dagknight.adaptive_k(), bounds.default_k);
        assert_eq!(dagknight.block_rate_bps(), 32.0);
        assert_eq!(dagknight.k_bounds().min_k, 16);
        assert_eq!(dagknight.k_bounds().max_k, 128);
    }

    #[test]
    fn test_initial_k_maximum() {
        let (_, _, dagknight) = setup_test_dag_with_config(BlockRateConfig::Maximum);
        let bounds = AdaptiveKBounds::for_block_rate(BlockRateConfig::Maximum);
        assert_eq!(dagknight.adaptive_k(), bounds.default_k);
        assert_eq!(dagknight.block_rate_bps(), 100.0);
        assert_eq!(dagknight.k_bounds().min_k, 50);
        assert_eq!(dagknight.k_bounds().max_k, 255);
    }

    #[test]
    fn test_adaptive_k_bounds_scaling() {
        let standard = AdaptiveKBounds::for_block_rate(BlockRateConfig::Standard);
        let enhanced = AdaptiveKBounds::for_block_rate(BlockRateConfig::Enhanced);
        let maximum = AdaptiveKBounds::for_block_rate(BlockRateConfig::Maximum);
        // Higher block rates should have higher k bounds
        assert!(enhanced.min_k > standard.min_k);
        assert!(enhanced.max_k > standard.max_k);
        assert!(maximum.min_k > enhanced.min_k);
        assert!(maximum.max_k > enhanced.max_k);
    }

    #[test]
    fn test_calculate_optimal_k() {
        // Standard mode (10 BPS)
        let bounds = AdaptiveKBounds::for_block_rate(BlockRateConfig::Standard);
        // 100ms delay at 10 BPS: k = ceil(10 * 0.1 * 1.5) = 2, clamped to min_k (8)
        let k_low = calculate_optimal_k(100.0, 10.0);
        assert!(k_low >= bounds.min_k);
        assert!(k_low <= bounds.max_k);
        // 1000ms delay at 10 BPS: k = ceil(10 * 1.0 * 1.5) = 15, above MIN
        let k_medium = calculate_optimal_k(1000.0, 10.0);
        assert!(k_medium >= bounds.min_k);
        // 3000ms delay at 10 BPS: k = ceil(10 * 3.0 * 1.5) = 45
        let k_high = calculate_optimal_k(3000.0, 10.0);
        assert!(k_high > k_medium);
        assert!(k_high > k_low);
    }

    #[test]
    fn test_calculate_optimal_k_for_config() {
        // Test Enhanced mode (32 BPS) requires higher k for same delay
        let k_standard = calculate_optimal_k_for_config(100.0, BlockRateConfig::Standard);
        let k_enhanced = calculate_optimal_k_for_config(100.0, BlockRateConfig::Enhanced);
        let k_maximum = calculate_optimal_k_for_config(100.0, BlockRateConfig::Maximum);
        // Higher block rates calculate higher k for same delay
        // Standard: ceil(10 * 0.1 * 1.5) = 2 -> clamped to 8
        // Enhanced: ceil(32 * 0.1 * 1.5) = 5 -> clamped to 16
        // Maximum: ceil(100 * 0.1 * 1.5) = 15 -> clamped to 50
        assert!(k_enhanced >= k_standard);
        assert!(k_maximum >= k_enhanced);
    }

    #[test]
    fn test_estimate_throughput() {
        // Good network: 10ms delay - orphan_rate = 0.01 * 10 = 0.1
        let tps_good = estimate_throughput(10.0, 100, 10.0);
        // Poor network: 40ms delay - orphan_rate = 0.04 * 10 = 0.4
        let tps_poor = estimate_throughput(10.0, 100, 40.0);
        // Good network should have higher throughput
        assert!(tps_good > tps_poor, "tps_good={} should be > tps_poor={}", tps_good, tps_poor);
    }

    #[test]
    fn test_throughput_by_config() {
        // At same network conditions, higher BPS = higher theoretical TPS
        let tps_10 = estimate_throughput(10.0, 100, 20.0); // 10 BPS
        let tps_32 = estimate_throughput(32.0, 100, 20.0); // 32 BPS
        let tps_100 = estimate_throughput(100.0, 100, 20.0); // 100 BPS
        // Higher block rates give higher TPS (with network overhead)
        assert!(tps_32 > tps_10);
        assert!(tps_100 > tps_32);
    }

    #[test]
    fn test_finality_depth_scaling() {
        let (_, _, standard) = setup_test_dag_with_config(BlockRateConfig::Standard);
        let (_, _, enhanced) = setup_test_dag_with_config(BlockRateConfig::Enhanced);
        let (_, _, maximum) = setup_test_dag_with_config(BlockRateConfig::Maximum);
        // All configs should have ~2.4 hours of finality time
        let standard_time_hrs = standard.finality_depth() as f64 / 10.0 / 3600.0;
        let enhanced_time_hrs = enhanced.finality_depth() as f64 / 32.0 / 3600.0;
        let maximum_time_hrs = maximum.finality_depth() as f64 / 100.0 / 3600.0;
        // Should all be approximately 2.4 hours (allow some variance)
        assert!((standard_time_hrs - 2.4).abs() < 0.1, "standard: {}", standard_time_hrs);
        assert!((enhanced_time_hrs - 2.4).abs() < 0.1, "enhanced: {}", enhanced_time_hrs);
        assert!((maximum_time_hrs - 2.4).abs() < 0.1, "maximum: {}", maximum_time_hrs);
    }

    #[test]
    fn test_confidence_levels() {
        assert!(ConfirmationConfidence::VeryHigh.sigma_multiplier()
            > ConfirmationConfidence::High.sigma_multiplier());
        assert!(ConfirmationConfidence::High.sigma_multiplier()
            > ConfirmationConfidence::Medium.sigma_multiplier());
        assert!(ConfirmationConfidence::Medium.sigma_multiplier()
            > ConfirmationConfidence::Low.sigma_multiplier());
    }

    #[test]
    fn test_block_rate_config_values() {
        assert_eq!(BlockRateConfig::Standard.bps(), 10.0);
        assert_eq!(BlockRateConfig::Enhanced.bps(), 32.0);
        assert_eq!(BlockRateConfig::Maximum.bps(), 100.0);
        assert_eq!(BlockRateConfig::Standard.block_time_ms(), 100);
        assert_eq!(BlockRateConfig::Enhanced.block_time_ms(), 31);
        assert_eq!(BlockRateConfig::Maximum.block_time_ms(), 10);
    }
}