// Context: part of a blockchain implementation featuring synord (a full node
// with GHOSTDAG consensus), explorer-web (a React blockchain explorer with 3D
// DAG visualization), a CLI wallet and tools, a smart contract SDK with
// example contracts (DEX, NFT, token), and a WASM crypto library for
// browser/mobile.
//! Reorganization and DAG restructuring tests.
//!
//! These tests verify:
//! - DAG restructuring when new blocks arrive
//! - Virtual selected parent chain updates
//! - UTXO rollback and reapplication
//! - Mempool restoration after reorgs
//! - Transaction conflict resolution
//! - Blue score recalculation during restructuring
use std::sync::Arc;
use std::time::Duration;

use tempfile::TempDir;
use tokio::time::sleep;
use tracing::info;

use synord::config::NodeConfig;
use synord::node::{NodeState, SynorNode};
// ==================== Test Helpers ====================
|
|
|
|
/// Creates a test node configuration.
|
|
fn create_node_config(temp_dir: &TempDir, node_index: u16, seeds: Vec<String>) -> NodeConfig {
|
|
let mut config = NodeConfig::for_network("devnet").unwrap();
|
|
config.data_dir = temp_dir.path().join(format!("node_{}", node_index));
|
|
config.mining.enabled = false;
|
|
|
|
// Use unique ports per test to avoid conflicts
|
|
let port_base = 19000 + (std::process::id() % 500) as u16 * 10 + node_index * 3;
|
|
config.p2p.listen_addr = format!("/ip4/127.0.0.1/tcp/{}", port_base);
|
|
config.rpc.http_addr = format!("127.0.0.1:{}", port_base + 1);
|
|
config.rpc.ws_addr = format!("127.0.0.1:{}", port_base + 2);
|
|
config.p2p.seeds = seeds;
|
|
|
|
config
|
|
}
|
|
|
|
/// A small multi-node harness for reorg scenarios.
///
/// Owns every node together with the temp directories backing their data
/// dirs, so the directories stay alive exactly as long as the network.
struct ReorgTestNetwork {
    /// All nodes in the network, in creation order (node 0 is the seed in
    /// connected networks).
    nodes: Vec<Arc<SynorNode>>,
    /// Temp directories holding each node's data dir; retained only so they
    /// are not deleted while the nodes are running.
    _temp_dirs: Vec<TempDir>,
}
impl ReorgTestNetwork {
|
|
/// Creates a new test network.
|
|
async fn new(node_count: usize) -> anyhow::Result<Self> {
|
|
let mut temp_dirs = Vec::new();
|
|
let mut nodes = Vec::new();
|
|
|
|
// First node (seed)
|
|
let temp = TempDir::new()?;
|
|
let seed_port = 19000 + (std::process::id() % 500) as u16 * 10;
|
|
let config = create_node_config(&temp, 0, vec![]);
|
|
temp_dirs.push(temp);
|
|
nodes.push(Arc::new(SynorNode::new(config).await?));
|
|
|
|
// Remaining nodes connect to seed
|
|
for i in 1..node_count {
|
|
let temp = TempDir::new()?;
|
|
let config = create_node_config(
|
|
&temp,
|
|
i as u16,
|
|
vec![format!("/ip4/127.0.0.1/tcp/{}", seed_port)],
|
|
);
|
|
temp_dirs.push(temp);
|
|
nodes.push(Arc::new(SynorNode::new(config).await?));
|
|
}
|
|
|
|
Ok(ReorgTestNetwork {
|
|
nodes,
|
|
_temp_dirs: temp_dirs,
|
|
})
|
|
}
|
|
|
|
/// Creates an isolated network (nodes don't connect to each other).
|
|
async fn new_isolated(node_count: usize) -> anyhow::Result<Self> {
|
|
let mut temp_dirs = Vec::new();
|
|
let mut nodes = Vec::new();
|
|
|
|
for i in 0..node_count {
|
|
let temp = TempDir::new()?;
|
|
let config = create_node_config(&temp, i as u16, vec![]); // No seeds = isolated
|
|
temp_dirs.push(temp);
|
|
nodes.push(Arc::new(SynorNode::new(config).await?));
|
|
}
|
|
|
|
Ok(ReorgTestNetwork {
|
|
nodes,
|
|
_temp_dirs: temp_dirs,
|
|
})
|
|
}
|
|
|
|
/// Starts all nodes.
|
|
async fn start_all(&self) -> anyhow::Result<()> {
|
|
for node in &self.nodes {
|
|
node.start().await?;
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
/// Stops all nodes.
|
|
async fn stop_all(&self) -> anyhow::Result<()> {
|
|
for node in &self.nodes {
|
|
node.stop().await?;
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
/// Connects two isolated nodes.
|
|
async fn connect_nodes(&self, from: usize, to: usize) {
|
|
if from >= self.nodes.len() || to >= self.nodes.len() {
|
|
return;
|
|
}
|
|
|
|
// Get the listen address of the target node
|
|
let to_config = self.nodes[to].config();
|
|
let to_addr = &to_config.p2p.listen_addr;
|
|
|
|
// Connect from source to target
|
|
let from_network = self.nodes[from].network();
|
|
let _ = from_network.connect_peer(to_addr).await;
|
|
}
|
|
}
|
|
|
|
// ==================== Virtual Selected Parent Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_vsp_update_on_new_block() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Get initial VSP from both nodes
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let vsp0_initial: Option<[u8; 32]> = consensus0.virtual_selected_parent().await;
|
|
let vsp1_initial: Option<[u8; 32]> = consensus1.virtual_selected_parent().await;
|
|
|
|
info!(
|
|
node0_vsp = ?vsp0_initial.map(|v| hex::encode(&v[..8])),
|
|
node1_vsp = ?vsp1_initial.map(|v| hex::encode(&v[..8])),
|
|
"Initial VSPs"
|
|
);
|
|
|
|
// In a connected network with same state, VSPs should match
|
|
// (or be very close during block propagation)
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_vsp_convergence_after_sync() {
|
|
let network = ReorgTestNetwork::new(3).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
// Allow time for nodes to sync
|
|
sleep(Duration::from_secs(3)).await;
|
|
|
|
// Collect VSPs from all nodes
|
|
let mut vsps: Vec<Option<[u8; 32]>> = Vec::new();
|
|
for (i, node) in network.nodes.iter().enumerate() {
|
|
let consensus = node.consensus();
|
|
let vsp: Option<[u8; 32]> = consensus.virtual_selected_parent().await;
|
|
info!(node = i, vsp = ?vsp.map(|v| hex::encode(&v[..8])), "Node VSP");
|
|
vsps.push(vsp);
|
|
}
|
|
|
|
// After sync, all nodes should converge to same VSP
|
|
// (might differ temporarily during active block production)
|
|
let has_vsp_count = vsps.iter().filter(|v| v.is_some()).count();
|
|
info!(
|
|
nodes_with_vsp = has_vsp_count,
|
|
total_nodes = vsps.len(),
|
|
"VSP convergence status"
|
|
);
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== DAG Restructuring Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_dag_restructure_on_late_block() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Record initial DAG state
|
|
let consensus = network.nodes[0].consensus();
|
|
let initial_tips: Vec<[u8; 32]> = consensus.tips().await;
|
|
let initial_blue_score = consensus.current_blue_score().await;
|
|
|
|
info!(
|
|
initial_tips = initial_tips.len(),
|
|
initial_blue_score = initial_blue_score,
|
|
"Initial DAG state"
|
|
);
|
|
|
|
// In GHOSTDAG, the DAG restructures when:
|
|
// 1. A new block arrives that extends the DAG
|
|
// 2. The block might change the selected parent chain
|
|
// 3. Blue scores get recalculated
|
|
|
|
// After some time, state should evolve (if mining were enabled)
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
let final_tips: Vec<[u8; 32]> = consensus.tips().await;
|
|
let final_blue_score = consensus.current_blue_score().await;
|
|
|
|
info!(
|
|
final_tips = final_tips.len(),
|
|
final_blue_score = final_blue_score,
|
|
"Final DAG state"
|
|
);
|
|
|
|
// Blue score should remain stable or increase (never decrease)
|
|
assert!(
|
|
final_blue_score >= initial_blue_score,
|
|
"Blue score should not decrease"
|
|
);
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_selected_chain_update() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Get selected chains from both nodes
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let chain0: Vec<[u8; 32]> = consensus0.get_selected_chain(20).await;
|
|
let chain1: Vec<[u8; 32]> = consensus1.get_selected_chain(20).await;
|
|
|
|
info!(
|
|
node0_chain_len = chain0.len(),
|
|
node1_chain_len = chain1.len(),
|
|
"Selected chain lengths"
|
|
);
|
|
|
|
// Log the chain blocks
|
|
for (i, block) in chain0.iter().enumerate().take(5) {
|
|
info!(position = i, block = hex::encode(&block[..8]), "Node 0 chain");
|
|
}
|
|
|
|
for (i, block) in chain1.iter().enumerate().take(5) {
|
|
info!(position = i, block = hex::encode(&block[..8]), "Node 1 chain");
|
|
}
|
|
|
|
// Chains should be similar after sync
|
|
if !chain0.is_empty() && !chain1.is_empty() {
|
|
// Genesis should match
|
|
if chain0[0] == chain1[0] {
|
|
info!("Genesis blocks match between nodes");
|
|
}
|
|
}
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== Merge Set Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_merge_set_calculation() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
let consensus = network.nodes[0].consensus();
|
|
let tips: Vec<[u8; 32]> = consensus.tips().await;
|
|
|
|
// For each tip, examine its merge set (blues and reds)
|
|
for tip in tips.iter().take(3) {
|
|
if let Some(block_info) = consensus.get_block_info(tip).await {
|
|
let merge_set_size = block_info.blues.len() + block_info.reds.len();
|
|
info!(
|
|
block = hex::encode(&tip[..8]),
|
|
blues = block_info.blues.len(),
|
|
reds = block_info.reds.len(),
|
|
merge_set = merge_set_size,
|
|
"Block merge set"
|
|
);
|
|
|
|
// Merge set represents blocks ordered by this block
|
|
// Blues are "accepted" blocks in the k-cluster
|
|
// Reds are blocks outside the k-cluster
|
|
|
|
// In a normal DAG, most blocks should be blue
|
|
// Red blocks indicate concurrent mining beyond k-cluster capacity
|
|
}
|
|
}
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== UTXO Consistency Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_utxo_consistency_across_nodes() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(3)).await;
|
|
|
|
// Check UTXO-related consensus state across nodes
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let daa0 = consensus0.current_daa_score().await;
|
|
let daa1 = consensus1.current_daa_score().await;
|
|
|
|
let height0 = consensus0.current_height().await;
|
|
let height1 = consensus1.current_height().await;
|
|
|
|
info!(
|
|
node0_daa = daa0,
|
|
node1_daa = daa1,
|
|
node0_height = height0,
|
|
node1_height = height1,
|
|
"UTXO-related state"
|
|
);
|
|
|
|
// In a synchronized network, DAA scores and heights should be close
|
|
// Small differences are acceptable during active block propagation
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_utxo_virtual_state() {
|
|
let temp_dir = TempDir::new().unwrap();
|
|
let config = create_node_config(&temp_dir, 0, vec![]);
|
|
|
|
let node = SynorNode::new(config).await.unwrap();
|
|
node.start().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(1)).await;
|
|
|
|
// The UTXO virtual state represents spendable outputs at the DAG tips
|
|
let consensus = node.consensus();
|
|
|
|
// Get current state
|
|
let blue_score = consensus.current_blue_score().await;
|
|
let daa_score = consensus.current_daa_score().await;
|
|
|
|
info!(
|
|
blue_score = blue_score,
|
|
daa_score = daa_score,
|
|
"Virtual UTXO state context"
|
|
);
|
|
|
|
// Virtual state should be consistent with consensus
|
|
// In a fresh node, this is just genesis state
|
|
|
|
node.stop().await.unwrap();
|
|
}
|
|
|
|
// ==================== Network Partition Recovery Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_partition_recovery_dag_merge() {
|
|
// Create isolated nodes (simulating network partition)
|
|
let network = ReorgTestNetwork::new_isolated(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
// Let nodes operate independently
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Record independent states
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let tips0_before: Vec<[u8; 32]> = consensus0.tips().await;
|
|
let tips1_before: Vec<[u8; 32]> = consensus1.tips().await;
|
|
|
|
info!(
|
|
node0_tips = tips0_before.len(),
|
|
node1_tips = tips1_before.len(),
|
|
"Tips before reconnection"
|
|
);
|
|
|
|
// Connect the nodes (heal partition)
|
|
network.connect_nodes(0, 1).await;
|
|
|
|
// Wait for sync
|
|
sleep(Duration::from_secs(3)).await;
|
|
|
|
// After reconnection, DAGs should merge
|
|
let tips0_after: Vec<[u8; 32]> = consensus0.tips().await;
|
|
let tips1_after: Vec<[u8; 32]> = consensus1.tips().await;
|
|
|
|
info!(
|
|
node0_tips = tips0_after.len(),
|
|
node1_tips = tips1_after.len(),
|
|
"Tips after reconnection"
|
|
);
|
|
|
|
// In GHOSTDAG, DAG merge means:
|
|
// - Both chains become part of the unified DAG
|
|
// - Selected parent chain is recalculated
|
|
// - Some blocks might become red if outside k-cluster
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_blue_score_after_partition_heal() {
|
|
let network = ReorgTestNetwork::new_isolated(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let score0_before = consensus0.current_blue_score().await;
|
|
let score1_before = consensus1.current_blue_score().await;
|
|
|
|
info!(
|
|
node0_score = score0_before,
|
|
node1_score = score1_before,
|
|
"Blue scores before heal"
|
|
);
|
|
|
|
// Heal partition
|
|
network.connect_nodes(0, 1).await;
|
|
sleep(Duration::from_secs(3)).await;
|
|
|
|
let score0_after = consensus0.current_blue_score().await;
|
|
let score1_after = consensus1.current_blue_score().await;
|
|
|
|
info!(
|
|
node0_score = score0_after,
|
|
node1_score = score1_after,
|
|
"Blue scores after heal"
|
|
);
|
|
|
|
// Blue scores should converge after heal
|
|
// The merged DAG has higher or equal blue score
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== Mempool Behavior During Reorg ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_mempool_after_dag_update() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Check mempool state
|
|
let mempool0 = network.nodes[0].mempool();
|
|
let mempool1 = network.nodes[1].mempool();
|
|
|
|
let size0 = mempool0.size().await;
|
|
let size1 = mempool1.size().await;
|
|
|
|
info!(
|
|
node0_mempool = size0,
|
|
node1_mempool = size1,
|
|
"Mempool sizes"
|
|
);
|
|
|
|
// Mempool should be empty in fresh nodes without transactions
|
|
// After a reorg, transactions from orphaned blocks should return to mempool
|
|
// (if not conflicting with the new chain)
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_mempool_tx_revalidation() {
|
|
let temp_dir = TempDir::new().unwrap();
|
|
let config = create_node_config(&temp_dir, 0, vec![]);
|
|
|
|
let node = SynorNode::new(config).await.unwrap();
|
|
node.start().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(1)).await;
|
|
|
|
let mempool = node.mempool();
|
|
let consensus = node.consensus();
|
|
|
|
// In a real scenario:
|
|
// 1. Transactions in mempool are validated against current UTXO state
|
|
// 2. After reorg, UTXO state changes
|
|
// 3. Some transactions might become invalid (double-spend)
|
|
// 4. Valid transactions should remain in mempool
|
|
|
|
let initial_size = mempool.size().await;
|
|
let blue_score = consensus.current_blue_score().await;
|
|
|
|
info!(
|
|
mempool_size = initial_size,
|
|
blue_score = blue_score,
|
|
"Mempool state for revalidation test"
|
|
);
|
|
|
|
node.stop().await.unwrap();
|
|
}
|
|
|
|
// ==================== Deep Reorg Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_deep_reorg_protection() {
|
|
let temp_dir = TempDir::new().unwrap();
|
|
let config = create_node_config(&temp_dir, 0, vec![]);
|
|
|
|
let node = SynorNode::new(config).await.unwrap();
|
|
node.start().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(1)).await;
|
|
|
|
let consensus = node.consensus();
|
|
|
|
// GHOSTDAG has finality through:
|
|
// 1. Finality depth - blocks beyond this depth are considered final
|
|
// 2. Merge depth - limits how far back new blocks can merge
|
|
|
|
let finality_depth = node.config().consensus.finality_depth;
|
|
let merge_depth = node.config().consensus.merge_depth;
|
|
|
|
info!(
|
|
finality_depth = finality_depth,
|
|
merge_depth = merge_depth,
|
|
"Reorg protection parameters"
|
|
);
|
|
|
|
// Get current confirmations of genesis/first block
|
|
let tips: Vec<[u8; 32]> = consensus.tips().await;
|
|
if !tips.is_empty() {
|
|
let confirmations = consensus.get_confirmations(&tips[0]).await;
|
|
info!(
|
|
tip_confirmations = confirmations,
|
|
is_final = confirmations >= finality_depth,
|
|
"Tip finality status"
|
|
);
|
|
}
|
|
|
|
node.stop().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_finality_prevents_reorg() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
let consensus = network.nodes[0].consensus();
|
|
|
|
// In GHOSTDAG, blocks with sufficient confirmations are final
|
|
// A reorg cannot undo finalized blocks
|
|
|
|
let finality_depth = network.nodes[0].config().consensus.finality_depth;
|
|
let current_height = consensus.current_height().await;
|
|
|
|
info!(
|
|
current_height = current_height,
|
|
finality_depth = finality_depth,
|
|
"Finality context"
|
|
);
|
|
|
|
// Blocks older than finality_depth from current height are final
|
|
// This prevents deep reorgs that could disrupt settled transactions
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== Conflicting Block Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_handle_conflicting_blocks() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// In GHOSTDAG, "conflicting" blocks are just parallel blocks in the DAG
|
|
// They don't cause traditional reorgs but are classified as blue or red
|
|
|
|
let consensus = network.nodes[0].consensus();
|
|
let tips: Vec<[u8; 32]> = consensus.tips().await;
|
|
|
|
// Multiple tips indicate parallel blocks at the DAG frontier
|
|
if tips.len() > 1 {
|
|
info!(
|
|
tip_count = tips.len(),
|
|
"Multiple parallel tips detected (normal in GHOSTDAG)"
|
|
);
|
|
|
|
// These parallel blocks will be ordered by GHOSTDAG
|
|
// The selected parent chain picks one path
|
|
// Other blocks become part of merge sets
|
|
}
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_block_acceptance_order() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
let consensus = network.nodes[0].consensus();
|
|
|
|
// Get the selected chain which determines transaction order
|
|
let chain: Vec<[u8; 32]> = consensus.get_selected_chain(10).await;
|
|
|
|
info!(chain_length = chain.len(), "Selected chain for ordering");
|
|
|
|
// GHOSTDAG provides total ordering through:
|
|
// 1. Selected parent chain (main chain of blocks)
|
|
// 2. Merge sets (blocks merged at each selected block)
|
|
// 3. Topological order within merge sets
|
|
|
|
for (i, block) in chain.iter().enumerate() {
|
|
if let Some(info) = consensus.get_block_info(block).await {
|
|
info!(
|
|
position = i,
|
|
block = hex::encode(&block[..8]),
|
|
merge_set_size = info.blues.len() + info.reds.len(),
|
|
"Block in ordering"
|
|
);
|
|
}
|
|
}
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== State Rollback Tests ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_state_consistency_after_restructure() {
|
|
let network = ReorgTestNetwork::new_isolated(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
// Let nodes build independent state
|
|
sleep(Duration::from_secs(2)).await;
|
|
|
|
// Connect nodes
|
|
network.connect_nodes(0, 1).await;
|
|
sleep(Duration::from_secs(3)).await;
|
|
|
|
// Verify state consistency
|
|
let consensus0 = network.nodes[0].consensus();
|
|
let consensus1 = network.nodes[1].consensus();
|
|
|
|
let vsp0: Option<[u8; 32]> = consensus0.virtual_selected_parent().await;
|
|
let vsp1: Option<[u8; 32]> = consensus1.virtual_selected_parent().await;
|
|
|
|
let blue0 = consensus0.current_blue_score().await;
|
|
let blue1 = consensus1.current_blue_score().await;
|
|
|
|
info!(
|
|
node0_vsp = ?vsp0.map(|v| hex::encode(&v[..8])),
|
|
node1_vsp = ?vsp1.map(|v| hex::encode(&v[..8])),
|
|
node0_blue = blue0,
|
|
node1_blue = blue1,
|
|
"State after restructure"
|
|
);
|
|
|
|
// After DAG merge, both nodes should have consistent view
|
|
// Small differences acceptable during sync window
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|
|
|
|
// ==================== Edge Cases ====================
|
|
|
|
#[tokio::test]
|
|
async fn test_single_node_no_reorg() {
|
|
let temp_dir = TempDir::new().unwrap();
|
|
let config = create_node_config(&temp_dir, 0, vec![]);
|
|
|
|
let node = SynorNode::new(config).await.unwrap();
|
|
node.start().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(1)).await;
|
|
|
|
let consensus = node.consensus();
|
|
|
|
// Single node should have stable state
|
|
let blue_score1 = consensus.current_blue_score().await;
|
|
sleep(Duration::from_millis(500)).await;
|
|
let blue_score2 = consensus.current_blue_score().await;
|
|
|
|
info!(
|
|
score1 = blue_score1,
|
|
score2 = blue_score2,
|
|
"Blue score stability"
|
|
);
|
|
|
|
// Without new blocks, blue score should be stable
|
|
assert_eq!(blue_score1, blue_score2, "Blue score should be stable");
|
|
|
|
node.stop().await.unwrap();
|
|
}
|
|
|
|
#[tokio::test]
|
|
async fn test_rapid_reconnection() {
|
|
let network = ReorgTestNetwork::new(2).await.unwrap();
|
|
network.start_all().await.unwrap();
|
|
|
|
sleep(Duration::from_secs(1)).await;
|
|
|
|
// Simulate rapid connect/disconnect cycles
|
|
for i in 0..3 {
|
|
info!(cycle = i, "Connection cycle");
|
|
|
|
// Disconnect
|
|
let network_service = network.nodes[0].network();
|
|
let peers = network_service.peers().await;
|
|
for peer in &peers {
|
|
network_service.disconnect_peer(&peer.id).await;
|
|
}
|
|
|
|
sleep(Duration::from_millis(200)).await;
|
|
|
|
// Reconnect
|
|
network.connect_nodes(0, 1).await;
|
|
|
|
sleep(Duration::from_millis(500)).await;
|
|
}
|
|
|
|
// Node should remain stable through rapid reconnections
|
|
assert_eq!(network.nodes[0].state().await, NodeState::Running);
|
|
assert_eq!(network.nodes[1].state().await, NodeState::Running);
|
|
|
|
network.stop_all().await.unwrap();
|
|
}
|