// synor/apps/synord/src/main.rs
// 2026-01-08 05:22:24 +05:30
//
// 672 lines
// 17 KiB
// Rust
//! Synor blockchain node daemon.
//!
//! This is the main entry point for running a Synor node.
#![allow(dead_code)]
use std::path::PathBuf;
use std::sync::Arc;
use clap::{Parser, Subcommand};
use tracing::{error, info};
use synord::config::NodeConfig;
use synord::node::SynorNode;
use synord::services::StorageService;
/// Synor blockchain node daemon.
// Top-level CLI definition (clap derive). Global flags apply to every
// subcommand; when no subcommand is given, `main` falls through to running
// the node with `run`'s default settings. Field-level `///` comments are
// clap help text, so extra notes here use plain `//` comments.
#[derive(Parser)]
#[command(name = "synord")]
#[command(version, about = "Synor blockchain node daemon", long_about = None)]
struct Cli {
    /// Configuration file path
    #[arg(short, long, default_value = "synord.toml")]
    config: PathBuf,
    /// Data directory
    // May also be supplied via the SYNOR_DATA_DIR environment variable.
    #[arg(short, long, env = "SYNOR_DATA_DIR")]
    data_dir: Option<PathBuf>,
    /// Network to connect to
    #[arg(short, long, default_value = "mainnet")]
    network: String,
    /// Log level
    // Overridden by RUST_LOG when that env var is set (see init_logging).
    #[arg(long, default_value = "info")]
    log_level: String,
    /// Enable JSON logging
    #[arg(long)]
    json_logs: bool,
    // Optional subcommand; None means "run the node with default settings".
    #[command(subcommand)]
    command: Option<Commands>,
}
// Subcommands for the synord binary. The `///` comments double as clap help
// text, so behavioral notes are kept in plain `//` comments.
#[derive(Subcommand)]
enum Commands {
    /// Run the node
    Run {
        /// Enable mining
        #[arg(long)]
        mine: bool,
        /// Mining address for block rewards
        #[arg(long)]
        coinbase: Option<String>,
        /// Number of mining threads (0 = auto)
        #[arg(long, default_value = "0")]
        mining_threads: usize,
        /// RPC bind address
        // RPC binds to loopback by default; P2P below binds to all interfaces.
        #[arg(long, default_value = "127.0.0.1")]
        rpc_host: String,
        /// RPC port
        #[arg(long, default_value = "16110")]
        rpc_port: u16,
        /// WebSocket port
        #[arg(long, default_value = "16111")]
        ws_port: u16,
        /// P2P bind address
        #[arg(long, default_value = "0.0.0.0")]
        p2p_host: String,
        /// P2P port
        #[arg(long, default_value = "16100")]
        p2p_port: u16,
        /// Seed nodes to connect to
        // Repeatable flag: `--seeds a --seeds b`.
        #[arg(long)]
        seeds: Vec<String>,
    },
    /// Initialize a new node
    Init {
        /// Network (mainnet, testnet, devnet)
        #[arg(long, default_value = "mainnet")]
        network: String,
        /// Force overwrite existing config
        #[arg(long)]
        force: bool,
    },
    /// Import blocks from file
    Import {
        /// Path to blocks file
        path: PathBuf,
        /// Skip verification
        // NOTE(review): this flag is only logged by import_blocks; no
        // verification appears to happen either way — confirm intent.
        #[arg(long)]
        no_verify: bool,
    },
    /// Export blocks to file
    Export {
        /// Output path
        path: PathBuf,
        /// Start height
        #[arg(long, default_value = "0")]
        from: u64,
        /// End height (0 = latest)
        #[arg(long, default_value = "0")]
        to: u64,
    },
    /// Show node version and info
    Version,
}
#[tokio::main]
async fn main() {
    let cli = Cli::parse();

    // Logging must come up before anything else emits tracing events.
    init_logging(&cli.log_level, cli.json_logs);

    info!(
        version = env!("CARGO_PKG_VERSION"),
        "Starting Synor node daemon"
    );

    // Dispatch the chosen subcommand. Invoking the binary with no subcommand
    // behaves exactly like `run` with all of its default flag values.
    let outcome = match cli.command {
        Some(Commands::Version) => {
            print_version();
            Ok(())
        }
        Some(Commands::Init { network, force }) => init_node(cli.data_dir, network, force).await,
        Some(Commands::Import { path, no_verify }) => {
            import_blocks(cli.config, cli.data_dir, path, no_verify).await
        }
        Some(Commands::Export { path, from, to }) => {
            export_blocks(cli.config, cli.data_dir, path, from, to).await
        }
        Some(Commands::Run {
            mine,
            coinbase,
            mining_threads,
            rpc_host,
            rpc_port,
            ws_port,
            p2p_host,
            p2p_port,
            seeds,
        }) => {
            run_node(
                cli.config,
                cli.data_dir,
                cli.network,
                mine,
                coinbase,
                mining_threads,
                rpc_host,
                rpc_port,
                ws_port,
                p2p_host,
                p2p_port,
                seeds,
            )
            .await
        }
        None => {
            // No subcommand: run with the same defaults as `synord run`.
            run_node(
                cli.config,
                cli.data_dir,
                cli.network,
                false,
                None,
                0,
                "127.0.0.1".to_string(),
                16110,
                16111,
                "0.0.0.0".to_string(),
                16100,
                Vec::new(),
            )
            .await
        }
    };

    if let Err(e) = outcome {
        error!("Node error: {}", e);
        std::process::exit(1);
    }
}
/// Initialize logging.
///
/// Honors an existing `RUST_LOG`-style environment filter when present;
/// otherwise uses `level` from the CLI. Emits JSON-formatted events when
/// `json` is set, human-readable output (with event targets) otherwise.
fn init_logging(level: &str, json: bool) {
    use tracing_subscriber::{fmt, prelude::*, EnvFilter};

    // The environment filter wins; the CLI level is only the fallback.
    let env_filter = match EnvFilter::try_from_default_env() {
        Ok(filter) => filter,
        Err(_) => EnvFilter::new(level),
    };
    let registry = tracing_subscriber::registry().with(env_filter);
    if json {
        registry.with(fmt::layer().json()).init();
    } else {
        registry.with(fmt::layer().with_target(true)).init();
    }
}
/// Run the node.
async fn run_node(
config_path: PathBuf,
data_dir: Option<PathBuf>,
network: String,
mine: bool,
coinbase: Option<String>,
mining_threads: usize,
rpc_host: String,
rpc_port: u16,
ws_port: u16,
p2p_host: String,
p2p_port: u16,
seeds: Vec<String>,
) -> anyhow::Result<()> {
// Load or create config
let config = NodeConfig::load_or_default(&config_path, &network)?;
// Override with CLI args
let config = config
.with_data_dir(data_dir)
.with_mining(mine, coinbase, mining_threads)
.with_rpc(&rpc_host, rpc_port, ws_port)
.with_p2p(&p2p_host, p2p_port, seeds);
info!(
network = %config.network,
data_dir = %config.data_dir.display(),
"Node configuration loaded"
);
// Create and start node
let node = SynorNode::new(config).await?;
let node = Arc::new(node);
// Start all services
node.start().await?;
info!("Synor node is running");
// Wait for shutdown signal
wait_for_shutdown().await;
info!("Shutting down...");
node.stop().await?;
info!("Node stopped gracefully");
Ok(())
}
/// Initialize a new node with genesis block.
// Creates the on-disk layout (config file, storage directories), writes the
// genesis header/body/transactions, seeds the DAG metadata (genesis hash,
// initial tips, chain state), and finally drops a GENESIS marker file so a
// repeat run bails unless --force is passed.
async fn init_node(data_dir: Option<PathBuf>, network: String, force: bool) -> anyhow::Result<()> {
    use synor_consensus::genesis::ChainConfig;
    use synor_storage::{BlockBody, ChainState};
    use synor_types::{BlockId, Network};
    let data_dir = data_dir.unwrap_or_else(default_data_dir);
    // Check if already initialized. The marker is written only after all
    // genesis data is stored, so its presence implies a complete prior init.
    let genesis_marker = data_dir.join("chainstate").join("GENESIS");
    if genesis_marker.exists() && !force {
        anyhow::bail!(
            "Node already initialized at {}. Use --force to reinitialize.",
            data_dir.display()
        );
    }
    // Parse network name into the typed enum; reject anything unknown early.
    let net = match network.as_str() {
        "mainnet" => Network::Mainnet,
        "testnet" => Network::Testnet,
        "devnet" => Network::Devnet,
        _ => anyhow::bail!(
            "Unknown network: {}. Use mainnet, testnet, or devnet.",
            network
        ),
    };
    info!(network = %network, "Initializing node...");
    // Get chain config with genesis block (per-network consensus parameters).
    let chain_config = ChainConfig::for_network(net);
    info!(
        genesis_hash = %hex::encode(chain_config.genesis_hash.as_bytes()),
        "Using genesis block"
    );
    // Create directories for each storage component.
    std::fs::create_dir_all(&data_dir)?;
    std::fs::create_dir_all(data_dir.join("blocks"))?;
    std::fs::create_dir_all(data_dir.join("chainstate"))?;
    std::fs::create_dir_all(data_dir.join("contracts"))?;
    std::fs::create_dir_all(data_dir.join("keys"))?;
    // Create and save node config alongside the data it describes.
    let config = NodeConfig::for_network(&network)?.with_data_dir(Some(data_dir.clone()));
    let config_path = data_dir.join("synord.toml");
    config.save(&config_path)?;
    info!("Created configuration file");
    // Initialize storage
    let storage = StorageService::new(&config).await?;
    storage.start().await?;
    info!("Initialized storage");
    // Store genesis block header
    storage.put_header(&chain_config.genesis.header).await?;
    info!("Stored genesis header");
    // Store genesis block body. The body only references transactions by id;
    // the transactions themselves are stored separately below.
    let genesis_hash = chain_config.genesis_hash;
    let body = BlockBody {
        transaction_ids: chain_config
            .genesis
            .body
            .transactions
            .iter()
            .map(|tx| tx.txid())
            .collect(),
    };
    storage.put_block_body(&genesis_hash, &body).await?;
    info!("Stored genesis block body");
    // Store genesis transactions
    for tx in &chain_config.genesis.body.transactions {
        storage.put_transaction(tx).await?;
    }
    info!(
        tx_count = chain_config.genesis.body.transactions.len(),
        "Stored genesis transactions"
    );
    // Set genesis hash in metadata
    let genesis_id = BlockId::from_bytes(*genesis_hash.as_bytes());
    storage.set_genesis(&genesis_id).await?;
    info!("Set genesis hash");
    // Set initial tips (just genesis) — the DAG's only leaf at this point.
    storage.set_tips(&[genesis_id]).await?;
    info!("Set initial tips");
    // Initialize chain state: one block (genesis), zero accumulated scores,
    // and the network's starting difficulty.
    let chain_state = ChainState {
        max_blue_score: 0,
        total_blocks: 1,
        daa_score: 0,
        difficulty_bits: chain_config.initial_difficulty,
        // 32 zero bytes — presumably a 256-bit accumulated-work counter;
        // TODO confirm endianness/semantics against ChainState's definition.
        total_work: vec![0; 32],
    };
    storage.set_chain_state(&chain_state).await?;
    info!("Initialized chain state");
    // Create genesis marker file (hex genesis hash) — see the check above.
    std::fs::write(&genesis_marker, hex::encode(genesis_hash.as_bytes()))?;
    // Stop storage
    storage.stop().await?;
    info!(
        path = %data_dir.display(),
        network = %network,
        genesis = %hex::encode(genesis_hash.as_bytes()),
        "Node initialized successfully"
    );
    // Human-friendly summary on stdout (logs above are for operators/machines).
    println!();
    println!("Synor node initialized!");
    println!();
    println!(" Network: {}", network);
    println!(" Data dir: {}", data_dir.display());
    println!(" Genesis: {}", hex::encode(genesis_hash.as_bytes()));
    println!();
    println!("Chain parameters:");
    println!(
        " Block time: {} ms",
        chain_config.target_block_time_ms
    );
    println!(" GHOSTDAG K: {}", chain_config.ghostdag_k);
    println!(
        // assumes rewards are denominated in 1e-8 SYNOR base units — TODO confirm
        " Initial reward: {} SYNOR",
        chain_config.initial_reward / 100_000_000
    );
    println!(
        " Halving interval: {} blocks",
        chain_config.halving_interval
    );
    println!();
    println!("To start the node:");
    println!(" synord run --network {}", network);
    println!();
    Ok(())
}
/// Import blocks from file.
// Reads a `SYNBLKS` v1 export file (the format written by export_blocks) and
// stores each block via the storage service. Blocks that fail to deserialize
// or store are counted and logged but do not abort the import.
//
// NOTE(review): `no_verify` is only logged here — no verification is actually
// performed on imported blocks regardless of the flag. Confirm whether
// verification is meant to happen inside StorageService::put_block.
async fn import_blocks(
    config_path: PathBuf,
    data_dir: Option<PathBuf>,
    path: PathBuf,
    no_verify: bool,
) -> anyhow::Result<()> {
    use std::fs::File;
    use std::io::{BufReader, Read};
    // Imports always use "mainnet" config defaults; CLI data dir overrides.
    let config = NodeConfig::load_or_default(&config_path, "mainnet")?;
    let config = config.with_data_dir(data_dir);
    info!(
        path = %path.display(),
        verify = !no_verify,
        "Importing blocks"
    );
    // Open the import file
    let file = File::open(&path)?;
    let mut reader = BufReader::new(file);
    // Read file header (magic + version): "SYNBLKS" followed by version byte 0x01.
    let mut magic = [0u8; 8];
    reader.read_exact(&mut magic)?;
    if &magic != b"SYNBLKS\x01" {
        anyhow::bail!("Invalid block export file format");
    }
    // Initialize storage
    let storage = Arc::new(StorageService::new(&config).await?);
    storage.start().await?;
    let mut imported = 0u64;
    let mut errors = 0u64;
    // Read length-prefixed blocks until EOF or a zero-length terminator record.
    loop {
        // Read block length (u32, little-endian)
        let mut len_buf = [0u8; 4];
        match reader.read_exact(&mut len_buf) {
            Ok(_) => {}
            // Clean EOF here just means the file ended without a terminator.
            Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => break,
            Err(e) => return Err(e.into()),
        }
        let block_len = u32::from_le_bytes(len_buf) as usize;
        // A zero length is the explicit end-of-stream terminator.
        if block_len == 0 {
            break;
        }
        // Read block data
        let mut block_bytes = vec![0u8; block_len];
        reader.read_exact(&mut block_bytes)?;
        // Deserialize block data (hash + header + body); a corrupt record is
        // skipped and counted rather than aborting the whole import.
        let block_data: synord::services::BlockData = match borsh::from_slice(&block_bytes) {
            Ok(b) => b,
            Err(e) => {
                error!("Failed to deserialize block: {}", e);
                errors += 1;
                continue;
            }
        };
        // Store the block; log the hash prefix on failure for traceability.
        if let Err(e) = storage.put_block(&block_data).await {
            error!(
                hash = hex::encode(&block_data.hash[..8]),
                "Failed to store block: {}", e
            );
            errors += 1;
        } else {
            imported += 1;
            // Periodic progress output for large imports.
            if imported % 1000 == 0 {
                info!("Imported {} blocks...", imported);
            }
        }
    }
    storage.stop().await?;
    info!(
        imported = imported,
        errors = errors,
        "Block import complete"
    );
    Ok(())
}
/// Export blocks to file.
///
/// Walks backwards through the DAG from the current tips and writes every
/// block whose blue score lies in `[from, to]` as a length-prefixed,
/// borsh-serialized record. Per the CLI help text, `to == 0` means
/// "up to the latest block".
async fn export_blocks(
    config_path: PathBuf,
    data_dir: Option<PathBuf>,
    path: PathBuf,
    from: u64,
    to: u64,
) -> anyhow::Result<()> {
    use std::fs::File;
    use std::io::{BufWriter, Write};
    let config = NodeConfig::load_or_default(&config_path, "mainnet")?;
    let config = config.with_data_dir(data_dir);
    info!(
        path = %path.display(),
        from = from,
        to = to,
        "Exporting blocks"
    );
    // The CLI documents `--to 0` as "latest", but the previous code compared
    // `blue_score <= to` literally, so the default invocation exported (at
    // most) blue-score-0 blocks. Normalize 0 to an unbounded upper limit.
    let to = if to == 0 { u64::MAX } else { to };
    // Initialize storage
    let storage = Arc::new(StorageService::new(&config).await?);
    storage.start().await?;
    // Tips are the DAG's current leaves; the backwards walk starts there.
    let tips = storage.get_tips().await?;
    if tips.is_empty() {
        anyhow::bail!("No tips found - is the node initialized?");
    }
    // Open output file
    let file = File::create(&path)?;
    let mut writer = BufWriter::new(file);
    // Write file header (magic + version)
    writer.write_all(b"SYNBLKS\x01")?;
    let mut exported = 0u64;
    let mut errors = 0u64;
    // Walk backwards from tips through the DAG, exporting blocks whose blue
    // score falls in [from, to]. `seen` keeps shared ancestors from being
    // visited (and exported) more than once.
    let mut seen = std::collections::HashSet::new();
    let mut to_visit: Vec<[u8; 32]> = tips.into_iter().map(|h| *h.as_bytes()).collect();
    while let Some(hash) = to_visit.pop() {
        // `insert` returns false when the hash was already present — one
        // lookup instead of a contains/insert pair.
        if !seen.insert(hash) {
            continue;
        }
        if let Ok(Some(block_data)) = storage.get_block(&hash).await {
            // Parse header to check blue score (used as height in DAG);
            // unparseable headers are counted and skipped.
            let header: synor_types::BlockHeader = match borsh::from_slice(&block_data.header) {
                Ok(h) => h,
                Err(e) => {
                    error!("Failed to parse header: {}", e);
                    errors += 1;
                    continue;
                }
            };
            let blue_score = header.blue_score.value();
            // Only export blocks within the specified blue score range
            if blue_score >= from && blue_score <= to {
                // Serialize the block data
                let serialized = borsh::to_vec(&block_data)?;
                // Write length + data (same framing import_blocks reads back)
                writer.write_all(&(serialized.len() as u32).to_le_bytes())?;
                writer.write_all(&serialized)?;
                exported += 1;
                if exported % 1000 == 0 {
                    info!("Exported {} blocks...", exported);
                }
            }
            // Add parents to visit (walk backwards through DAG).
            // Stop descending once we've gone below the 'from' threshold.
            if blue_score > from {
                for parent in &header.parents {
                    to_visit.push(*parent.as_bytes());
                }
            }
        }
    }
    // Write zero-length terminator record and flush the buffered writer.
    writer.write_all(&0u32.to_le_bytes())?;
    writer.flush()?;
    storage.stop().await?;
    info!(
        exported = exported,
        errors = errors,
        path = %path.display(),
        "Block export complete"
    );
    Ok(())
}
/// Print version information.
///
/// Writes the crate version, compile-time build details, and the fixed
/// network parameter summary to stdout.
fn print_version() {
    // Lines with runtime values are printed individually.
    println!("synord {}", env!("CARGO_PKG_VERSION"));
    println!();
    println!("Build info:");
    println!(" Rust version: {}", rustc_version());
    println!(" Target: {}", std::env::consts::ARCH);
    println!(" OS: {}", std::env::consts::OS);
    // The remaining lines are static text; emit them from a table.
    for line in [
        "",
        "Network parameters:",
        " Max supply: 70,000,000 SYNOR",
        " Block time: ~1 second (DAG)",
        " Algorithm: kHeavyHash PoW",
        " Consensus: GHOSTDAG",
    ] {
        println!("{}", line);
    }
}
/// Compiler version baked in at build time via the RUSTC_VERSION env var,
/// or "unknown" when the build script did not provide one.
fn rustc_version() -> &'static str {
    match option_env!("RUSTC_VERSION") {
        Some(version) => version,
        None => "unknown",
    }
}
/// Get default data directory.
///
/// Resolves to the platform data directory (current directory as the
/// fallback when none is available) with a `synor` subdirectory appended.
fn default_data_dir() -> PathBuf {
    let base = match dirs::data_dir() {
        Some(dir) => dir,
        None => PathBuf::from("."),
    };
    base.join("synor")
}
/// Wait for shutdown signal.
///
/// On Unix this resolves when either SIGTERM or SIGINT arrives. On every
/// other platform it falls back to tokio's cross-platform Ctrl+C handler.
async fn wait_for_shutdown() {
    #[cfg(unix)]
    {
        use tokio::signal::unix::{signal, SignalKind};
        let mut sigterm = signal(SignalKind::terminate()).expect("Failed to register SIGTERM");
        let mut sigint = signal(SignalKind::interrupt()).expect("Failed to register SIGINT");
        // Resolve on whichever signal arrives first.
        tokio::select! {
            _ = sigterm.recv() => {
                info!("Received SIGTERM");
            }
            _ = sigint.recv() => {
                info!("Received SIGINT");
            }
        }
    }
    // This arm was previously `#[cfg(windows)]`, which made the function
    // return immediately on any non-Unix, non-Windows target — the node
    // would shut down the moment it started. Use Ctrl+C as the fallback on
    // every non-Unix platform instead.
    #[cfg(not(unix))]
    {
        tokio::signal::ctrl_c()
            .await
            .expect("Failed to listen for Ctrl+C");
        info!("Received Ctrl+C");
    }
}