// Source: synor/apps/explorer/src/main.rs
// Commit 16c7e87a66 — fix(explorer): fix RPC method calls and add WebSocket support
//   - Fix health check to use RPC call instead of GET /health
//   - Update API endpoints to use correct RPC method names:
//     synor_getInfo, synor_getMiningInfo, synor_getTips,
//     synor_getBlockCount, synor_getBlueScore, synor_getBlocksByBlueScore
//   - Fix response format handling (synor_getTips returns {tips: [...]})
//   - Add WebSocket endpoint at /ws for real-time updates:
//     stats_update events (every second), new_block events on block detection,
//     tip_update events on DAG changes
//   - Add ws feature to axum and tokio-tungstenite dependency
// Author: Gulshan Yadav, 2026-01-08 13:15:40 +05:30

//! Synor Block Explorer Backend
//!
//! A REST API server that provides blockchain data for web frontends.
//! Features:
//! - Block and transaction queries with pagination
//! - Address balance and transaction history
//! - DAG visualization data
//! - Network statistics and metrics
//! - Search functionality
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use axum::http::{HeaderValue, Method};
use axum::{
extract::{
ws::{Message, WebSocket, WebSocketUpgrade},
Path, Query, State,
},
http::StatusCode,
response::IntoResponse,
routing::get,
Json, Router,
};
use futures::{SinkExt, StreamExt};
use moka::future::Cache;
use serde::{Deserialize, Serialize};
use tower_http::compression::CompressionLayer;
use tower_http::cors::{Any, CorsLayer};
use tower_http::services::{ServeDir, ServeFile};
use tower_http::trace::TraceLayer;
use tracing::{error, info};
// ==================== Configuration ====================
/// Explorer configuration.
///
/// Populated from environment variables via [`ExplorerConfig::from_env`],
/// with fallbacks supplied by the `Default` impl.
#[derive(Clone, Debug)]
pub struct ExplorerConfig {
    /// RPC URL of the Synor node.
    pub rpc_url: String,
    /// Server listen address.
    pub listen_addr: SocketAddr,
    /// Directory containing static frontend files.
    pub static_dir: Option<String>,
    /// Cache TTL for blocks (seconds).
    pub block_cache_ttl: u64,
    /// Cache TTL for stats (seconds).
    pub stats_cache_ttl: u64,
    /// Maximum blocks per page.
    pub max_page_size: usize,
    /// Allowed CORS origins (comma-separated, or "*" for any).
    pub cors_origins: String,
}
impl Default for ExplorerConfig {
fn default() -> Self {
ExplorerConfig {
rpc_url: "http://localhost:17110".to_string(),
listen_addr: "0.0.0.0:3000".parse().unwrap(),
static_dir: None,
block_cache_ttl: 60,
stats_cache_ttl: 10,
max_page_size: 100,
// Default to specific production origins for security
cors_origins: "https://explorer.synor.cc,https://wallet.synor.cc".to_string(),
}
}
}
impl ExplorerConfig {
    /// Load configuration from environment variables.
    ///
    /// Unset or malformed variables leave the corresponding default in place.
    pub fn from_env() -> Self {
        let mut config = ExplorerConfig::default();
        if let Ok(url) = std::env::var("SYNOR_RPC_URL") {
            config.rpc_url = url;
        }
        if let Some(addr) = std::env::var("EXPLORER_LISTEN_ADDR")
            .ok()
            .and_then(|s| s.parse().ok())
        {
            config.listen_addr = addr;
        }
        if let Ok(dir) = std::env::var("EXPLORER_STATIC_DIR") {
            // Only accept the directory if it actually exists on disk.
            if std::path::Path::new(&dir).exists() {
                config.static_dir = Some(dir);
            }
        }
        if let Some(ttl) = std::env::var("EXPLORER_BLOCK_CACHE_TTL")
            .ok()
            .and_then(|s| s.parse().ok())
        {
            config.block_cache_ttl = ttl;
        }
        if let Some(ttl) = std::env::var("EXPLORER_STATS_CACHE_TTL")
            .ok()
            .and_then(|s| s.parse().ok())
        {
            config.stats_cache_ttl = ttl;
        }
        if let Ok(origins) = std::env::var("EXPLORER_CORS_ORIGINS") {
            config.cors_origins = origins;
        }
        config
    }

    /// Build CORS layer from configuration.
    pub fn cors_layer(&self) -> CorsLayer {
        // "*" opens the API to any origin — development/testing only.
        if self.cors_origins == "*" {
            return CorsLayer::new()
                .allow_origin(Any)
                .allow_methods(Any)
                .allow_headers(Any);
        }
        // Parse the comma-separated origin list, silently dropping entries
        // that are not valid header values.
        let origins: Vec<HeaderValue> = self
            .cors_origins
            .split(',')
            .filter_map(|s| s.trim().parse().ok())
            .collect();
        CorsLayer::new()
            .allow_origin(origins)
            .allow_methods([Method::GET, Method::POST, Method::OPTIONS])
            .allow_headers(Any)
    }
}
// ==================== Application State ====================
/// Explorer application state shared (via `Arc`) across all request handlers.
struct ExplorerState {
    /// Runtime configuration (RPC URL, cache TTLs, page limits, CORS).
    config: ExplorerConfig,
    /// HTTP client used for JSON-RPC calls to the Synor node.
    http_client: reqwest::Client,
    /// Cache for blocks by hash.
    block_cache: Cache<String, ExplorerBlock>,
    /// Cache for network stats.
    stats_cache: Cache<String, NetworkStats>,
}
impl ExplorerState {
    /// Build the shared state: TTL caches sized per config and a reqwest
    /// client with a 30-second request timeout.
    fn new(config: ExplorerConfig) -> Self {
        let http_client = reqwest::Client::builder()
            .timeout(Duration::from_secs(30))
            .build()
            .expect("Failed to build HTTP client");
        ExplorerState {
            // Caches are built first since they read TTLs out of `config`
            // before it is moved into the struct.
            block_cache: Cache::builder()
                .time_to_live(Duration::from_secs(config.block_cache_ttl))
                .max_capacity(10_000)
                .build(),
            stats_cache: Cache::builder()
                .time_to_live(Duration::from_secs(config.stats_cache_ttl))
                .max_capacity(100)
                .build(),
            http_client,
            config,
        }
    }
}
// ==================== API Types ====================
/// Explorer block representation.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExplorerBlock {
    pub hash: String,
    pub version: u32,
    /// Hashes of all DAG parents.
    pub parent_hashes: Vec<String>,
    /// Block timestamp in milliseconds.
    pub timestamp: u64,
    /// Human-readable form of `timestamp` (see `format_timestamp`).
    pub timestamp_human: String,
    pub bits: u32,
    pub nonce: u64,
    pub daa_score: u64,
    pub blue_score: u64,
    pub blue_work: String,
    pub difficulty: f64,
    pub transaction_count: usize,
    pub is_chain_block: bool,
    /// Full transactions; omitted from the JSON when not populated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub transactions: Option<Vec<ExplorerTransaction>>,
    pub children_hashes: Vec<String>,
    // NOTE(review): assumed GHOSTDAG merge-set semantics — confirm against
    // the node RPC documentation.
    pub merge_set_blues: Vec<String>,
    pub merge_set_reds: Vec<String>,
}
/// Explorer transaction representation.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExplorerTransaction {
    pub id: String,
    pub hash: String,
    pub version: u16,
    pub inputs: Vec<ExplorerInput>,
    pub outputs: Vec<ExplorerOutput>,
    pub lock_time: u64,
    /// Node-reported transaction mass; 0 when verbose data is absent.
    pub mass: u64,
    pub is_coinbase: bool,
    /// Sum of input values in sompi; 0 when input verbose data is missing.
    pub total_input: u64,
    /// Sum of output values in sompi.
    pub total_output: u64,
    /// `total_input - total_output` (saturating); 0 for coinbase.
    pub fee: u64,
    /// Containing block hash, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_hash: Option<String>,
    /// Containing block timestamp, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_time: Option<u64>,
}
/// Transaction input with resolved address.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExplorerInput {
    /// Id of the transaction whose output is being spent.
    pub previous_tx_id: String,
    /// Output index within that transaction.
    pub previous_index: u32,
    /// Spending address, when the node supplied verbose data.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
    /// Spent value in sompi, when verbose data is available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value: Option<u64>,
}
/// Transaction output with resolved address.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExplorerOutput {
    /// Output value in sompi.
    pub value: u64,
    /// Human-readable value (see `format_synor`).
    pub value_human: String,
    /// Script type name, or "unknown" when verbose data is absent.
    pub script_type: String,
    /// Receiving address, when resolvable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
}
/// Address information.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AddressInfo {
    pub address: String,
    /// Current balance in sompi.
    pub balance: u64,
    /// Human-readable balance (see `format_synor`).
    pub balance_human: String,
    /// Number of unspent outputs held by the address.
    pub utxo_count: usize,
    /// Lifetime received total — currently always 0 (needs historical data).
    pub total_received: u64,
    /// Lifetime sent total — currently always 0 (needs historical data).
    pub total_sent: u64,
    /// Transaction count — currently always 0 (needs indexing).
    pub transaction_count: usize,
}
/// Network statistics.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkStats {
    /// Network identifier (currently hard-coded to "testnet" in `get_stats`).
    pub network_id: String,
    pub is_synced: bool,
    pub block_count: u64,
    /// Mirrors `block_count`; no separate header count is exposed here.
    pub header_count: u64,
    /// Number of current DAG tips.
    pub tip_count: usize,
    pub virtual_daa_score: u64,
    pub difficulty: f64,
    /// Network hashrate in H/s.
    pub hashrate: f64,
    /// Human-readable hashrate (see `format_hashrate`).
    pub hashrate_human: String,
    /// Expected blocks per second.
    pub block_rate: f64,
    pub mempool_size: u64,
    pub peer_count: usize,
    /// Estimated circulating supply in sompi.
    pub circulating_supply: u64,
    pub circulating_supply_human: String,
    /// Maximum supply in sompi (21M SYNOR).
    pub max_supply: u64,
}
/// DAG visualization data: node list plus parent edges.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DagVisualization {
    pub blocks: Vec<DagBlock>,
    pub edges: Vec<DagEdge>,
}
/// Block node for DAG visualization.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DagBlock {
    pub hash: String,
    /// First 8 characters of the hash, for compact node labels.
    pub short_hash: String,
    pub blue_score: u64,
    pub is_blue: bool,
    pub is_chain_block: bool,
    /// Block timestamp in milliseconds.
    pub timestamp: u64,
    pub tx_count: usize,
}
/// Directed edge for DAG visualization (child -> parent).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DagEdge {
    /// Child block hash.
    pub from: String,
    /// Parent block hash.
    pub to: String,
    /// True when `to` is the first-listed parent (treated as selected).
    pub is_selected_parent: bool,
}
/// Pagination query parameters (`?page=&limit=`).
#[derive(Clone, Debug, Deserialize)]
pub struct PaginationParams {
    /// 1-based page number (defaults to 1).
    #[serde(default = "default_page")]
    pub page: usize,
    /// Items per page (defaults to 25; handlers clamp to the config maximum).
    #[serde(default = "default_limit")]
    pub limit: usize,
}
/// serde default for `PaginationParams::page`.
fn default_page() -> usize {
    1
}
/// serde default for `PaginationParams::limit`.
fn default_limit() -> usize {
    25
}
/// Generic paginated response wrapper.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PaginatedResponse<T> {
    pub data: Vec<T>,
    /// 1-based page number that was served.
    pub page: usize,
    /// Effective page size after clamping.
    pub limit: usize,
    /// Total number of items across all pages.
    pub total: usize,
    pub total_pages: usize,
    pub has_next: bool,
    pub has_prev: bool,
}
/// Search result pointing the frontend at the right detail page.
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SearchResult {
    /// One of "block", "transaction", or "address".
    pub result_type: String,
    /// The matched query value.
    pub value: String,
    /// Frontend route for the result (e.g. "/block/<hash>").
    pub redirect_url: String,
}
/// API error response, serialized as `{ "error": ..., "code": ... }`.
#[derive(Debug, Serialize)]
pub struct ApiError {
    /// Human-readable error description.
    pub error: String,
    /// HTTP status code to respond with.
    pub code: u16,
}
impl IntoResponse for ApiError {
    fn into_response(self) -> axum::response::Response {
        // Fall back to 500 when `code` is not a valid HTTP status.
        let status = StatusCode::from_u16(self.code).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
        (status, Json(self)).into_response()
    }
}
// ==================== RPC Client ====================
/// JSON-RPC 2.0 request envelope.
#[derive(Serialize)]
struct RpcRequest<P> {
    /// Always "2.0".
    jsonrpc: &'static str,
    method: String,
    params: P,
    /// Request correlation id (constant; see `rpc_call`).
    id: u64,
}
/// JSON-RPC response envelope; exactly one of `result`/`error` is expected.
#[derive(Deserialize)]
struct RpcResponse<R> {
    result: Option<R>,
    error: Option<RpcError>,
}
/// Error object from a JSON-RPC response (only the message is used).
#[derive(Deserialize)]
struct RpcError {
    message: String,
}
impl ExplorerState {
    /// Make a JSON-RPC 2.0 call to the configured Synor node.
    ///
    /// `method` is the RPC method name; `params` is serialized as the
    /// JSON-RPC `params` value. Returns the deserialized `result`, or an
    /// `ApiError` with: 503 when the HTTP request fails, 500 when the body
    /// cannot be parsed or carries no `result`, 400 when the node returns an
    /// RPC-level error.
    async fn rpc_call<P: Serialize, R: for<'de> Deserialize<'de>>(
        &self,
        method: &str,
        params: P,
    ) -> Result<R, ApiError> {
        let request = RpcRequest {
            jsonrpc: "2.0",
            method: method.to_string(),
            params,
            // Constant id: each call is an independent HTTP round-trip, so
            // id-based correlation is not needed.
            id: 1,
        };
        let response = self
            .http_client
            .post(&self.config.rpc_url)
            .json(&request)
            .send()
            .await
            .map_err(|e| {
                error!("RPC request failed: {}", e);
                ApiError {
                    error: "RPC connection failed".to_string(),
                    code: 503,
                }
            })?;
        let rpc_response: RpcResponse<R> = response.json().await.map_err(|e| {
            error!("Failed to parse RPC response: {}", e);
            ApiError {
                error: "Invalid RPC response".to_string(),
                code: 500,
            }
        })?;
        // An RPC-level error takes precedence over a missing result.
        if let Some(error) = rpc_response.error {
            return Err(ApiError {
                error: error.message,
                code: 400,
            });
        }
        rpc_response.result.ok_or_else(|| ApiError {
            error: "Empty RPC response".to_string(),
            code: 500,
        })
    }
}
// ==================== Route Handlers ====================
/// Health check endpoint.
async fn health(State(state): State<Arc<ExplorerState>>) -> impl IntoResponse {
// Check RPC connection by making a simple RPC call
#[derive(Deserialize)]
struct VersionResult {
version: String,
}
let rpc_ok = state
.rpc_call::<_, VersionResult>("synor_getServerVersion", ())
.await
.is_ok();
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Health {
healthy: bool,
rpc_connected: bool,
}
let status = if rpc_ok {
StatusCode::OK
} else {
StatusCode::SERVICE_UNAVAILABLE
};
(
status,
Json(Health {
healthy: rpc_ok,
rpc_connected: rpc_ok,
}),
)
}
/// Get network statistics.
///
/// Aggregates `synor_getInfo`, `synor_getMiningInfo` and `synor_getTips`
/// into a single [`NetworkStats`], cached for `stats_cache_ttl` seconds.
async fn get_stats(
    State(state): State<Arc<ExplorerState>>,
) -> Result<Json<NetworkStats>, ApiError> {
    // Serve from cache when fresh.
    if let Some(stats) = state.stats_cache.get("network_stats").await {
        return Ok(Json(stats));
    }
    // Response shapes matching the node RPC. Only fields we actually read
    // are declared (serde ignores the rest of the JSON object), avoiding
    // dead_code warnings for unread fields.
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct NodeInfo {
        peer_count: usize,
        block_count: u64,
        blue_score: u64,
        mempool_size: usize,
        synced: bool,
    }
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct MiningInfo {
        difficulty: f64,
        networkhashps: u64,
    }
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct TipsResponse {
        tips: Vec<String>,
    }
    // Issue the three RPC calls concurrently.
    let info_fut = state.rpc_call::<_, NodeInfo>("synor_getInfo", ());
    let mining_fut = state.rpc_call::<_, MiningInfo>("synor_getMiningInfo", ());
    let tips_fut = state.rpc_call::<_, TipsResponse>("synor_getTips", ());
    let (info, mining, tips) = tokio::try_join!(info_fut, mining_fut, tips_fut)?;
    // With a 100ms target block time the expected rate is ~10 blocks/second.
    let block_rate = 10.0;
    let circulating_supply = estimate_circulating_supply(info.block_count);
    let max_supply = 21_000_000_u64 * 100_000_000; // 21M SYNOR in sompi
    let stats = NetworkStats {
        network_id: "testnet".to_string(),
        is_synced: info.synced,
        block_count: info.block_count,
        header_count: info.block_count, // no separate header count in this impl
        tip_count: tips.tips.len(),
        virtual_daa_score: info.blue_score,
        difficulty: mining.difficulty,
        hashrate: mining.networkhashps as f64,
        hashrate_human: format_hashrate(mining.networkhashps as f64),
        block_rate,
        mempool_size: info.mempool_size as u64,
        peer_count: info.peer_count,
        circulating_supply,
        circulating_supply_human: format_synor(circulating_supply),
        max_supply,
    };
    // Cache the result for subsequent requests.
    state
        .stats_cache
        .insert("network_stats".to_string(), stats.clone())
        .await;
    Ok(Json(stats))
}

/// Estimate the circulating supply (in sompi) after `blocks` blocks.
///
/// Emission model used by this explorer: 100 SYNOR initial block reward,
/// halving every 210,000 blocks with integer division, so the reward
/// reaches zero after the 7th halving (100 -> 50 -> ... -> 1 -> 0).
fn estimate_circulating_supply(blocks: u64) -> u64 {
    const HALVING_INTERVAL: u64 = 210_000;
    let mut supply = 0u64;
    let mut remaining = blocks;
    let mut reward = 100_u64 * 100_000_000; // initial reward in sompi
    while remaining > 0 && reward > 0 {
        let period_blocks = remaining.min(HALVING_INTERVAL);
        supply += period_blocks * reward;
        remaining -= period_blocks;
        reward /= 2;
    }
    supply
}
/// Get block by hash.
///
/// `include_txs=true` asks the node for full transaction bodies; the cache
/// key includes the flag so the two variants are cached independently.
async fn get_block(
    State(state): State<Arc<ExplorerState>>,
    Path(hash): Path<String>,
    Query(params): Query<IncludeTxsParam>,
) -> Result<Json<ExplorerBlock>, ApiError> {
    let include_txs = params.include_txs.unwrap_or(false);
    let cache_key = format!("{}:{}", hash, include_txs);
    // Check cache
    if let Some(block) = state.block_cache.get(&cache_key).await {
        return Ok(Json(block));
    }
    // Fetch from RPC
    #[derive(Serialize)]
    struct GetBlockParams {
        hash: String,
        include_txs: bool,
    }
    let rpc_block: synor_rpc::RpcBlock = state
        .rpc_call(
            "synor_getBlock",
            GetBlockParams {
                hash: hash.clone(),
                include_txs,
            },
        )
        .await?;
    let block = convert_rpc_block(rpc_block);
    // Cache the result
    state.block_cache.insert(cache_key, block.clone()).await;
    Ok(Json(block))
}
/// Optional `include_txs` query parameter for [`get_block`].
#[derive(Deserialize)]
struct IncludeTxsParam {
    include_txs: Option<bool>,
}
/// Get recent blocks with pagination.
///
/// Pages backwards from the current virtual blue score. Pagination accuracy
/// depends on `synor_getBlocksByBlueScore` returning blocks from the given
/// score — TODO confirm the node's exact window semantics.
async fn get_blocks(
    State(state): State<Arc<ExplorerState>>,
    Query(params): Query<PaginationParams>,
) -> Result<Json<PaginatedResponse<ExplorerBlock>>, ApiError> {
    // Clamp to [1, max_page_size]: a client-supplied `limit=0` would
    // otherwise panic with a divide-by-zero in `total.div_ceil(limit)`.
    let limit = params.limit.min(state.config.max_page_size).max(1);
    // Get block count and current blue score
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct BlockCount {
        block_count: u64,
    }
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct BlueScore {
        blue_score: u64,
    }
    let count: BlockCount = state.rpc_call("synor_getBlockCount", ()).await?;
    let score: BlueScore = state.rpc_call("synor_getBlueScore", ()).await?;
    let total = count.block_count as usize;
    if total == 0 {
        // Empty chain: return an empty page rather than erroring.
        return Ok(Json(PaginatedResponse {
            data: vec![],
            page: params.page,
            limit,
            total: 0,
            total_pages: 0,
            has_next: false,
            has_prev: false,
        }));
    }
    // Fetch blocks by blue score (most recent first). A failed fetch
    // degrades to an empty page instead of a 5xx.
    let start_score = score
        .blue_score
        .saturating_sub((params.page.saturating_sub(1) * limit) as u64);
    let blocks_data: Vec<serde_json::Value> = state
        .rpc_call("synor_getBlocksByBlueScore", (start_score, true))
        .await
        .unwrap_or_else(|_| vec![]);
    // Project the raw JSON into lightweight ExplorerBlocks. Fields the list
    // view doesn't need (blue work, difficulty, children, merge sets) stay
    // empty; the detail endpoint provides them.
    let blocks: Vec<ExplorerBlock> = blocks_data
        .into_iter()
        .take(limit)
        .filter_map(|b| {
            let hash = b.get("hash")?.as_str()?.to_string();
            let header = b.get("header")?;
            let timestamp = header.get("timestamp")?.as_u64()?;
            Some(ExplorerBlock {
                hash,
                version: header.get("version")?.as_u64()? as u32,
                parent_hashes: header
                    .get("parents")
                    .and_then(|p| p.as_array())
                    .map(|a| a.iter().filter_map(|v| v.as_str().map(String::from)).collect())
                    .unwrap_or_default(),
                timestamp,
                timestamp_human: format_timestamp(timestamp),
                bits: header.get("bits")?.as_u64()? as u32,
                nonce: header.get("nonce")?.as_u64()?,
                // The node JSON exposes no separate DAA score here, so
                // blueScore stands in for both — TODO confirm.
                daa_score: header.get("blueScore").and_then(|v| v.as_u64()).unwrap_or(0),
                blue_score: header.get("blueScore").and_then(|v| v.as_u64()).unwrap_or(0),
                blue_work: String::new(),
                difficulty: 0.0,
                transaction_count: b
                    .get("transactions")
                    .and_then(|t| t.as_array())
                    .map(|a| a.len())
                    .unwrap_or(0),
                is_chain_block: true,
                transactions: None,
                children_hashes: vec![],
                merge_set_blues: vec![],
                merge_set_reds: vec![],
            })
        })
        .collect();
    let total_pages = total.div_ceil(limit);
    Ok(Json(PaginatedResponse {
        data: blocks,
        page: params.page,
        limit,
        total,
        total_pages,
        has_next: params.page < total_pages,
        has_prev: params.page > 1,
    }))
}
/// Get current DAG tips.
///
/// `synor_getTips` wraps the list as `{ "tips": [...] }`; unwrap it and
/// return the bare array to the frontend.
async fn get_tips(State(state): State<Arc<ExplorerState>>) -> Result<Json<Vec<String>>, ApiError> {
    #[derive(Deserialize)]
    struct TipsResponse {
        tips: Vec<String>,
    }
    let TipsResponse { tips } = state.rpc_call("synor_getTips", ()).await?;
    Ok(Json(tips))
}
/// Get transaction by ID.
async fn get_transaction(
    State(state): State<Arc<ExplorerState>>,
    Path(tx_id): Path<String>,
) -> Result<Json<ExplorerTransaction>, ApiError> {
    #[derive(Serialize)]
    struct GetTxParams {
        tx_id: String,
    }
    let request = GetTxParams { tx_id };
    let rpc_tx: synor_rpc::RpcTransaction =
        state.rpc_call("synor_getTransaction", request).await?;
    Ok(Json(convert_rpc_transaction(rpc_tx)))
}
/// Get address information.
async fn get_address(
State(state): State<Arc<ExplorerState>>,
Path(address): Path<String>,
) -> Result<Json<AddressInfo>, ApiError> {
// Validate address format
if !address.starts_with("synor1") || address.len() < 40 {
return Err(ApiError {
error: "Invalid address format".to_string(),
code: 400,
});
}
// Get UTXOs
#[derive(Serialize)]
struct GetUtxosParams {
addresses: Vec<String>,
}
let utxos: Vec<synor_rpc::RpcUtxo> = state
.rpc_call(
"synor_getUtxosByAddresses",
GetUtxosParams {
addresses: vec![address.clone()],
},
)
.await?;
// Get balance
#[derive(Serialize)]
struct GetBalanceParams {
address: String,
}
#[derive(Deserialize)]
struct BalanceResult {
balance: u64,
}
let balance: BalanceResult = state
.rpc_call(
"synor_getBalanceByAddress",
GetBalanceParams {
address: address.clone(),
},
)
.await?;
let info = AddressInfo {
address: address.clone(),
balance: balance.balance,
balance_human: format_synor(balance.balance),
utxo_count: utxos.len(),
total_received: 0, // Would need historical data
total_sent: 0, // Would need historical data
transaction_count: 0, // Would need indexing
};
Ok(Json(info))
}
/// Get UTXOs for an address.
async fn get_address_utxos(
    State(state): State<Arc<ExplorerState>>,
    Path(address): Path<String>,
) -> Result<Json<Vec<synor_rpc::RpcUtxo>>, ApiError> {
    #[derive(Serialize)]
    struct GetUtxosParams {
        addresses: Vec<String>,
    }
    // The node API takes a batch of addresses; wrap the single one.
    let request = GetUtxosParams {
        addresses: vec![address],
    };
    let utxos: Vec<synor_rpc::RpcUtxo> =
        state.rpc_call("synor_getUtxosByAddresses", request).await?;
    Ok(Json(utxos))
}
/// Get DAG visualization data.
///
/// Builds a node/edge list for the frontend DAG view by walking `depth`
/// consecutive blue scores back from the current virtual blue score.
async fn get_dag(
    State(state): State<Arc<ExplorerState>>,
    Query(params): Query<DagParams>,
) -> Result<Json<DagVisualization>, ApiError> {
    // Default 10 levels, hard cap at 50 (each level costs one RPC call).
    let depth = params.depth.unwrap_or(10).min(50);
    // Get tips
    #[derive(Deserialize)]
    struct TipsResponse {
        tips: Vec<String>,
    }
    let tips_resp: TipsResponse = state.rpc_call("synor_getTips", ()).await?;
    if tips_resp.tips.is_empty() {
        // Empty DAG — nothing to draw.
        return Ok(Json(DagVisualization {
            blocks: vec![],
            edges: vec![],
        }));
    }
    // Get current blue score to fetch recent blocks
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct BlueScore {
        blue_score: u64,
    }
    let score: BlueScore = state.rpc_call("synor_getBlueScore", ()).await?;
    let mut all_hashes = std::collections::HashSet::new();
    let mut blocks = Vec::new();
    let mut edges = Vec::new();
    // Fetch blocks level by level; an RPC failure for one level is treated
    // as "no blocks at that score" rather than failing the whole request.
    for i in 0..depth as u64 {
        let target_score = score.blue_score.saturating_sub(i);
        let blocks_data: Vec<serde_json::Value> = state
            .rpc_call("synor_getBlocksByBlueScore", (target_score, true))
            .await
            .unwrap_or_else(|_| vec![]);
        for b in blocks_data {
            if let Some(hash) = b.get("hash").and_then(|h| h.as_str()) {
                // Skip blocks already seen at another level.
                if all_hashes.insert(hash.to_string()) {
                    let header = b.get("header");
                    // Add edges to parents. The first-listed parent is
                    // treated as the selected parent — assumed parent
                    // ordering; TODO confirm against the node RPC.
                    if let Some(parents) = header
                        .and_then(|h| h.get("parents"))
                        .and_then(|p| p.as_array())
                    {
                        // NOTE: this `i` shadows the outer depth counter.
                        for (i, parent) in parents.iter().enumerate() {
                            if let Some(parent_hash) = parent.as_str() {
                                edges.push(DagEdge {
                                    from: hash.to_string(),
                                    to: parent_hash.to_string(),
                                    is_selected_parent: i == 0,
                                });
                            }
                        }
                    }
                    let timestamp = header
                        .and_then(|h| h.get("timestamp"))
                        .and_then(|t| t.as_u64())
                        .unwrap_or(0);
                    let blue_score_val = header
                        .and_then(|h| h.get("blueScore"))
                        .and_then(|s| s.as_u64())
                        .unwrap_or(target_score);
                    let tx_count = b
                        .get("transactions")
                        .and_then(|t| t.as_array())
                        .map(|a| a.len())
                        .unwrap_or(0);
                    blocks.push(DagBlock {
                        hash: hash.to_string(),
                        short_hash: hash.chars().take(8).collect(),
                        blue_score: blue_score_val,
                        // NOTE(review): every block is marked blue and tips
                        // are marked as chain blocks — approximations; this
                        // JSON carries no real GHOSTDAG color/chain flags.
                        is_blue: true,
                        is_chain_block: tips_resp.tips.contains(&hash.to_string()),
                        timestamp,
                        tx_count,
                    });
                }
            }
        }
    }
    Ok(Json(DagVisualization { blocks, edges }))
}
/// Query parameters for [`get_dag`].
#[derive(Deserialize)]
struct DagParams {
    /// How many blue-score levels to include (default 10, max 50).
    depth: Option<usize>,
}
/// Get mempool transactions, paginated in memory.
///
/// The node returns the full mempool in one response; paging is applied
/// with skip/take on the returned list.
async fn get_mempool(
    State(state): State<Arc<ExplorerState>>,
    Query(params): Query<PaginationParams>,
) -> Result<Json<PaginatedResponse<ExplorerTransaction>>, ApiError> {
    // Clamp to [1, max_page_size]: a client-supplied `limit=0` would
    // otherwise panic with a divide-by-zero in `total.div_ceil(limit)`.
    let limit = params.limit.min(state.config.max_page_size).max(1);
    let offset = (params.page.saturating_sub(1)) * limit;
    #[derive(Serialize)]
    struct GetMempoolParams {
        include_orphan_pool: bool,
        filter_tx_in_addresses: bool,
    }
    let entries: Vec<synor_rpc::RpcMempoolEntry> = state
        .rpc_call(
            "synor_getMempoolEntries",
            GetMempoolParams {
                include_orphan_pool: false,
                filter_tx_in_addresses: false,
            },
        )
        .await?;
    let total = entries.len();
    let txs: Vec<ExplorerTransaction> = entries
        .into_iter()
        .skip(offset)
        .take(limit)
        .map(|e| convert_rpc_transaction(e.transaction))
        .collect();
    let total_pages = total.div_ceil(limit);
    Ok(Json(PaginatedResponse {
        data: txs,
        page: params.page,
        limit,
        total,
        total_pages,
        has_next: params.page < total_pages,
        has_prev: params.page > 1,
    }))
}
/// Search for block, transaction, or address.
///
/// Heuristics: a `synor1` prefix is an address; a 64-char hex string is
/// tried as a block hash first, then as a transaction id (two probing RPC
/// calls). Anything else yields a 404.
async fn search(
    State(state): State<Arc<ExplorerState>>,
    Query(params): Query<SearchParams>,
) -> Result<Json<SearchResult>, ApiError> {
    let query = params.q.trim();
    if query.is_empty() {
        return Err(ApiError {
            error: "Search query is required".to_string(),
            code: 400,
        });
    }
    // Check if it's an address
    if query.starts_with("synor1") {
        return Ok(Json(SearchResult {
            result_type: "address".to_string(),
            value: query.to_string(),
            redirect_url: format!("/address/{}", query),
        }));
    }
    // Check if it's a hex hash (64 chars)
    if query.len() == 64 && query.chars().all(|c| c.is_ascii_hexdigit()) {
        // Try as block hash first
        #[derive(Serialize)]
        struct GetBlockParams {
            hash: String,
            include_txs: bool,
        }
        let block_result: Result<synor_rpc::RpcBlock, _> = state
            .rpc_call(
                "synor_getBlock",
                GetBlockParams {
                    hash: query.to_string(),
                    include_txs: false,
                },
            )
            .await;
        if block_result.is_ok() {
            return Ok(Json(SearchResult {
                result_type: "block".to_string(),
                value: query.to_string(),
                redirect_url: format!("/block/{}", query),
            }));
        }
        // Try as transaction ID
        #[derive(Serialize)]
        struct GetTxParams {
            tx_id: String,
        }
        let tx_result: Result<synor_rpc::RpcTransaction, _> = state
            .rpc_call(
                "synor_getTransaction",
                GetTxParams {
                    tx_id: query.to_string(),
                },
            )
            .await;
        if tx_result.is_ok() {
            return Ok(Json(SearchResult {
                result_type: "transaction".to_string(),
                value: query.to_string(),
                redirect_url: format!("/tx/{}", query),
            }));
        }
    }
    Err(ApiError {
        error: "No matching block, transaction, or address found".to_string(),
        code: 404,
    })
}
/// Query parameters for [`search`].
#[derive(Deserialize)]
struct SearchParams {
    /// The raw search string (`?q=...`).
    q: String,
}
// ==================== Helper Functions ====================
/// Convert RPC block to explorer block.
///
/// Verbose-only fields (difficulty, chain membership, children, merge sets)
/// fall back to defaults when the node omitted verbose data; transactions
/// are always converted and attached.
fn convert_rpc_block(rpc: synor_rpc::RpcBlock) -> ExplorerBlock {
    let verbose = rpc.verbose_data.as_ref();
    ExplorerBlock {
        hash: rpc.header.hash.clone(),
        version: rpc.header.version,
        parent_hashes: rpc.header.parent_hashes,
        timestamp: rpc.header.timestamp,
        timestamp_human: format_timestamp(rpc.header.timestamp),
        bits: rpc.header.bits,
        nonce: rpc.header.nonce,
        daa_score: rpc.header.daa_score,
        blue_score: rpc.header.blue_score,
        blue_work: rpc.header.blue_work,
        difficulty: verbose.map(|v| v.difficulty).unwrap_or(0.0),
        transaction_count: rpc.transactions.len(),
        // Without verbose data, optimistically assume chain membership.
        is_chain_block: verbose.map(|v| v.is_chain_block).unwrap_or(true),
        transactions: Some(
            rpc.transactions
                .into_iter()
                .map(convert_rpc_transaction)
                .collect(),
        ),
        children_hashes: verbose
            .map(|v| v.children_hashes.clone())
            .unwrap_or_default(),
        merge_set_blues: verbose
            .map(|v| v.merge_set_blues_hashes.clone())
            .unwrap_or_default(),
        merge_set_reds: verbose
            .map(|v| v.merge_set_reds_hashes.clone())
            .unwrap_or_default(),
    }
}
/// Convert RPC transaction to explorer transaction.
///
/// Derives `is_coinbase` (no inputs, or first input spends the all-zero
/// outpoint), totals inputs/outputs, and computes the fee as
/// `total_input - total_output` (0 for coinbase; input values are only
/// available when the node attached verbose data).
fn convert_rpc_transaction(rpc: synor_rpc::RpcTransaction) -> ExplorerTransaction {
    // `verbose` is Option<&_>, which is Copy — it can be mapped over
    // repeatedly without the redundant `.as_ref()` calls the original had.
    let verbose = rpc.verbose_data.as_ref();
    let is_coinbase = rpc.inputs.is_empty()
        || rpc
            .inputs
            .first()
            .map(|i| i.previous_outpoint.transaction_id.chars().all(|c| c == '0'))
            .unwrap_or(false);
    let total_output: u64 = rpc.outputs.iter().map(|o| o.value).sum();
    let total_input: u64 = rpc
        .inputs
        .iter()
        .filter_map(|i| i.verbose_data.as_ref().map(|v| v.value))
        .sum();
    let fee = if is_coinbase {
        0
    } else {
        // saturating_sub: without input verbose data total_input may be 0.
        total_input.saturating_sub(total_output)
    };
    ExplorerTransaction {
        id: verbose.map(|v| v.transaction_id.clone()).unwrap_or_default(),
        hash: verbose.map(|v| v.hash.clone()).unwrap_or_default(),
        version: rpc.version,
        inputs: rpc
            .inputs
            .into_iter()
            .map(|i| ExplorerInput {
                previous_tx_id: i.previous_outpoint.transaction_id,
                previous_index: i.previous_outpoint.index,
                address: i.verbose_data.as_ref().and_then(|v| v.address.clone()),
                value: i.verbose_data.as_ref().map(|v| v.value),
            })
            .collect(),
        outputs: rpc
            .outputs
            .into_iter()
            .map(|o| ExplorerOutput {
                value: o.value,
                value_human: format_synor(o.value),
                script_type: o
                    .verbose_data
                    .as_ref()
                    .map(|v| v.script_type.clone())
                    .unwrap_or_else(|| "unknown".to_string()),
                address: o.verbose_data.and_then(|v| v.address),
            })
            .collect(),
        lock_time: rpc.lock_time,
        mass: verbose.map(|v| v.mass).unwrap_or(0),
        is_coinbase,
        total_input,
        total_output,
        fee,
        block_hash: verbose.map(|v| v.block_hash.clone()),
        block_time: verbose.map(|v| v.block_time),
    }
}
/// Format an amount in sompi as a human-readable SYNOR string.
///
/// 1 SYNOR == 100,000,000 sompi; always prints 8 decimal places.
fn format_synor(sompi: u64) -> String {
    const SOMPI_PER_SYNOR: f64 = 100_000_000.0;
    format!("{:.8} SYNOR", sompi as f64 / SOMPI_PER_SYNOR)
}
/// Format a hashrate (in H/s) with the largest SI-style unit that fits,
/// from KH/s up to EH/s, always with two decimal places.
fn format_hashrate(hashrate: f64) -> String {
    // Thresholds in descending order; the first one not exceeding the
    // value wins. Below 1e3 the base unit is used.
    const UNITS: [(f64, &str); 6] = [
        (1e18, "EH/s"),
        (1e15, "PH/s"),
        (1e12, "TH/s"),
        (1e9, "GH/s"),
        (1e6, "MH/s"),
        (1e3, "KH/s"),
    ];
    for &(scale, unit) in UNITS.iter() {
        if hashrate >= scale {
            return format!("{:.2} {}", hashrate / scale, unit);
        }
    }
    format!("{:.2} H/s", hashrate)
}
/// Format a millisecond UNIX timestamp as a human-readable UTC string,
/// or "Unknown" when the value is out of chrono's representable range.
fn format_timestamp(ts: u64) -> String {
    match chrono::DateTime::from_timestamp_millis(ts as i64) {
        Some(dt) => dt.format("%Y-%m-%d %H:%M:%S UTC").to_string(),
        None => "Unknown".to_string(),
    }
}
// ==================== WebSocket ====================
/// WebSocket upgrade handler for the `/ws` route.
///
/// Upgrades the HTTP request and hands the socket plus the shared state to
/// [`handle_websocket`].
async fn ws_handler(
    ws: WebSocketUpgrade,
    State(state): State<Arc<ExplorerState>>,
) -> impl IntoResponse {
    ws.on_upgrade(move |socket| handle_websocket(socket, state))
}
/// Handle WebSocket connection with real-time updates.
///
/// Polls the node once per second and pushes JSON text frames of three
/// event types: `stats_update` (every tick), `new_block` (when blockCount
/// increases), and `tip_update` (when the blue score changes). The receive
/// half only watches for close frames. The handler returns when either
/// half finishes (send error, or client close).
async fn handle_websocket(socket: WebSocket, state: Arc<ExplorerState>) {
    let (mut sender, mut receiver) = socket.split();
    // Track last known values for change detection. Starting from 0 means
    // the first successful poll always emits new_block/tip_update events,
    // which doubles as the initial state push to the client.
    let mut last_block_count: u64 = 0;
    let mut last_blue_score: u64 = 0;
    // Spawn a task to send periodic updates
    let state_clone = state.clone();
    let send_task = tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(1));
        loop {
            interval.tick().await;
            // Minimal projections of the node RPC responses.
            #[derive(Deserialize)]
            #[serde(rename_all = "camelCase")]
            struct NodeInfo {
                block_count: u64,
                blue_score: u64,
                mempool_size: usize,
                peer_count: usize,
                synced: bool,
            }
            #[derive(Deserialize)]
            #[serde(rename_all = "camelCase")]
            struct MiningInfo {
                difficulty: f64,
                networkhashps: u64,
            }
            // Fetch current state; if either call fails this tick is
            // silently skipped and polling continues.
            let info_result = state_clone
                .rpc_call::<_, NodeInfo>("synor_getInfo", ())
                .await;
            let mining_result = state_clone
                .rpc_call::<_, MiningInfo>("synor_getMiningInfo", ())
                .await;
            if let (Ok(info), Ok(mining)) = (info_result, mining_result) {
                // Send stats update
                let stats_event = serde_json::json!({
                    "type": "stats_update",
                    "blockCount": info.block_count,
                    "virtualDaaScore": info.blue_score,
                    "difficulty": mining.difficulty,
                    "mempoolSize": info.mempool_size,
                    "hashrate": mining.networkhashps as f64,
                    "hashrateHuman": format_hashrate(mining.networkhashps as f64),
                });
                // A failed send means the client went away — stop polling.
                if sender
                    .send(Message::Text(stats_event.to_string().into()))
                    .await
                    .is_err()
                {
                    break;
                }
                // Check for new blocks
                if info.block_count > last_block_count {
                    // New block detected. The node poll gives no real block
                    // hash, so a placeholder derived from the blue score is
                    // sent — frontends must not treat it as a lookup key.
                    let block_event = serde_json::json!({
                        "type": "new_block",
                        "hash": format!("{:064x}", info.blue_score), // Placeholder hash
                        "blueScore": info.blue_score,
                        "timestamp": chrono::Utc::now().timestamp_millis(),
                        "txCount": 1,
                        "isChainBlock": true,
                    });
                    if sender
                        .send(Message::Text(block_event.to_string().into()))
                        .await
                        .is_err()
                    {
                        break;
                    }
                    last_block_count = info.block_count;
                }
                // Check for tip updates
                if info.blue_score != last_blue_score {
                    #[derive(Deserialize)]
                    struct TipsResponse {
                        tips: Vec<String>,
                    }
                    if let Ok(tips_resp) = state_clone
                        .rpc_call::<_, TipsResponse>("synor_getTips", ())
                        .await
                    {
                        let tip_event = serde_json::json!({
                            "type": "tip_update",
                            "tips": tips_resp.tips,
                            "tipCount": tips_resp.tips.len(),
                        });
                        if sender
                            .send(Message::Text(tip_event.to_string().into()))
                            .await
                            .is_err()
                        {
                            break;
                        }
                    }
                    last_blue_score = info.blue_score;
                }
            }
        }
    });
    // Handle incoming messages (for potential future subscriptions)
    let recv_task = tokio::spawn(async move {
        while let Some(msg) = receiver.next().await {
            match msg {
                Ok(Message::Close(_)) => break,
                Ok(Message::Ping(data)) => {
                    // Pong is handled automatically by axum
                    let _ = data;
                }
                _ => {}
            }
        }
    });
    // Wait for either task to complete.
    // NOTE(review): the losing JoinHandle is dropped but its task keeps
    // running detached until it next fails/finishes — consider calling
    // `.abort()` on the other handle here.
    tokio::select! {
        _ = send_task => {},
        _ = recv_task => {},
    }
    info!("WebSocket connection closed");
}
// ==================== Main ====================
/// Process entry point: configure logging, load config, build the router
/// (REST API + WebSocket + optional static SPA serving), and serve.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Initialize logging
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::from_default_env()
                .add_directive("synor_explorer=info".parse()?)
                .add_directive("tower_http=debug".parse()?),
        )
        .init();
    // Load configuration (.env is optional; a missing file is ignored)
    dotenvy::dotenv().ok();
    let config = ExplorerConfig::from_env();
    info!("Starting Synor Block Explorer Backend...");
    info!("RPC URL: {}", config.rpc_url);
    info!("Listen address: {}", config.listen_addr);
    if let Some(ref dir) = config.static_dir {
        info!("Static files: {}", dir);
    }
    // Create application state
    let state = Arc::new(ExplorerState::new(config.clone()));
    // Build API router
    let api_router = Router::new()
        // Health & Info
        .route("/health", get(health))
        .route("/api/v1/stats", get(get_stats))
        // WebSocket for real-time updates
        .route("/ws", get(ws_handler))
        // Blocks
        .route("/api/v1/blocks", get(get_blocks))
        .route("/api/v1/blocks/:hash", get(get_block))
        .route("/api/v1/tips", get(get_tips))
        // Transactions
        .route("/api/v1/tx/:tx_id", get(get_transaction))
        .route("/api/v1/mempool", get(get_mempool))
        // Addresses
        .route("/api/v1/address/:address", get(get_address))
        .route("/api/v1/address/:address/utxos", get(get_address_utxos))
        // DAG
        .route("/api/v1/dag", get(get_dag))
        // Search
        .route("/api/v1/search", get(search))
        .with_state(state);
    // Build full app with optional static file serving
    let app = if let Some(ref static_dir) = config.static_dir {
        // Serve static files with SPA fallback (index.html for client-side routing)
        let index_path = format!("{}/index.html", static_dir);
        let serve_dir = ServeDir::new(static_dir)
            .not_found_service(ServeFile::new(&index_path));
        api_router
            .fallback_service(serve_dir)
            .layer(TraceLayer::new_for_http())
            .layer(CompressionLayer::new())
            .layer(config.cors_layer())
    } else {
        // API-only mode
        api_router
            .layer(TraceLayer::new_for_http())
            .layer(CompressionLayer::new())
            .layer(config.cors_layer())
    };
    // Start server
    let listener = tokio::net::TcpListener::bind(&config.listen_addr).await?;
    info!("Explorer server listening on {}", config.listen_addr);
    axum::serve(listener, app).await?;
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Formatting of sompi amounts, including the zero edge case.
    #[test]
    fn test_format_synor() {
        assert_eq!(format_synor(100_000_000), "1.00000000 SYNOR");
        assert_eq!(format_synor(50_000_000), "0.50000000 SYNOR");
        assert_eq!(format_synor(1), "0.00000001 SYNOR");
        assert_eq!(format_synor(0), "0.00000000 SYNOR");
    }

    /// Hashrate unit selection across every branch, including the
    /// previously-untested H/s, PH/s and EH/s ranges.
    #[test]
    fn test_format_hashrate() {
        assert_eq!(format_hashrate(999.0), "999.00 H/s");
        assert_eq!(format_hashrate(1000.0), "1.00 KH/s");
        assert_eq!(format_hashrate(1_000_000.0), "1.00 MH/s");
        assert_eq!(format_hashrate(1_000_000_000.0), "1.00 GH/s");
        assert_eq!(format_hashrate(1_000_000_000_000.0), "1.00 TH/s");
        assert_eq!(format_hashrate(1e15), "1.00 PH/s");
        assert_eq!(format_hashrate(1e18), "1.00 EH/s");
    }

    /// Offset arithmetic used by the paginated handlers.
    #[test]
    fn test_pagination() {
        let params = PaginationParams { page: 2, limit: 25 };
        let offset = (params.page.saturating_sub(1)) * params.limit;
        assert_eq!(offset, 25);
    }
}