fix: resolve all clippy warnings for CI

Fix all Rust clippy warnings that caused CI failures when building
with RUSTFLAGS=-Dwarnings. Changes include:

- Replace derivable_impls with derive macros for BlockBody, Network, etc.
- Use div_ceil() instead of manual implementation
- Fix should_implement_trait by renaming from_str to parse
- Add type aliases for type_complexity warnings
- Use or_default(), is_some_and(), is_multiple_of() where appropriate
- Remove needless borrows and redundant closures
- Fix manual_strip with strip_prefix()
- Add allow attributes for intentional patterns (too_many_arguments,
  needless_range_loop in cryptographic code, assertions_on_constants)
- Remove unused imports, mut bindings, and dead code in tests
This commit is contained in:
Gulshan Yadav 2026-01-08 05:58:22 +05:30
parent 8dac870e5e
commit 5c643af64c
52 changed files with 190 additions and 209 deletions

View file

@ -83,9 +83,7 @@ struct AddressValidation {
fn validate_address(address: &str) -> AddressValidation {
// Check prefix
if address.starts_with("synor:") {
let rest = &address[6..];
if let Some(rest) = address.strip_prefix("synor:") {
// Check length (40 hex chars = 20 bytes public key hash)
if rest.len() != 40 {
return AddressValidation {
@ -112,10 +110,8 @@ fn validate_address(address: &str) -> AddressValidation {
address_type: Some("Ed25519".to_string()),
error: None,
}
} else if address.starts_with("synorq:") {
} else if let Some(rest) = address.strip_prefix("synorq:") {
// Quantum-resistant address
let rest = &address[7..];
if rest.len() < 40 {
return AddressValidation {
is_valid: false,

View file

@ -140,6 +140,7 @@ async fn deploy(
}
/// Call a contract method.
#[allow(clippy::too_many_arguments)]
async fn call(
client: &RpcClient,
contract_id: &str,

View file

@ -279,6 +279,7 @@ async fn proposal(client: &RpcClient, id: &str, format: OutputFormat) -> Result<
}
/// Create a proposal.
#[allow(clippy::too_many_arguments)]
async fn create_proposal(
client: &RpcClient,
proposer: &str,

View file

@ -228,10 +228,10 @@ fn build_transaction(
) -> Result<String> {
// Parse destination address
let to_address =
Address::from_str(to).map_err(|e| anyhow::anyhow!("Invalid destination address: {}", e))?;
Address::parse(to).map_err(|e| anyhow::anyhow!("Invalid destination address: {}", e))?;
// Parse change address
let change_address = Address::from_str(change_addr)
let change_address = Address::parse(change_addr)
.map_err(|e| anyhow::anyhow!("Invalid change address: {}", e))?;
// Build inputs (initially with empty signature scripts)

View file

@ -28,15 +28,14 @@ pub async fn handle(config: &CliConfig, cmd: WalletCommands, format: OutputForma
async fn create(config: &CliConfig, name: &str, format: OutputFormat) -> Result<()> {
let wallet_path = config.wallet_path(name);
if wallet_path.exists() {
if !Confirm::new()
if wallet_path.exists()
&& !Confirm::new()
.with_prompt("Wallet already exists. Overwrite?")
.default(false)
.interact()?
{
return Ok(());
}
}
// Get password for wallet encryption
let password: String = Password::new()
@ -91,15 +90,14 @@ async fn create(config: &CliConfig, name: &str, format: OutputFormat) -> Result<
async fn import(config: &CliConfig, name: &str, format: OutputFormat) -> Result<()> {
let wallet_path = config.wallet_path(name);
if wallet_path.exists() {
if !Confirm::new()
if wallet_path.exists()
&& !Confirm::new()
.with_prompt("Wallet already exists. Overwrite?")
.default(false)
.interact()?
{
return Ok(());
}
}
let seed_phrase: String = Input::new()
.with_prompt("Enter seed phrase (12 or 24 words)")

View file

@ -504,7 +504,7 @@ pub fn list_wallets(dir: &Path) -> anyhow::Result<Vec<String>> {
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if path.extension().map_or(false, |e| e == "wallet") {
if path.extension().is_some_and(|e| e == "wallet") {
if let Some(name) = path.file_stem() {
wallets.push(name.to_string_lossy().to_string());
}

View file

@ -671,7 +671,7 @@ async fn get_blocks(
let count: BlockCount = state.rpc_call("synor_getBlockCount", ()).await?;
let total = count.block_count as usize;
let total_pages = (total + limit - 1) / limit;
let total_pages = total.div_ceil(limit);
Ok(Json(PaginatedResponse {
data: blocks,
@ -896,7 +896,7 @@ async fn get_mempool(
.map(|e| convert_rpc_transaction(e.transaction))
.collect();
let total_pages = (total + limit - 1) / limit;
let total_pages = total.div_ceil(limit);
Ok(Json(PaginatedResponse {
data: txs,

View file

@ -640,7 +640,7 @@ async fn send_tokens(state: &FaucetState, address: &str) -> anyhow::Result<Optio
if !response.status().is_success() {
// For testnet demo, simulate success
// In production, this would be a real error
return Ok(Some(format!("0x{}", hex::encode(&rand_bytes()))));
return Ok(Some(format!("0x{}", hex::encode(rand_bytes()))));
}
let rpc_response: RpcResponse = response.json().await?;

View file

@ -222,6 +222,7 @@ fn init_logging(level: &str, json: bool) {
}
/// Run the node.
#[allow(clippy::too_many_arguments)]
async fn run_node(
config_path: PathBuf,
data_dir: Option<PathBuf>,
@ -493,7 +494,7 @@ async fn import_blocks(
errors += 1;
} else {
imported += 1;
if imported % 1000 == 0 {
if imported.is_multiple_of(1000) {
info!("Imported {} blocks...", imported);
}
}
@ -585,7 +586,7 @@ async fn export_blocks(
writer.write_all(&serialized)?;
exported += 1;
if exported % 1000 == 0 {
if exported.is_multiple_of(1000) {
info!("Exported {} blocks...", exported);
}
}

View file

@ -339,7 +339,7 @@ impl ConsensusService {
}
// Update UTXO set
for (_i, tx) in block.body.transactions.iter().enumerate() {
for tx in &block.body.transactions {
// Create UTXO diff
let diff = self
.utxo_set
@ -624,11 +624,7 @@ impl ConsensusService {
for hash in &chain {
if let Some(info) = self.get_block_info(hash).await {
let diff = if info.blue_score > target_score {
info.blue_score - target_score
} else {
target_score - info.blue_score
};
let diff = info.blue_score.abs_diff(target_score);
if diff < closest_diff {
closest_diff = diff;
closest_hash = Some(*hash);

View file

@ -117,6 +117,7 @@ impl ContractService {
}
/// Deploys a new contract.
#[allow(clippy::too_many_arguments)]
pub async fn deploy(
&self,
bytecode: Vec<u8>,
@ -213,6 +214,7 @@ impl ContractService {
}
/// Calls a contract method.
#[allow(clippy::too_many_arguments)]
pub async fn call(
&self,
contract_id: &[u8; 32],
@ -311,6 +313,7 @@ impl ContractService {
}
/// Estimates gas for a contract call.
#[allow(clippy::too_many_arguments)]
pub async fn estimate_gas(
&self,
contract_id: &[u8; 32],

View file

@ -151,7 +151,7 @@ impl MempoolService {
});
// Store handle for cleanup on stop
let mempool_clone = Arc::clone(&self);
let mempool_clone = Arc::clone(self);
tokio::spawn(async move {
*mempool_clone.cleanup_handle.write().await = Some(handle);
});

View file

@ -242,7 +242,7 @@ impl RpcService {
// synor_getTips
module.register_async_method("synor_getTips", |_, ctx| async move {
let tips = ctx.consensus.tips().await;
let tip_strings: Vec<String> = tips.iter().map(|t| hex::encode(t)).collect();
let tip_strings: Vec<String> = tips.iter().map(hex::encode).collect();
serde_json::json!({"tips": tip_strings})
})?;
@ -271,7 +271,7 @@ impl RpcService {
};
let block_json = serde_json::json!({
"hash": hex::encode(&hash),
"hash": hex::encode(hash),
"header": {
"version": header.version,
"parents": header.parents.iter().map(|p| hex::encode(p.as_bytes())).collect::<Vec<_>>(),
@ -462,7 +462,7 @@ impl RpcService {
.await
{
Ok(result) => serde_json::json!({
"contractId": hex::encode(&result.contract_id),
"contractId": hex::encode(result.contract_id),
"address": hex::encode(&result.address),
"gasUsed": result.gas_used
}),
@ -530,8 +530,8 @@ impl RpcService {
Ok(result) => {
let logs: Vec<serde_json::Value> = result.logs.iter().map(|log| {
serde_json::json!({
"contractId": hex::encode(&log.contract_id),
"topics": log.topics.iter().map(|t| hex::encode(t)).collect::<Vec<_>>(),
"contractId": hex::encode(log.contract_id),
"topics": log.topics.iter().map(hex::encode).collect::<Vec<_>>(),
"data": hex::encode(&log.data)
})
}).collect();
@ -701,7 +701,7 @@ impl RpcService {
match ctx.contract.get_contract(&contract_id).await {
Ok(Some(contract)) => serde_json::json!({
"codeHash": hex::encode(&contract.code_hash),
"codeHash": hex::encode(contract.code_hash),
"deployer": hex::encode(&contract.deployer),
"deployedAt": contract.deployed_at,
"deployedHeight": contract.deployed_height
@ -727,7 +727,7 @@ impl RpcService {
pub async fn get_block(
&self,
hash: &str,
include_txs: bool,
_include_txs: bool,
) -> anyhow::Result<Option<RpcBlock>> {
let hash_bytes = hex_to_hash(hash)?;
let block_data = self.storage.get_block(&hash_bytes).await?;
@ -747,7 +747,7 @@ impl RpcService {
blue_work: String::new(),
pruning_point: None,
},
transactions: if include_txs { vec![] } else { vec![] },
transactions: vec![],
verbose_data: None,
}))
} else {
@ -766,7 +766,7 @@ impl RpcService {
.tips()
.await
.iter()
.map(|h| hex::encode(h))
.map(hex::encode)
.collect()
}
@ -797,7 +797,7 @@ impl RpcService {
// Announce to network
self.network.announce_tx(hash).await;
Ok(hex::encode(&hash))
Ok(hex::encode(hash))
}
crate::services::consensus::TxValidation::Invalid { reason } => {
anyhow::bail!("Invalid transaction: {}", reason)

View file

@ -143,7 +143,7 @@ impl SyncService {
*self.state.write().await = progress.state;
*self.progress.write().await = progress;
if self.state.read().await.clone() == SyncState::Synced {
if *self.state.read().await == SyncState::Synced {
info!("Node is already synced");
return Ok(());
}

View file

@ -19,6 +19,7 @@ use synord::config::NodeConfig;
use synord::node::{NodeState, SynorNode};
/// Test timeout for operations.
#[allow(dead_code)]
const TEST_TIMEOUT: Duration = Duration::from_secs(30);
// ==================== Test Helpers ====================
@ -39,6 +40,7 @@ fn create_node_config(temp_dir: &TempDir, node_index: u16, seeds: Vec<String>) -
}
/// Creates a mining-enabled node configuration.
#[allow(dead_code)]
fn create_miner_config(
temp_dir: &TempDir,
node_index: u16,
@ -60,6 +62,7 @@ struct ForkTestNetwork {
impl ForkTestNetwork {
/// Creates a network with specified number of mining nodes.
#[allow(dead_code)]
async fn new_with_miners(miner_count: usize) -> anyhow::Result<Self> {
let mut temp_dirs = Vec::new();
let mut nodes = Vec::new();

View file

@ -16,9 +16,10 @@ use tokio::time::{sleep, timeout};
use tracing::info;
use synord::config::NodeConfig;
use synord::node::{NodeState, SynorNode};
use synord::node::SynorNode;
/// Test timeout for async operations.
#[allow(dead_code)]
const TEST_TIMEOUT: Duration = Duration::from_secs(60);
/// Time to wait for network operations.
@ -52,6 +53,7 @@ fn create_node_config(temp_dir: &TempDir, node_index: u16, seeds: Vec<String>) -
}
/// Test network with multiple nodes.
#[allow(dead_code)]
struct TestNetwork {
nodes: Vec<Arc<SynorNode>>,
temp_dirs: Vec<TempDir>,
@ -276,7 +278,7 @@ async fn test_manual_peer_connect() {
}
// Manually connect node1 to node2
let node2_port = 17000 + (std::process::id() % 500) as u16 * 10 + 1 * 3;
let node2_port = 17000 + (std::process::id() % 500) as u16 * 10 + 3;
let node2_addr = format!("/ip4/127.0.0.1/tcp/{}", node2_port);
{
@ -488,13 +490,13 @@ async fn test_simultaneous_node_start() {
let first_port = 17000 + (std::process::id() % 500) as u16 * 10;
let mut configs = Vec::new();
for i in 0..node_count {
for (i, temp_dir) in temp_dirs.iter().enumerate() {
let seeds = if i == 0 {
vec![]
} else {
vec![format!("/ip4/127.0.0.1/tcp/{}", first_port)]
};
configs.push(create_node_config(&temp_dirs[i], i as u16, seeds));
configs.push(create_node_config(temp_dir, i as u16, seeds));
}
// Create all nodes

View file

@ -22,6 +22,7 @@ use synord::node::{NodeState, SynorNode};
// ==================== Test Constants ====================
/// Timeout for stress test operations.
#[allow(dead_code)]
const STRESS_TIMEOUT: Duration = Duration::from_secs(60);
/// Number of concurrent operations for stress tests.
@ -64,7 +65,7 @@ impl StressTestNetwork {
// First node (seed)
let temp = TempDir::new()?;
let seed_port = 20000 + (std::process::id() % 500) as u16 * 10;
let mut config = create_stress_config(&temp, 0);
let config = create_stress_config(&temp, 0);
temp_dirs.push(temp);
nodes.push(Arc::new(SynorNode::new(config).await?));

View file

@ -19,6 +19,7 @@ use synord::config::NodeConfig;
use synord::node::{NodeState, SynorNode};
/// Test timeout for sync operations.
#[allow(dead_code)]
const SYNC_TIMEOUT: Duration = Duration::from_secs(30);
// ==================== Test Helpers ====================
@ -212,11 +213,6 @@ async fn test_sync_service_start_stop() {
node.start().await.unwrap();
// Verify sync service is accessible
assert!(
true, // sync service always exists
"Sync service should be accessible"
);
let sync = node.sync();
// Check that we can get state
let state = sync.state().await;
@ -468,7 +464,7 @@ async fn test_sync_after_disconnect() {
#[tokio::test]
async fn test_sync_config_options() {
let temp_dir = TempDir::new().unwrap();
let mut config = create_node_config(&temp_dir, 0, vec![]);
let config = create_node_config(&temp_dir, 0, vec![]);
// Verify sync-related config options
info!(

View file

@ -445,15 +445,13 @@ impl Compiler {
let parser = Parser::new(0);
let mut data_size = 0usize;
for payload in parser.parse_all(wasm) {
if let Ok(Payload::DataSection(reader)) = payload {
for data in reader {
if let Ok(data) = data {
for payload in parser.parse_all(wasm).flatten() {
if let Payload::DataSection(reader) = payload {
for data in reader.into_iter().flatten() {
data_size += data.data.len();
}
}
}
}
self.config.gas_config.memory_byte * data_size as u64
}

View file

@ -17,7 +17,7 @@ use wasmparser::{Parser, Payload};
use crate::{CompilerError, Result};
/// Contract metadata.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ContractMetadata {
/// Contract name.
pub name: Option<String>,
@ -50,23 +50,6 @@ pub struct ContractMetadata {
pub custom: HashMap<String, String>,
}
impl Default for ContractMetadata {
fn default() -> Self {
ContractMetadata {
name: None,
version: None,
authors: Vec::new(),
description: None,
license: None,
repository: None,
build_timestamp: None,
rust_version: None,
sdk_version: None,
custom: HashMap::new(),
}
}
}
impl ContractMetadata {
/// Creates new metadata with a name.
pub fn new(name: &str) -> Self {
@ -716,17 +699,14 @@ pub fn estimate_contract_gas(wasm: &[u8], gas_config: &synor_vm::GasConfig) -> u
let parser = Parser::new(0);
for payload in parser.parse_all(wasm) {
if let Ok(payload) = payload {
for payload in parser.parse_all(wasm).flatten() {
match payload {
Payload::DataSection(reader) => {
// Gas for data initialization
for data in reader {
if let Ok(data) = data {
for data in reader.into_iter().flatten() {
gas += gas_config.memory_byte * data.data.len() as u64;
}
}
}
Payload::CodeSectionStart { count, .. } => {
// Base gas per function
gas += count as u64 * gas_config.instruction_base * 10;
@ -734,7 +714,6 @@ pub fn estimate_contract_gas(wasm: &[u8], gas_config: &synor_vm::GasConfig) -> u
_ => {}
}
}
}
// Base deployment cost
gas += gas_config.create_base;

View file

@ -420,8 +420,9 @@ impl Validator {
// Validate entry point signatures
for export in &exports {
if export.name == "__synor_init" || export.name == "__synor_call" {
if export.kind != ExportKind::Function {
if (export.name == "__synor_init" || export.name == "__synor_call")
&& export.kind != ExportKind::Function
{
errors.push(ValidationError::InvalidExportType {
name: export.name.clone(),
expected: "function".into(),
@ -429,7 +430,6 @@ impl Validator {
});
}
}
}
// Validate memory
debug!("Validating {} memories", memories.len());
@ -528,7 +528,7 @@ impl Validator {
pub fn allow_import(&mut self, module: &str, name: &str) {
self.allowed_imports
.entry(module.to_string())
.or_insert_with(HashSet::new)
.or_default()
.insert(name.to_string());
}
@ -574,7 +574,7 @@ mod tests {
#[test]
fn test_validate_too_large() {
let validator = Validator::new(10, MAX_MEMORY_PAGES);
let result = validator.validate(&vec![0u8; 100]);
let result = validator.validate(&[0u8; 100]);
assert!(matches!(
result,
Err(ValidationError::ContractTooLarge { .. })

View file

@ -240,12 +240,7 @@ impl DifficultyManager {
// Clamp to valid ranges
coef = coef.clamp(1, 0x7fffff);
if exp < 3 {
exp = 3;
}
if exp > 32 {
exp = 32;
}
exp = exp.clamp(3, 32);
((exp as u32) << 24) | (coef as u32)
}

View file

@ -141,6 +141,7 @@ mod tests {
use super::*;
#[test]
#[allow(clippy::assertions_on_constants)]
fn test_constants() {
assert!(MAX_TRANSACTION_SIZE > 0);
assert!(MAX_BLOCK_MASS > 0);

View file

@ -169,7 +169,7 @@ impl TransactionValidator {
let fee = total_input.saturating_sub(total_output);
// Check minimum fee
let tx_size = tx.weight() as u64;
let tx_size = tx.weight();
let min_fee = Amount::from_sompi(tx_size * self.min_fee_rate);
if fee < min_fee {
return Err(ValidationError::InsufficientFee {

View file

@ -187,17 +187,6 @@ impl DerivationPath {
}
}
/// Returns the path as a string.
pub fn to_string(&self) -> String {
format!(
"m/44'/{}'/{}'/{}/{}",
Self::COIN_TYPE,
self.account,
self.change,
self.index
)
}
/// Creates a path for the nth external address.
pub fn external(account: u32, index: u32) -> Self {
DerivationPath {
@ -225,7 +214,14 @@ impl Default for DerivationPath {
impl std::fmt::Display for DerivationPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.to_string())
write!(
f,
"m/44'/{}'/{}'/{}/{}",
Self::COIN_TYPE,
self.account,
self.change,
self.index
)
}
}

View file

@ -11,7 +11,7 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::sync::Arc;
use synor_dag::{BlockDag, BlockId, GhostdagManager, ReachabilityStore, GHOSTDAG_K};
use synor_dag::{BlockDag, BlockId, GhostdagManager, ReachabilityStore};
use synor_types::Hash256;
/// Helper to create deterministic block IDs.
@ -73,7 +73,7 @@ fn build_wide_dag(
let block = make_block_id(block_num);
// Connect to all blocks in previous layer (up to MAX_BLOCK_PARENTS)
let parents: Vec<_> = layers[d - 1].iter().copied().collect();
let parents: Vec<_> = layers[d - 1].to_vec();
let selected_parent = parents[0];
dag.insert_block(block, parents.clone(), block_num * 100)
@ -148,7 +148,7 @@ fn block_insertion_wide(c: &mut Criterion) {
|(dag, reachability, ghostdag, layers)| {
// Benchmark inserting a new block on top
let new_block = make_block_id(99999);
let parents: Vec<_> = layers.last().unwrap().iter().copied().collect();
let parents: Vec<_> = layers.last().unwrap().to_vec();
let selected_parent = parents[0];
dag.insert_block(new_block, parents.clone(), 99999 * 100)
@ -274,7 +274,7 @@ fn merge_set_calculation(c: &mut Criterion) {
|(dag, reachability, ghostdag, layers)| {
// Insert a new block that merges multiple parents
let new_block = make_block_id(99999);
let parents: Vec<_> = layers.last().unwrap().iter().copied().collect();
let parents: Vec<_> = layers.last().unwrap().to_vec();
let selected_parent = parents[0];
dag.insert_block(new_block, parents.clone(), 99999 * 100)
@ -310,7 +310,7 @@ fn k_cluster_validation(c: &mut Criterion) {
build_wide_dag(5, k_val as usize, &dag, &reachability, &ghostdag);
(dag, reachability, ghostdag)
},
|(dag, reachability, ghostdag)| {
|(_dag, _reachability, ghostdag)| {
// Check k-cluster property
black_box(ghostdag.k())
},

View file

@ -21,6 +21,9 @@ use hashbrown::{HashMap, HashSet};
use parking_lot::RwLock;
use thiserror::Error;
/// Result of partitioning the merge set into blue and red blocks.
type PartitionResult = (Vec<BlockId>, Vec<BlockId>, HashMap<BlockId, usize>);
/// GHOSTDAG data computed for each block.
#[derive(Clone, Debug)]
pub struct GhostdagData {
@ -261,7 +264,7 @@ impl GhostdagManager {
&self,
selected_parent_data: &GhostdagData,
merge_set: &[BlockId],
) -> Result<(Vec<BlockId>, Vec<BlockId>, HashMap<BlockId, usize>), GhostdagError> {
) -> Result<PartitionResult, GhostdagError> {
let mut blues: Vec<BlockId> = Vec::with_capacity(merge_set.len());
let mut reds: Vec<BlockId> = Vec::new();
let mut blues_anticone_sizes: HashMap<BlockId, usize> = HashMap::with_capacity(
@ -500,7 +503,7 @@ mod tests {
#[test]
fn test_genesis_data() {
let genesis = make_block_id(0);
let (dag, reachability, ghostdag) = setup_test_dag();
let (_dag, _reachability, ghostdag) = setup_test_dag();
let data = ghostdag.get_data(&genesis).unwrap();
assert_eq!(data.blue_score, 0);

View file

@ -67,6 +67,7 @@ mod tests {
use super::*;
#[test]
#[allow(clippy::assertions_on_constants)]
fn test_constants() {
assert!(GHOSTDAG_K > 0);
assert!(MAX_BLOCK_PARENTS >= GHOSTDAG_K as usize);

View file

@ -287,6 +287,7 @@ pub struct Proposal {
impl Proposal {
/// Creates a new proposal.
#[allow(clippy::too_many_arguments)]
pub fn new(
number: u64,
proposer: Address,
@ -537,6 +538,7 @@ impl DAO {
}
/// Creates a new proposal.
#[allow(clippy::too_many_arguments)]
pub fn create_proposal(
&mut self,
proposer: Address,
@ -570,7 +572,7 @@ impl DAO {
// Index by proposer
self.by_proposer
.entry(proposer)
.or_insert_with(Vec::new)
.or_default()
.push(id);
self.proposals.insert(id, proposal);

View file

@ -637,7 +637,7 @@ impl MultisigManager {
for signer in &config.signers {
self.by_signer
.entry(signer.clone())
.or_insert_with(Vec::new)
.or_default()
.push(id);
}

View file

@ -519,7 +519,7 @@ impl TreasuryPool {
.config
.approvals
.dao_threshold
.map_or(false, |t| amount >= t)
.is_some_and(|t| amount >= t)
{
SpendingRequestState::PendingDao
} else if self.config.approvals.council_required {
@ -571,7 +571,7 @@ impl TreasuryPool {
.config
.approvals
.dao_threshold
.map_or(false, |t| request.amount >= t);
.is_some_and(|t| request.amount >= t);
if needs_dao {
request.state = SpendingRequestState::PendingDao;

View file

@ -105,9 +105,7 @@ impl VestingSchedule {
// Linear vesting between cliff and end
// vested = total * elapsed / vesting_duration
let vested =
(self.total_amount as u128 * elapsed as u128 / self.vesting_duration as u128) as u64;
vested
(self.total_amount as u128 * elapsed as u128 / self.vesting_duration as u128) as u64
}
/// Returns the cliff end timestamp.
@ -441,13 +439,13 @@ impl VestingManager {
// Index by beneficiary
self.by_beneficiary
.entry(beneficiary)
.or_insert_with(Vec::new)
.or_default()
.push(id);
// Index by grantor
self.by_grantor
.entry(grantor)
.or_insert_with(Vec::new)
.or_default()
.push(id);
self.contracts.insert(id, contract);

View file

@ -484,7 +484,7 @@ fn mining_stats_operations(c: &mut Criterion) {
group.bench_function("update_hashrate", |b| {
b.iter_batched(
|| MiningStats::default(),
MiningStats::default,
|mut stats| {
stats.update_hashrate(1000000, 1000);
black_box(stats)

View file

@ -87,7 +87,7 @@ impl KHeavyHash {
// Step 5: Final SHA3-256
let mut hasher = Sha3_256::new();
hasher.update(&mixed);
hasher.update(mixed);
let final_hash: [u8; 32] = hasher.finalize().into();
PowHash {
@ -166,7 +166,7 @@ impl KHeavyHash {
}
// Report progress every 10000 hashes
if tried % 10000 == 0 && !callback(tried, nonce) {
if tried.is_multiple_of(10000) && !callback(tried, nonce) {
return None; // Cancelled
}
}

View file

@ -127,6 +127,7 @@ impl Target {
}
/// Checks if a hash meets this target.
#[allow(clippy::needless_range_loop)]
pub fn is_met_by(&self, hash: &Hash256) -> bool {
// Hash must be <= target (big-endian comparison)
let hash_bytes = hash.as_bytes();

View file

@ -39,6 +39,7 @@ impl HeavyMatrix {
/// Multiplies a 256-bit vector by the matrix.
/// Input: 32 bytes (256 bits) treated as 64 nibbles
/// Output: 32 bytes after matrix multiplication
#[allow(clippy::needless_range_loop)]
pub fn multiply(&self, input: &[u8; 32]) -> [u8; 32] {
// Expand 32 bytes into 64 nibbles (4 bits each)
let mut nibbles = [0u8; 64];
@ -68,6 +69,7 @@ impl HeavyMatrix {
}
/// Multiplies with full byte input (alternative method).
#[allow(clippy::needless_range_loop)]
pub fn multiply_bytes(&self, input: &[u8; 32]) -> [u8; 32] {
let mut output = [0u8; 32];
@ -141,6 +143,7 @@ pub struct GfTables {
impl GfTables {
/// Creates precomputed tables.
#[allow(clippy::needless_range_loop)]
pub fn new() -> Self {
let mut mul = [[0u8; 256]; 256];
@ -201,6 +204,7 @@ impl OptimizedMatrix {
}
/// Fast matrix multiplication.
#[allow(clippy::needless_range_loop)]
pub fn multiply(&self, input: &[u8; 32]) -> [u8; 32] {
// Expand to nibbles
let mut nibbles = [0u8; 64];

View file

@ -282,7 +282,7 @@ impl BlockMiner {
nonce = nonce.wrapping_add(1);
// Update stats periodically
if hashes % 10000 == 0 {
if hashes.is_multiple_of(10000) {
self.hash_counter.fetch_add(10000, Ordering::Relaxed);
}
}
@ -527,7 +527,6 @@ mod tests {
let result = result.unwrap();
assert!(result.hashes > 0);
assert!(result.solve_time_ms >= 0);
}
#[test]

View file

@ -46,8 +46,8 @@ impl StratumJob {
StratumJob {
job_id,
header_hash: hex::encode(&template.header_data),
share_target: hex::encode(&template.target),
block_target: hex::encode(&template.target),
share_target: hex::encode(template.target),
block_target: hex::encode(template.target),
timestamp: template.timestamp,
extra_nonce1: extra_nonce1.to_string(),
extra_nonce2_size: 8,

View file

@ -430,7 +430,7 @@ fn merkle_root(hashes: &[&[u8; 32]]) -> Hash256 {
let mut current: Vec<[u8; 32]> = hashes.iter().map(|h| **h).collect();
while current.len() > 1 {
let mut next = Vec::with_capacity((current.len() + 1) / 2);
let mut next = Vec::with_capacity(current.len().div_ceil(2));
for chunk in current.chunks(2) {
let mut data = [0u8; 64];

View file

@ -161,6 +161,7 @@ impl SynorBehaviour {
}
/// Sends a response to a request.
#[allow(clippy::result_large_err)]
pub fn send_response(
&mut self,
channel: request_response::ResponseChannel<SynorResponse>,
@ -262,7 +263,6 @@ pub enum BehaviourEvent {
#[cfg(test)]
mod tests {
use super::*;
use libp2p::identity::Keypair;
#[test]
fn test_message_id() {

View file

@ -114,6 +114,7 @@ impl ChainId {
}
}
#[allow(clippy::derivable_impls)]
impl Default for ChainId {
fn default() -> Self {
ChainId::Mainnet

View file

@ -489,7 +489,7 @@ mod tests {
#[test]
fn test_header_chain() {
let mut chain = HeaderChain::new();
let chain = HeaderChain::new();
assert!(chain.is_empty());
// Would need real headers to test properly

View file

@ -82,6 +82,7 @@ mod tests {
use super::*;
#[test]
#[allow(clippy::assertions_on_constants)]
fn test_constants() {
assert!(DEFAULT_RPC_PORT > 0);
assert!(MAX_BATCH_SIZE > 0);

View file

@ -194,7 +194,7 @@ macro_rules! define_event {
};
}
/// Common events
// Common events
/// Transfer event for token contracts.
#[derive(Clone, Debug)]

View file

@ -73,7 +73,7 @@ fn make_utxo(n: u64) -> StoredUtxo {
amount: 1_000_000_000 + n * 1000,
script_pubkey,
block_daa_score: n * 10,
is_coinbase: n % 10 == 0,
is_coinbase: n.is_multiple_of(10),
}
}

View file

@ -242,6 +242,7 @@ impl Database {
}
/// Iterates over all keys in a column family.
#[allow(clippy::type_complexity)]
pub fn iter(
&self,
cf_name: &str,
@ -252,6 +253,7 @@ impl Database {
}
/// Iterates over keys with a given prefix.
#[allow(clippy::type_complexity)]
pub fn prefix_iter(
&self,
cf_name: &str,
@ -308,7 +310,7 @@ impl Database {
let size = self
.db
.property_int_value_cf(cf, "rocksdb.estimate-live-data-size")
.map_err(|e| DbError::RocksDb(e))?
.map_err(DbError::RocksDb)?
.unwrap_or(0);
Ok(size)
}

View file

@ -689,6 +689,7 @@ impl ContractStateStore {
}
/// Gets all storage entries for a contract.
#[allow(clippy::type_complexity)]
pub fn get_all(&self, contract_id: &[u8; 32]) -> Result<Vec<([u8; 32], Vec<u8>)>, DbError> {
let mut entries = Vec::new();

View file

@ -104,36 +104,10 @@ impl Address {
}
/// Parses an address from a Bech32m string.
pub fn from_str(s: &str) -> Result<Self, AddressError> {
// Decode Bech32m
let (hrp, data) =
bech32::decode(s).map_err(|e| AddressError::Bech32Error(e.to_string()))?;
// Determine network from HRP
let network = match hrp.as_str() {
"synor" => Network::Mainnet,
"tsynor" => Network::Testnet,
"dsynor" => Network::Devnet,
_ => return Err(AddressError::InvalidHrp(hrp.to_string())),
};
// Data should be 33 bytes: 1 byte type + 32 bytes payload
if data.len() != 33 {
return Err(AddressError::InvalidLength {
expected: 33,
got: data.len(),
});
}
let addr_type = AddressType::from_byte(data[0])?;
let mut payload = [0u8; 32];
payload.copy_from_slice(&data[1..33]);
Ok(Address {
network,
addr_type,
payload,
})
///
/// This is a convenience method that wraps the `FromStr` implementation.
pub fn parse(s: &str) -> Result<Self, AddressError> {
s.parse()
}
/// Encodes the address as a Bech32m string.
@ -200,7 +174,35 @@ impl std::str::FromStr for Address {
type Err = AddressError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Address::from_str(s)
// Decode Bech32m
let (hrp, data) =
bech32::decode(s).map_err(|e| AddressError::Bech32Error(e.to_string()))?;
// Determine network from HRP
let network = match hrp.as_str() {
"synor" => Network::Mainnet,
"tsynor" => Network::Testnet,
"dsynor" => Network::Devnet,
_ => return Err(AddressError::InvalidHrp(hrp.to_string())),
};
// Data should be 33 bytes: 1 byte type + 32 bytes payload
if data.len() != 33 {
return Err(AddressError::InvalidLength {
expected: 33,
got: data.len(),
});
}
let addr_type = AddressType::from_byte(data[0])?;
let mut payload = [0u8; 32];
payload.copy_from_slice(&data[1..33]);
Ok(Address {
network,
addr_type,
payload,
})
}
}
@ -219,7 +221,7 @@ impl<'de> Deserialize<'de> for Address {
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
Address::from_str(&s).map_err(serde::de::Error::custom)
Address::parse(&s).map_err(serde::de::Error::custom)
}
}
@ -293,7 +295,7 @@ mod tests {
let encoded = addr.to_bech32();
assert!(encoded.starts_with("synor1"));
let decoded = Address::from_str(&encoded).unwrap();
let decoded = Address::parse(&encoded).unwrap();
assert_eq!(addr, decoded);
}

View file

@ -140,7 +140,9 @@ impl fmt::Display for BlockHeader {
}
/// Block body - contains the transactions.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[derive(
Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
)]
pub struct BlockBody {
/// Transactions in this block.
pub transactions: Vec<Transaction>,
@ -172,14 +174,6 @@ impl BlockBody {
}
}
impl Default for BlockBody {
fn default() -> Self {
BlockBody {
transactions: Vec::new(),
}
}
}
/// A complete block (header + body).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
pub struct Block {

View file

@ -91,7 +91,7 @@ impl Hash256 {
let mut current_level: Vec<Hash256> = hashes.to_vec();
while current_level.len() > 1 {
let mut next_level = Vec::with_capacity((current_level.len() + 1) / 2);
let mut next_level = Vec::with_capacity(current_level.len().div_ceil(2));
for chunk in current_level.chunks(2) {
let combined = if chunk.len() == 2 {

View file

@ -17,10 +17,21 @@ pub use transaction::{Transaction, TransactionId, TxInput, TxOutput};
/// Network identifier for Synor chains.
#[derive(
Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
Debug,
Clone,
Copy,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
serde::Serialize,
serde::Deserialize,
)]
pub enum Network {
/// Main production network
#[default]
Mainnet,
/// Public test network
Testnet,
@ -48,12 +59,6 @@ impl Network {
}
}
impl Default for Network {
fn default() -> Self {
Network::Mainnet
}
}
/// Token amount in the smallest unit (1 SYNOR = 10^8 sompi).
/// Similar to Bitcoin's satoshi.
#[derive(

View file

@ -30,7 +30,7 @@ impl StorageKey {
}
/// Creates from a string key (hashed).
pub fn from_str(key: &str) -> Self {
pub fn from_string_key(key: &str) -> Self {
let hash: [u8; 32] = blake3::hash(key.as_bytes()).into();
StorageKey(hash)
}
@ -432,11 +432,11 @@ mod tests {
#[test]
fn test_storage_key() {
let key1 = StorageKey::from_str("balance");
let key2 = StorageKey::from_str("balance");
let key1 = StorageKey::from_string_key("balance");
let key2 = StorageKey::from_string_key("balance");
assert_eq!(key1, key2);
let key3 = StorageKey::from_str("other");
let key3 = StorageKey::from_string_key("other");
assert_ne!(key1, key3);
}
@ -451,7 +451,7 @@ mod tests {
#[test]
fn test_map_key() {
let base = StorageKey::from_str("balances");
let base = StorageKey::from_string_key("balances");
let index = [1u8; 32];
let key1 = StorageKey::map_key(&base, &index);
@ -464,7 +464,7 @@ mod tests {
fn test_memory_storage() {
let mut storage = MemoryStorage::new();
let contract = test_contract_id();
let key = StorageKey::from_str("test");
let key = StorageKey::from_string_key("test");
// Write
storage.set(&contract, key, StorageValue::from_u64(42));
@ -488,7 +488,7 @@ mod tests {
fn test_storage_rollback() {
let mut storage = MemoryStorage::new();
let contract = test_contract_id();
let key = StorageKey::from_str("test");
let key = StorageKey::from_string_key("test");
storage.set(&contract, key, StorageValue::from_u64(42));
storage.commit();
@ -509,7 +509,7 @@ mod tests {
storage.set(
&contract,
StorageKey::from_str("a"),
StorageKey::from_string_key("a"),
StorageValue::from_u64(1),
);
storage.commit();
@ -519,7 +519,7 @@ mod tests {
storage.set(
&contract,
StorageKey::from_str("b"),
StorageKey::from_string_key("b"),
StorageValue::from_u64(2),
);
storage.commit();