chore: apply clippy auto-fixes to reduce warnings

- Applied clippy --fix to synor-storage (19 fixes)
- Applied clippy --fix to synor-zk (2 fixes)
- Simplified code patterns and removed redundant operations
This commit is contained in:
Gulshan Yadav 2026-01-26 21:16:10 +05:30
parent 5c14f10259
commit 63d2d44e75
11 changed files with 24 additions and 47 deletions

View file

@@ -139,7 +139,7 @@ impl Chunker {
pub fn chunk_count(&self, file_size: u64) -> u32 { pub fn chunk_count(&self, file_size: u64) -> u32 {
let size = file_size as usize; let size = file_size as usize;
let full_chunks = size / self.config.chunk_size; let full_chunks = size / self.config.chunk_size;
let has_remainder = size % self.config.chunk_size != 0; let has_remainder = !size.is_multiple_of(self.config.chunk_size);
(full_chunks + if has_remainder { 1 } else { 0 }) as u32 (full_chunks + if has_remainder { 1 } else { 0 }) as u32
} }

View file

@@ -9,20 +9,17 @@ use std::fmt;
/// Hash algorithm identifiers (multihash compatible) /// Hash algorithm identifiers (multihash compatible)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[repr(u8)] #[repr(u8)]
#[derive(Default)]
pub enum HashType { pub enum HashType {
/// SHA2-256 (0x12) /// SHA2-256 (0x12)
Sha256 = 0x12, Sha256 = 0x12,
/// Keccak-256 (0x1B) /// Keccak-256 (0x1B)
Keccak256 = 0x1B, Keccak256 = 0x1B,
/// Blake3 (0x1E) - Synor default /// Blake3 (0x1E) - Synor default
#[default]
Blake3 = 0x1E, Blake3 = 0x1E,
} }
impl Default for HashType {
fn default() -> Self {
Self::Blake3
}
}
/// Content Identifier - uniquely identifies content by hash /// Content Identifier - uniquely identifies content by hash
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -79,7 +76,7 @@ impl ContentId {
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
hasher.update(data); hasher.update(data);
let result = hasher.finalize(); let result = hasher.finalize();
result.as_slice() == &self.digest result.as_slice() == self.digest
} }
HashType::Keccak256 => { HashType::Keccak256 => {
// TODO: Implement Keccak256 verification // TODO: Implement Keccak256 verification

View file

@@ -117,7 +117,7 @@ impl ErasureCoder {
let original_size = data.len(); let original_size = data.len();
// Pad data to be divisible by data_shards // Pad data to be divisible by data_shards
let shard_size = (data.len() + self.config.data_shards - 1) / self.config.data_shards; let shard_size = data.len().div_ceil(self.config.data_shards);
let padded_size = shard_size * self.config.data_shards; let padded_size = shard_size * self.config.data_shards;
let mut padded_data = data.to_vec(); let mut padded_data = data.to_vec();

View file

@@ -4,7 +4,6 @@
//! and reassembling content. //! and reassembling content.
use crate::cid::ContentId; use crate::cid::ContentId;
use crate::chunker::Chunk;
use crate::error::{Error, Result}; use crate::error::{Error, Result};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
@@ -44,6 +43,7 @@ pub struct ContentResolver {
/// Node health tracking /// Node health tracking
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
#[derive(Default)]
struct NodeHealth { struct NodeHealth {
/// Successful requests /// Successful requests
successes: u64, successes: u64,
@@ -55,16 +55,6 @@ struct NodeHealth {
last_check: u64, last_check: u64,
} }
impl Default for NodeHealth {
fn default() -> Self {
Self {
successes: 0,
failures: 0,
latency_ms: 0,
last_check: 0,
}
}
}
impl ContentResolver { impl ContentResolver {
/// Create a new resolver /// Create a new resolver
@@ -173,7 +163,7 @@ impl ContentResolver {
// TODO: HTTP request to provider // TODO: HTTP request to provider
// For now, calculate from size // For now, calculate from size
let chunk_size = 1024 * 1024; // 1 MB let chunk_size = 1024 * 1024; // 1 MB
let chunk_count = ((cid.size as usize) + chunk_size - 1) / chunk_size; let chunk_count = (cid.size as usize).div_ceil(chunk_size);
Ok(ContentMetadata { Ok(ContentMetadata {
size: cid.size, size: cid.size,
@@ -237,7 +227,7 @@ impl ContentResolver {
pub async fn sorted_nodes(&self) -> Vec<String> { pub async fn sorted_nodes(&self) -> Vec<String> {
let health = self.node_health.read().await; let health = self.node_health.read().await;
let mut nodes: Vec<_> = self.nodes.iter().cloned().collect(); let mut nodes: Vec<_> = self.nodes.to_vec();
nodes.sort_by(|a, b| { nodes.sort_by(|a, b| {
let health_a = health.get(a); let health_a = health.get(a);

View file

@@ -14,7 +14,6 @@ pub use store::ChunkStore; pub use store::ChunkStore;
pub use network::StorageNetwork; pub use network::StorageNetwork;
pub use prover::ProofSubmitter; pub use prover::ProofSubmitter;
use crate::cid::ContentId;
use crate::deal::{StorageDeal, StorageOffer}; use crate::deal::{StorageDeal, StorageOffer};
use crate::erasure::Shard; use crate::erasure::Shard;
use crate::proof::{Challenge, StorageProof}; use crate::proof::{Challenge, StorageProof};
@@ -272,7 +271,7 @@ impl StorageNode {
} }
/// Update storage offer pricing /// Update storage offer pricing
pub async fn update_pricing(&self, min_price: u64, max_price: u64) { pub async fn update_pricing(&self, min_price: u64, _max_price: u64) {
let mut offer = self.offer.write().await; let mut offer = self.offer.write().await;
offer.price_per_byte_epoch = min_price; offer.price_per_byte_epoch = min_price;
} }

View file

@@ -167,8 +167,8 @@ impl StorageNetwork {
pub async fn request_shard( pub async fn request_shard(
&self, &self,
peer_id: &[u8; 32], peer_id: &[u8; 32],
deal_id: [u8; 32], _deal_id: [u8; 32],
shard_index: u8, _shard_index: u8,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let _peer = self.peers.get(peer_id).ok_or_else(|| { let _peer = self.peers.get(peer_id).ok_or_else(|| {
Error::Network(format!("Peer not found: {:?}", hex::encode(peer_id))) Error::Network(format!("Peer not found: {:?}", hex::encode(peer_id)))

View file

@@ -207,15 +207,13 @@ impl ProofSubmitter {
let mut expired = Vec::new(); let mut expired = Vec::new();
for (id, tracked) in &mut self.challenges { for (id, tracked) in &mut self.challenges {
if tracked.status == ChallengeStatus::Pending if (tracked.status == ChallengeStatus::Pending
|| tracked.status == ChallengeStatus::ProofReady || tracked.status == ChallengeStatus::ProofReady)
{ && current_block >= tracked.deadline_block {
if current_block >= tracked.deadline_block {
tracked.status = ChallengeStatus::Expired; tracked.status = ChallengeStatus::Expired;
self.stats.challenges_expired += 1; self.stats.challenges_expired += 1;
expired.push(*id); expired.push(*id);
} }
}
} }
expired expired

View file

@@ -79,10 +79,12 @@ impl Region {
/// Redundancy level for pinning /// Redundancy level for pinning
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Default)]
pub enum RedundancyLevel { pub enum RedundancyLevel {
/// Single copy (no redundancy) /// Single copy (no redundancy)
None, None,
/// Standard: 3 copies minimum /// Standard: 3 copies minimum
#[default]
Standard, Standard,
/// Enhanced: 5 copies with geo-distribution /// Enhanced: 5 copies with geo-distribution
Enhanced, Enhanced,
@@ -117,11 +119,6 @@ impl RedundancyLevel {
} }
} }
impl Default for RedundancyLevel {
fn default() -> Self {
RedundancyLevel::Standard
}
}
/// Storage node information /// Storage node information
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -488,7 +485,7 @@ impl PinManager {
/// Select optimal nodes for pinning /// Select optimal nodes for pinning
fn select_nodes( fn select_nodes(
&self, &self,
cid: &ContentId, _cid: &ContentId,
size: u64, size: u64,
required_copies: usize, required_copies: usize,
required_regions: usize, required_regions: usize,
@@ -545,7 +542,7 @@ impl PinManager {
} }
// Second pass: fill remaining slots // Second pass: fill remaining slots
for (region, mut region_nodes) in by_region { for (_region, mut region_nodes) in by_region {
if selected.len() >= required_copies { if selected.len() >= required_copies {
break; break;
} }

View file

@@ -144,7 +144,7 @@ impl MerkleTree {
let mut level_size = leaf_count; let mut level_size = leaf_count;
while level_size > 1 { while level_size > 1 {
let next_level_size = (level_size + 1) / 2; let next_level_size = level_size.div_ceil(2);
for i in 0..next_level_size { for i in 0..next_level_size {
let left_idx = level_start + i * 2; let left_idx = level_start + i * 2;
@@ -187,7 +187,7 @@ impl MerkleTree {
let mut level_size = self.leaf_count; let mut level_size = self.leaf_count;
while level_size > 1 { while level_size > 1 {
let sibling_idx = if idx % 2 == 0 { let sibling_idx = if idx.is_multiple_of(2) {
idx + 1 idx + 1
} else { } else {
idx - 1 idx - 1
@@ -201,7 +201,7 @@ impl MerkleTree {
idx /= 2; idx /= 2;
level_start += level_size; level_start += level_size;
level_size = (level_size + 1) / 2; level_size = level_size.div_ceil(2);
} }
proof proof

View file

@@ -27,8 +27,10 @@ use crate::circuit::{Circuit, CircuitError};
/// Proof system backend selection. /// Proof system backend selection.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Default)]
pub enum ProofSystemBackend { pub enum ProofSystemBackend {
/// Groth16 - smallest proofs (~200 bytes), requires per-circuit trusted setup /// Groth16 - smallest proofs (~200 bytes), requires per-circuit trusted setup
#[default]
Groth16, Groth16,
/// PLONK - universal setup, medium proofs (~500 bytes) /// PLONK - universal setup, medium proofs (~500 bytes)
Plonk, Plonk,
@@ -36,12 +38,6 @@ pub enum ProofSystemBackend {
Stark, Stark,
} }
impl Default for ProofSystemBackend {
fn default() -> Self {
// Groth16 is the default for production (smallest proofs)
ProofSystemBackend::Groth16
}
}
/// Verification key for proof validation. /// Verification key for proof validation.
#[derive(Clone)] #[derive(Clone)]

View file

@@ -238,7 +238,7 @@ impl StateTree {
F: FnOnce(&mut AccountState), F: FnOnce(&mut AccountState),
{ {
let mut accounts = self.accounts.write(); let mut accounts = self.accounts.write();
let state = accounts.entry(index).or_insert_with(AccountState::default); let state = accounts.entry(index).or_default();
f(state); f(state);
let hash = state.hash(); let hash = state.hash();
drop(accounts); drop(accounts);