synor/crates/synor-storage/src/gateway/cache.rs
Gulshan Yadav f5bdef2691 feat(storage): add Synor Storage L2 decentralized storage layer
Complete implementation of the Synor Storage Layer (L2) for decentralized
content storage. This enables permanent, censorship-resistant storage of
any file type including Next.js apps, Flutter apps, and arbitrary data.

Core modules:
- cid.rs: Content addressing with Blake3/SHA256 hashing (synor1... format)
- chunker.rs: File chunking for parallel upload/download (1MB chunks)
- erasure.rs: Reed-Solomon erasure coding (10+4 shards) for fault tolerance
- proof.rs: Storage proofs with Merkle trees for verification
- deal.rs: Storage deals and market economics (3 pricing tiers)

Infrastructure:
- node/: Storage node service with P2P networking and local storage
- gateway/: HTTP gateway for browser access with LRU caching
- Docker deployment with nginx load balancer

Architecture:
- Operates as L2 alongside Synor L1 blockchain
- Storage proofs verified on-chain for reward distribution
- Can lose 4 shards per chunk and still recover data
- Gateway URLs: /synor1<cid> for content access

All 28 unit tests passing.
2026-01-10 11:42:03 +05:30

295 lines
7.8 KiB
Rust

//! Gateway Cache - LRU cache for frequently accessed content
//!
//! Caches resolved content to reduce load on storage nodes
//! and improve response times for popular content.
use crate::cid::ContentId;
use super::GatewayResponse;
use std::collections::HashMap;
/// LRU cache entry
/// A single cached gateway response plus the bookkeeping the cache
/// needs for LRU eviction and statistics.
#[derive(Debug, Clone)]
struct CacheEntry {
    /// The resolved response served to clients on a cache hit.
    response: GatewayResponse,
    /// Number of times this entry has been read via `get_mut`.
    access_count: u64,
    /// Unix timestamp (seconds) of the most recent insert or `get_mut` access.
    last_access: u64,
    /// Content size in bytes; charged against the cache's byte budget.
    size: u64,
}
/// Byte-bounded LRU cache mapping content IDs to gateway responses.
///
/// Total cached bytes never exceed `max_size`; when an insert would
/// overflow the budget, least-recently-used entries are evicted first.
pub struct GatewayCache {
    /// Maximum total size of cached content, in bytes.
    max_size: u64,
    /// Sum of the sizes of all currently cached entries, in bytes.
    current_size: u64,
    /// Cached entries keyed by content ID.
    entries: HashMap<ContentId, CacheEntry>,
    /// CIDs in access order, oldest first — the front element is the
    /// next eviction candidate. Stored as (CID, last_access timestamp).
    access_order: Vec<(ContentId, u64)>,
    /// Running hit/miss/eviction counters.
    stats: CacheStats,
}
/// Cumulative cache counters. All values only grow over the cache's
/// lifetime; note that `clear()` does not reset them.
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    /// Lookups via `get_mut` that found a cached entry.
    /// (The read-only `get` does not update hits or misses.)
    pub hits: u64,
    /// Lookups via `get_mut` that found nothing.
    pub misses: u64,
    /// Number of entries evicted to make room for new content.
    pub evictions: u64,
    /// Total bytes ever inserted into the cache (cumulative, not current).
    pub bytes_cached: u64,
    /// Total bytes removed by LRU eviction (cumulative).
    pub bytes_evicted: u64,
}
impl GatewayCache {
    /// Create an empty cache bounded by `max_size` bytes of content.
    pub fn new(max_size: u64) -> Self {
        Self {
            max_size,
            current_size: 0,
            entries: HashMap::new(),
            access_order: Vec::new(),
            stats: CacheStats::default(),
        }
    }

    /// Peek at a cached response without touching LRU order or statistics.
    pub fn get(&self, cid: &ContentId) -> Option<GatewayResponse> {
        Some(self.entries.get(cid)?.response.clone())
    }

    /// Look up a response, recording the hit/miss and refreshing the
    /// entry's position in the LRU order on a hit.
    pub fn get_mut(&mut self, cid: &ContentId) -> Option<GatewayResponse> {
        let now = current_timestamp();
        match self.entries.get_mut(cid) {
            Some(entry) => {
                entry.access_count += 1;
                entry.last_access = now;
                self.stats.hits += 1;
                // Move this CID to the back (most-recently-used) slot.
                self.access_order.retain(|(c, _)| c != cid);
                self.access_order.push((cid.clone(), now));
                Some(entry.response.clone())
            }
            None => {
                self.stats.misses += 1;
                None
            }
        }
    }

    /// Insert a response, evicting LRU entries as needed to stay within
    /// the byte budget. Responses larger than the whole budget are
    /// silently not cached.
    pub fn put(&mut self, cid: ContentId, response: GatewayResponse) {
        let size = response.content.len() as u64;
        // A payload that could never fit is not worth evicting for.
        if size > self.max_size {
            return;
        }
        // Replacing an entry: drop the old copy first so the size
        // accounting stays consistent.
        if self.entries.contains_key(&cid) {
            self.remove(&cid);
        }
        // Free up space one LRU victim at a time.
        while !self.entries.is_empty() && self.current_size + size > self.max_size {
            self.evict_lru();
        }
        let now = current_timestamp();
        self.entries.insert(
            cid.clone(),
            CacheEntry {
                response,
                access_count: 1,
                last_access: now,
                size,
            },
        );
        self.access_order.push((cid, now));
        self.current_size += size;
        self.stats.bytes_cached += size;
    }

    /// Remove an entry, returning its response if it was cached.
    pub fn remove(&mut self, cid: &ContentId) -> Option<GatewayResponse> {
        let entry = self.entries.remove(cid)?;
        self.current_size -= entry.size;
        self.access_order.retain(|(c, _)| c != cid);
        Some(entry.response)
    }

    /// Evict the least-recently-used entry (the front of `access_order`).
    fn evict_lru(&mut self) {
        if self.access_order.is_empty() {
            return;
        }
        let (victim, _) = self.access_order.remove(0);
        if let Some(entry) = self.entries.remove(&victim) {
            self.current_size -= entry.size;
            self.stats.evictions += 1;
            self.stats.bytes_evicted += entry.size;
        }
    }

    /// Drop every cached entry. Statistics are intentionally preserved.
    pub fn clear(&mut self) {
        self.entries.clear();
        self.access_order.clear();
        self.current_size = 0;
    }

    /// Borrow the cumulative cache statistics.
    pub fn stats(&self) -> &CacheStats {
        &self.stats
    }

    /// Bytes of content currently cached.
    pub fn size(&self) -> u64 {
        self.current_size
    }

    /// Configured byte budget.
    pub fn max_size(&self) -> u64 {
        self.max_size
    }

    /// Number of cached entries.
    pub fn len(&self) -> usize {
        self.entries.len()
    }

    /// Whether the cache holds no entries.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Whether `cid` is currently cached.
    pub fn contains(&self, cid: &ContentId) -> bool {
        self.entries.contains_key(cid)
    }

    /// Fraction of `get_mut` lookups that were hits; 0.0 before any lookup.
    pub fn hit_rate(&self) -> f64 {
        match self.stats.hits + self.stats.misses {
            0 => 0.0,
            total => self.stats.hits as f64 / total as f64,
        }
    }

    /// Drop every entry whose last access predates `cutoff_timestamp`.
    pub fn prune_stale(&mut self, cutoff_timestamp: u64) {
        // Collect first: we cannot mutate `entries` while iterating it.
        let stale: Vec<ContentId> = self
            .entries
            .iter()
            .filter_map(|(cid, entry)| {
                (entry.last_access < cutoff_timestamp).then(|| cid.clone())
            })
            .collect();
        for cid in stale {
            self.remove(&cid);
        }
    }
}
/// Get current timestamp in seconds
/// Current Unix time in whole seconds.
///
/// Returns 0 instead of panicking if the system clock reports a time
/// before the Unix epoch (e.g. a badly skewed clock): cache entries
/// then merely appear maximally stale, which is a safe failure mode
/// for LRU bookkeeping, whereas the previous `.unwrap()` would have
/// aborted the gateway.
fn current_timestamp() -> u64 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cid::ContentId;

    /// Wrap raw bytes in a GatewayResponse with a generic MIME type.
    fn make_response(data: &[u8]) -> GatewayResponse {
        let cid = ContentId::from_content(data);
        GatewayResponse {
            cid: cid.clone(),
            content: data.to_vec(),
            mime_type: "application/octet-stream".to_string(),
            size: data.len() as u64,
        }
    }

    #[test]
    fn test_cache_put_get() {
        let mut cache = GatewayCache::new(1024);
        let payload = b"hello world";
        let cid = ContentId::from_content(payload);
        cache.put(cid.clone(), make_response(payload));
        let hit = cache.get(&cid).unwrap();
        assert_eq!(hit.content, payload);
    }

    #[test]
    fn test_cache_eviction() {
        let mut cache = GatewayCache::new(100);
        // Ten 20-byte payloads into a 100-byte budget: evictions must occur.
        for byte in 0..10u8 {
            let payload = vec![byte; 20];
            cache.put(ContentId::from_content(&payload), make_response(&payload));
        }
        assert!(cache.size() <= 100);
        assert!(cache.len() < 10);
    }

    #[test]
    fn test_cache_lru_order() {
        let mut cache = GatewayCache::new(100);
        // Seed the cache with three 10-byte entries.
        let seeded: Vec<_> = (0..3u8)
            .map(|byte| {
                let payload = vec![byte; 10];
                (ContentId::from_content(&payload), make_response(&payload))
            })
            .collect();
        for (cid, response) in &seeded {
            cache.put(cid.clone(), response.clone());
        }
        // Touch the first entry so it becomes the most recently used.
        cache.get_mut(&seeded[0].0);
        // Push enough new entries to force LRU evictions.
        for byte in 3..10u8 {
            let payload = vec![byte; 10];
            cache.put(ContentId::from_content(&payload), make_response(&payload));
        }
        // The freshly-touched entry must have survived eviction.
        assert!(cache.contains(&seeded[0].0));
    }
}