feat: Enhance economics manager with flexible oracle configurations

- Added `with_production_oracle` and `with_oracle` methods to `EconomicsManager` for custom oracle setups.
- Introduced `record_compute_with_gpu` method in `MeteringService` to handle GPU-specific pricing.
- Enhanced `CircuitBreakerManager` to streamline price recording and state checking.
- Expanded `CrossChainOracle` with a builder pattern for easier configuration and added methods for managing pending packets.
- Introduced `PriceOracleBuilder` and `OracleFactory` for creating price oracles with various feeds.
- Added volume discount functionalities in `PricingEngine` for better pricing strategies.
- Improved `ContentResolver` with configuration management and health check features.
- Enhanced `ProverConfig` accessibility in `ProofSubmitter` and `Verifier` for better integration.
- Added utility methods in `SmtContext` for managing SMT-LIB scripts and assertions.
This commit is contained in:
Gulshan Yadav 2026-01-26 23:37:45 +05:30
parent 2f2d3cef50
commit 780a6aaad0
25 changed files with 1197 additions and 42 deletions

View file

@ -466,6 +466,7 @@ impl ExplorerState {
async fn health(State(state): State<Arc<ExplorerState>>) -> impl IntoResponse {
// Check RPC connection by making a simple RPC call
#[derive(Deserialize)]
#[allow(dead_code)] // Fields used for JSON deserialization
struct VersionResult {
version: String,
}
@ -508,6 +509,7 @@ async fn get_stats(
// Response types matching the actual node RPC
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)] // Some fields used for JSON deserialization only
struct NodeInfo {
version: String,
#[serde(default)]
@ -521,6 +523,7 @@ async fn get_stats(
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)] // blocks field used for JSON deserialization
struct MiningInfo {
blocks: u64,
difficulty: f64,
@ -1312,6 +1315,7 @@ async fn handle_websocket(socket: WebSocket, state: Arc<ExplorerState>) {
// Get current stats
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)] // Some fields used for JSON deserialization only
struct NodeInfo {
block_count: u64,
blue_score: u64,

View file

@ -306,17 +306,12 @@ impl OrderBook {
/// Submits an order and attempts to match.
pub fn submit(&self, order: Order) -> Result<Vec<Trade>, ComputeError> {
let order = Arc::new(RwLock::new(order));
let mut trades = Vec::new();
// Try to match order
match order.read().side {
OrderSide::Buy => {
trades = self.match_buy(&order);
}
OrderSide::Sell => {
trades = self.match_sell(&order);
}
}
let trades = match order.read().side {
OrderSide::Buy => self.match_buy(&order),
OrderSide::Sell => self.match_sell(&order),
};
// Add remaining to book if not filled
let (is_filled, is_ioc, id, side) = {

View file

@ -14,7 +14,7 @@ pub use work_queue::WorkQueue;
use crate::device::DeviceRegistry;
use crate::error::ComputeError;
use crate::processor::{Processor, ProcessorId, ProcessorType};
use crate::processor::{ProcessorId, ProcessorType};
use crate::task::{Task, TaskId};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};

View file

@ -1,7 +1,7 @@
//! Work queue with thread-safe task management.
use crate::processor::ProcessorType;
use crate::task::{Task, TaskId, TaskPriority};
use crate::task::{Task, TaskPriority};
use crossbeam_channel::{bounded, Receiver, Sender, TryRecvError};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
@ -203,7 +203,7 @@ impl PriorityWorkQueue {
mod tests {
use super::*;
use crate::processor::{CpuVariant, Operation, Precision};
use crate::task::TaskStatus;
use crate::task::{TaskId, TaskStatus};
fn create_test_task(id: u64, priority: TaskPriority) -> Task {
Task {

View file

@ -1,3 +1,8 @@
// Module-level allow for Zeroize macro false positives
// The variant field IS used via variant() getter, but Zeroize macro generates
// intermediate assignments that trigger "value assigned but never read" warnings.
#![allow(unused_assignments)]
//! FALCON (FN-DSA) compact lattice-based digital signatures.
//!
//! FALCON is a lattice-based signature scheme selected by NIST as FIPS 206
@ -189,10 +194,12 @@ impl std::fmt::Debug for FalconPublicKey {
}
/// FALCON secret key (zeroized on drop).
// Allow needed: Zeroize macro generates code that triggers false positive warnings
// about variant being assigned but never read. The variant field IS used via variant() getter.
#[allow(unused_assignments)]
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct FalconSecretKey {
#[zeroize(skip)]
#[allow(clippy::assigned_to_never_read)] // Used by variant() getter method
variant: FalconVariant,
bytes: Vec<u8>,
}
@ -223,6 +230,15 @@ impl FalconSecretKey {
}
}
impl std::fmt::Debug for FalconSecretKey {
    /// Debug output redacts the secret key material; only the variant is shown.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("FalconSecretKey");
        dbg.field("variant", &self.variant);
        // Never print the raw key bytes: secrets must not leak into logs.
        dbg.field("bytes", &"[REDACTED]");
        dbg.finish()
    }
}
/// FALCON signature.
#[derive(Clone)]
pub struct FalconSignature {
@ -438,6 +454,9 @@ mod tests {
let keypair = FalconKeypair::generate(FalconVariant::Falcon512);
assert_eq!(keypair.variant(), FalconVariant::Falcon512);
assert_eq!(keypair.public_key().as_bytes().len(), 897);
// Test component variant accessors
assert_eq!(keypair.secret_key().variant(), FalconVariant::Falcon512);
assert_eq!(keypair.public_key().variant(), FalconVariant::Falcon512);
}
#[test]

View file

@ -1,3 +1,8 @@
// Module-level allow for Zeroize macro false positives
// The variant field IS used via variant() getter, but Zeroize macro generates
// intermediate assignments that trigger "value assigned but never read" warnings.
#![allow(unused_assignments)]
//! SPHINCS+ (SLH-DSA) hash-based digital signatures.
//!
//! SPHINCS+ is a stateless hash-based signature scheme selected by NIST as
@ -192,10 +197,12 @@ impl std::fmt::Debug for SphincsPublicKey {
}
/// SPHINCS+ secret key (zeroized on drop).
// Allow needed: Zeroize macro generates code that triggers false positive warnings
// about variant being assigned but never read. The variant field IS used via variant() getter.
#[allow(unused_assignments)]
#[derive(Zeroize, ZeroizeOnDrop)]
pub struct SphincsSecretKey {
#[zeroize(skip)]
#[allow(clippy::assigned_to_never_read)] // Used by variant() getter method
variant: SphincsVariant,
bytes: Vec<u8>,
}
@ -226,6 +233,15 @@ impl SphincsSecretKey {
}
}
impl std::fmt::Debug for SphincsSecretKey {
    /// Debug output redacts the secret key material; only the variant is shown.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("SphincsSecretKey");
        dbg.field("variant", &self.variant);
        // Never print the raw key bytes: secrets must not leak into logs.
        dbg.field("bytes", &"[REDACTED]");
        dbg.finish()
    }
}
/// SPHINCS+ signature.
#[derive(Clone)]
pub struct SphincsSignature {
@ -437,6 +453,9 @@ mod tests {
let keypair = SphincsKeypair::generate(SphincsVariant::Shake128s);
assert_eq!(keypair.variant(), SphincsVariant::Shake128s);
assert_eq!(keypair.public_key().as_bytes().len(), 32);
// Test component variant accessors
assert_eq!(keypair.secret_key().variant(), SphincsVariant::Shake128s);
assert_eq!(keypair.public_key().variant(), SphincsVariant::Shake128s);
}
#[test]

View file

@ -228,6 +228,12 @@ pub enum CreditError {
requested: SynorDecimal,
available: SynorDecimal,
},
/// Exceeds maximum credit allowed per account
ExceedsMaxCredit {
current: SynorDecimal,
requested: SynorDecimal,
maximum: SynorDecimal,
},
}
impl std::fmt::Display for CreditError {
@ -242,6 +248,13 @@ impl std::fmt::Display for CreditError {
requested, available
)
}
CreditError::ExceedsMaxCredit { current, requested, maximum } => {
write!(
f,
"Credit exceeds maximum: current {}, requested {}, maximum {}",
current, requested, maximum
)
}
}
}
}
@ -275,6 +288,200 @@ impl Default for CreditPolicy {
}
}
/// Credit manager for issuing and managing credits based on policy
///
/// Keeps issued credits per account in an in-memory map; balance queries
/// consider only credits that are active and not expired.
pub struct CreditManager {
    /// Credit policy configuration
    policy: CreditPolicy,
    /// Credits by account ID (every issued credit is appended here)
    credits: std::collections::HashMap<AccountId, Vec<Credit>>,
}
impl CreditManager {
    /// Create a new credit manager with default policy
    pub fn new() -> Self {
        Self::with_policy(CreditPolicy::default())
    }

    /// Create a credit manager with custom policy
    pub fn with_policy(policy: CreditPolicy) -> Self {
        Self {
            policy,
            credits: std::collections::HashMap::new(),
        }
    }

    /// Issue welcome credit to a new account
    ///
    /// Amount and expiry come from the policy.
    /// NOTE(review): no `max_credit_per_account` check here — confirm that
    /// welcome credits are meant to bypass the cap.
    pub fn issue_welcome_credit(&mut self, account_id: impl Into<String>) -> Credit {
        let account_id = account_id.into();
        let credit = Credit::welcome(&account_id, self.policy.welcome_amount)
            .with_expiry_days(self.policy.default_expiry_days);
        self.credits
            .entry(account_id)
            .or_default()
            .push(credit.clone());
        credit
    }

    /// Issue referral credits to both referrer and referee
    ///
    /// Returns `(referrer_credit, referee_credit)`; each credit records the
    /// counterparty's account id.
    pub fn issue_referral_credits(
        &mut self,
        referrer_id: impl Into<String>,
        referee_id: impl Into<String>,
    ) -> (Credit, Credit) {
        let referrer_id = referrer_id.into();
        let referee_id = referee_id.into();
        // Credit for the referrer
        let referrer_credit =
            Credit::referral(&referrer_id, self.policy.referral_referrer_amount, &referee_id)
                .with_expiry_days(self.policy.default_expiry_days);
        // Credit for the referee
        let referee_credit =
            Credit::referral(&referee_id, self.policy.referral_referee_amount, &referrer_id)
                .with_expiry_days(self.policy.default_expiry_days);
        self.credits
            .entry(referrer_id)
            .or_default()
            .push(referrer_credit.clone());
        self.credits
            .entry(referee_id)
            .or_default()
            .push(referee_credit.clone());
        (referrer_credit, referee_credit)
    }

    /// Issue promotional credit
    ///
    /// # Errors
    /// Returns `CreditError::ExceedsMaxCredit` when the account's current
    /// active credit plus `amount` would exceed
    /// `policy.max_credit_per_account`.
    pub fn issue_promotional_credit(
        &mut self,
        account_id: impl Into<String>,
        amount: SynorDecimal,
        campaign: impl Into<String>,
    ) -> Result<Credit, CreditError> {
        let account_id = account_id.into();
        // Check if adding this credit exceeds the maximum
        let current_total = self.get_total_credit(&account_id);
        if current_total + amount > self.policy.max_credit_per_account {
            return Err(CreditError::ExceedsMaxCredit {
                current: current_total,
                requested: amount,
                maximum: self.policy.max_credit_per_account,
            });
        }
        let credit = Credit::new(&account_id, amount, campaign.into())
            .with_type(CreditType::Promotional)
            .with_expiry_days(self.policy.default_expiry_days);
        self.credits
            .entry(account_id)
            .or_default()
            .push(credit.clone());
        Ok(credit)
    }

    /// Issue SLA compensation credit
    ///
    /// No per-account maximum is enforced and no expiry is applied here;
    /// NOTE(review): confirm `Credit::sla_compensation` sets its own expiry
    /// if one is intended.
    pub fn issue_sla_credit(
        &mut self,
        account_id: impl Into<String>,
        amount: SynorDecimal,
        incident_id: impl Into<String>,
    ) -> Credit {
        let account_id = account_id.into();
        let credit = Credit::sla_compensation(&account_id, amount, incident_id);
        self.credits
            .entry(account_id)
            .or_default()
            .push(credit.clone());
        credit
    }

    /// Get total active credit for an account
    ///
    /// Sums only credits that are active and not expired; unknown accounts
    /// report zero.
    pub fn get_total_credit(&self, account_id: &str) -> SynorDecimal {
        self.credits
            .get(account_id)
            .map(|credits| {
                credits
                    .iter()
                    .filter(|c| c.is_active && !c.is_expired())
                    .map(|c| c.amount)
                    .sum()
            })
            .unwrap_or(Decimal::ZERO)
    }

    /// Get all active credits for an account
    pub fn get_active_credits(&self, account_id: &str) -> Vec<&Credit> {
        self.credits
            .get(account_id)
            .map(|credits| {
                credits
                    .iter()
                    .filter(|c| c.is_active && !c.is_expired())
                    .collect()
            })
            .unwrap_or_default()
    }

    /// Use credits from an account (soonest-expiring credits spent first)
    ///
    /// Returns the amount actually consumed.
    ///
    /// # Errors
    /// Returns `CreditError::InsufficientCredit` when the account's active
    /// credit is less than `amount`.
    pub fn use_credits(
        &mut self,
        account_id: &str,
        amount: SynorDecimal,
    ) -> Result<SynorDecimal, CreditError> {
        let total = self.get_total_credit(account_id);
        if total < amount {
            return Err(CreditError::InsufficientCredit {
                requested: amount,
                available: total,
            });
        }
        // Bug fix: an account with no credit entries can still reach this
        // point when `amount <= 0` (total == 0 passes the check above). The
        // previous `unwrap()` on the map lookup panicked in that case; treat
        // it as "nothing consumed" instead.
        let credits = match self.credits.get_mut(account_id) {
            Some(credits) => credits,
            None => return Ok(Decimal::ZERO),
        };
        let mut remaining = amount;
        // Spend credits expiring soonest first; never-expiring credits go
        // last, ordered among themselves by creation time.
        credits.sort_by(|a, b| match (&a.expires_at, &b.expires_at) {
            (Some(a_exp), Some(b_exp)) => a_exp.cmp(b_exp),
            (Some(_), None) => std::cmp::Ordering::Less,
            (None, Some(_)) => std::cmp::Ordering::Greater,
            (None, None) => a.created_at.cmp(&b.created_at),
        });
        for credit in credits.iter_mut() {
            if remaining <= Decimal::ZERO {
                break;
            }
            if credit.is_active && !credit.is_expired() {
                // `use_credit` reports how much it could actually deduct.
                let used = credit.use_credit(remaining).unwrap_or(Decimal::ZERO);
                remaining -= used;
            }
        }
        Ok(amount - remaining)
    }

    /// Get policy
    pub fn policy(&self) -> &CreditPolicy {
        &self.policy
    }
}
// Default delegates to `new()`, i.e. a manager with `CreditPolicy::default()`.
impl Default for CreditManager {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
use super::*;

View file

@ -6,9 +6,9 @@ mod credit;
mod invoice;
mod payment;
pub use credit::Credit;
pub use credit::{Credit, CreditError, CreditManager, CreditPolicy, CreditType};
pub use invoice::{Invoice, InvoiceLineItem, InvoiceStatus};
pub use payment::{Payment, PaymentMethod, PaymentStatus};
pub use payment::{Payment, PaymentError, PaymentMethod, PaymentProcessor, PaymentStatus};
use crate::error::{EconomicsError, Result};
use crate::metering::MeteringService;
@ -322,6 +322,12 @@ impl BillingEngine {
/// Process a payment
pub async fn process_payment(&self, payment: Payment) -> Result<()> {
use payment::PaymentProcessor;
// Validate payment using PaymentProcessor
PaymentProcessor::validate(&payment)
.map_err(|e| EconomicsError::PaymentFailed(e.to_string()))?;
let mut accounts = self.accounts.write().await;
let mut invoices = self.invoices.write().await;
let mut payments = self.payments.write().await;
@ -351,6 +357,38 @@ impl BillingEngine {
invoice.mark_paid(payment.id.clone());
}
// Process payment based on method
let mut payment = payment;
match payment.method {
PaymentMethod::PrepaidBalance => {
PaymentProcessor::process_prepaid(&mut payment, account.prepaid_balance)
.await
.map_err(|e| EconomicsError::PaymentFailed(e.to_string()))?;
// Deduct from prepaid balance
account.prepaid_balance -= payment.amount;
}
PaymentMethod::SynorTransfer => {
// In production, addresses would come from configuration
let from_address = format!("synor1{}", payment.account_id);
let to_address = "synor1treasury".to_string();
PaymentProcessor::process_synor_transfer(&mut payment, &from_address, &to_address)
.await
.map_err(|e| EconomicsError::PaymentFailed(e.to_string()))?;
}
PaymentMethod::CreditBalance => {
// Deduct from credit balance
if account.credit_balance < payment.amount {
return Err(EconomicsError::PaymentFailed("Insufficient credit balance".to_string()));
}
account.credit_balance -= payment.amount;
payment.mark_completed();
}
_ => {
// Other payment methods - just mark as completed for now
payment.mark_completed();
}
}
// Update account
account.last_payment = Some(Utc::now());
@ -358,10 +396,11 @@ impl BillingEngine {
payments.insert(payment.id.clone(), payment.clone());
tracing::info!(
"Processed payment {} for account {}: {} SYNOR",
"Processed payment {} for account {}: {} SYNOR via {:?}",
payment.id,
payment.account_id,
payment.amount
payment.amount,
payment.method
);
Ok(())
@ -448,6 +487,25 @@ impl BillingEngine {
Ok(all_invoices.into_iter().filter(|inv| !inv.is_paid()).collect())
}
/// Get detailed account information including creation date
///
/// Returns a read-only `AccountInfo` snapshot of the account, or
/// `EconomicsError::AccountNotFound` when the id is unknown.
pub async fn get_account_details(&self, account_id: &str) -> Result<AccountInfo> {
    let accounts = self.accounts.read().await;
    match accounts.get(account_id) {
        None => Err(EconomicsError::AccountNotFound(account_id.to_string())),
        Some(account) => Ok(AccountInfo {
            account_id: account.account_id.clone(),
            prepaid_balance: account.prepaid_balance,
            credit_balance: account.credit_balance,
            tier: account.tier.clone(),
            billing_cycle_start: account.billing_cycle_start,
            created_at: account.created_at,
            last_payment: account.last_payment,
            invoice_count: account.invoice_ids.len(),
        }),
    }
}
/// Generate all pending invoices (for batch processing)
pub async fn generate_all_pending_invoices(&self) -> Result<Vec<Invoice>> {
let account_ids: Vec<_> = {
@ -524,6 +582,27 @@ pub struct BillingStats {
pub outstanding_amount: SynorDecimal,
}
/// Account information for external use
///
/// Read-only snapshot of a billing account; serializable for API
/// boundaries (derives `Serialize`/`Deserialize`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountInfo {
    /// Account identifier
    pub account_id: AccountId,
    /// Prepaid balance in SYNOR
    pub prepaid_balance: SynorDecimal,
    /// Credit balance in SYNOR
    pub credit_balance: SynorDecimal,
    /// Pricing tier name
    pub tier: String,
    /// Current billing cycle start
    pub billing_cycle_start: DateTime<Utc>,
    /// Account creation date
    pub created_at: DateTime<Utc>,
    /// Last payment date (`None` if no payment has been processed yet)
    pub last_payment: Option<DateTime<Utc>>,
    /// Number of invoices issued to this account
    pub invoice_count: usize,
}
#[cfg(test)]
mod tests {
use super::*;
@ -540,9 +619,9 @@ mod tests {
let engine = setup_engine().await;
engine.register_account("test_account", "standard").await.unwrap();
let info = engine.get_account_info("test_account").await.unwrap();
let info = engine.get_account_details("test_account").await.unwrap();
assert_eq!(info.account_id, "test_account");
assert_eq!(info.tier_name, "standard");
assert_eq!(info.tier, "standard");
}
#[tokio::test]

View file

@ -35,7 +35,10 @@ pub use metering::{
ComputeUsage, DatabaseUsage, HostingUsage, MeteringService, NetworkUsage, StorageUsage,
UsageEvent, UsageRecord,
};
pub use oracle::{PriceFeed, PriceOracle, PriceSource, TokenPrice};
pub use oracle::{
AggregateFeed, ChainlinkFeed, CoinGeckoFeed, InternalDexFeed, MockPriceFeed, OracleFactory,
PriceFeed, PriceOracle, PriceOracleBuilder, PriceSource, ProductionOracleConfig, TokenPrice,
};
pub use pricing::{Discount, DiscountType, PricingEngine, PricingTier, ServicePricing};
use chrono::{DateTime, Utc};
@ -243,8 +246,50 @@ pub struct EconomicsManager {
impl EconomicsManager {
/// Create a new economics manager with default configuration
/// Uses a development oracle with mock price feeds
pub fn new() -> Self {
let oracle = Arc::new(RwLock::new(PriceOracle::new()));
use rust_decimal_macros::dec;
// Default to development oracle with mock feeds at $1.50 base price
let oracle = Arc::new(RwLock::new(
oracle::OracleFactory::development(dec!(1.50))
));
let pricing = Arc::new(PricingEngine::new());
let metering = Arc::new(MeteringService::new(pricing.clone()));
let billing = Arc::new(BillingEngine::new(metering.clone(), pricing.clone()));
let calculator = Arc::new(CostEstimator::new(pricing.clone()));
Self {
oracle,
metering,
billing,
pricing,
calculator,
}
}
/// Create an economics manager with production oracle configuration
///
/// Wires up the full service graph (pricing -> metering -> billing ->
/// estimator) around an oracle built by `OracleFactory::production`.
pub fn with_production_oracle(config: oracle::ProductionOracleConfig) -> Self {
    // The pricing engine is shared by every downstream service.
    let pricing = Arc::new(PricingEngine::new());
    let metering = Arc::new(MeteringService::new(Arc::clone(&pricing)));
    let billing = Arc::new(BillingEngine::new(Arc::clone(&metering), Arc::clone(&pricing)));
    let calculator = Arc::new(CostEstimator::new(Arc::clone(&pricing)));
    Self {
        oracle: Arc::new(RwLock::new(oracle::OracleFactory::production(config))),
        metering,
        billing,
        pricing,
        calculator,
    }
}
/// Create an economics manager with a custom oracle
pub fn with_oracle(oracle: PriceOracle) -> Self {
let oracle = Arc::new(RwLock::new(oracle));
let pricing = Arc::new(PricingEngine::new());
let metering = Arc::new(MeteringService::new(pricing.clone()));
let billing = Arc::new(BillingEngine::new(metering.clone(), pricing.clone()));

View file

@ -13,7 +13,7 @@ mod hosting;
mod network;
mod storage;
pub use compute::ComputeUsage;
pub use compute::{ComputeUsage, GpuType};
pub use database::DatabaseUsage;
pub use hosting::HostingUsage;
pub use network::NetworkUsage;
@ -480,6 +480,87 @@ impl MeteringService {
self.event_rx.clone()
}
/// Record compute usage with specific GPU type for differentiated pricing
///
/// Expands one `ComputeUsage` sample into separate usage events, emitting
/// an event only for the dimensions that are non-zero:
/// - CPU core-seconds -> `CpuCoreHours` (divided by 3600)
/// - GPU seconds -> `GpuHours`, scaled by `gpu_type.price_multiplier()`
///   (multiplier 1 when `gpu_type` is `None` or the multiplier is not
///   representable as a `Decimal`); the GPU type name is attached as
///   `gpu_type` metadata ("Generic" when unspecified)
/// - memory GB-seconds -> `MemoryGbHours` (divided by 3600)
/// - invocations -> `Invocations` (serverless)
///
/// # Errors
/// Propagates the first error from the underlying `record` call; earlier
/// events in the sequence may already have been recorded at that point.
pub async fn record_compute_with_gpu(
    &self,
    account_id: &str,
    usage: ComputeUsage,
    gpu_type: Option<GpuType>,
) -> Result<()> {
    // CPU hours (unchanged by GPU type)
    if usage.cpu_core_seconds > 0 {
        self.record(UsageEvent::new(
            account_id,
            ServiceType::Compute,
            ResourceUnit::CpuCoreHours,
            Decimal::from(usage.cpu_core_seconds) / Decimal::from(3600),
        ))
        .await?;
    }
    // GPU hours with GPU type multiplier
    if usage.gpu_seconds > 0 {
        // Fall back to a neutral multiplier when no GPU type is given or the
        // f64 multiplier cannot be represented as a Decimal.
        let multiplier = gpu_type
            .map(|g| Decimal::from_f64_retain(g.price_multiplier()).unwrap_or(Decimal::ONE))
            .unwrap_or(Decimal::ONE);
        let gpu_type_name = gpu_type
            .map(|g| format!("{:?}", g))
            .unwrap_or_else(|| "Generic".to_string());
        self.record(
            UsageEvent::new(
                account_id,
                ServiceType::Compute,
                ResourceUnit::GpuHours,
                Decimal::from(usage.gpu_seconds) / Decimal::from(3600) * multiplier,
            )
            .with_metadata("gpu_type", gpu_type_name),
        )
        .await?;
    }
    // Memory GB hours
    if usage.memory_gb_seconds > 0 {
        self.record(UsageEvent::new(
            account_id,
            ServiceType::Compute,
            ResourceUnit::MemoryGbHours,
            Decimal::from(usage.memory_gb_seconds) / Decimal::from(3600),
        ))
        .await?;
    }
    // Invocations (serverless)
    if usage.invocations > 0 {
        self.record(UsageEvent::new(
            account_id,
            ServiceType::Compute,
            ResourceUnit::Invocations,
            Decimal::from(usage.invocations),
        ))
        .await?;
    }
    Ok(())
}
/// Get historical usage records for an account
///
/// Returns owned copies of every stored record whose `account_id` matches.
pub async fn get_historical_records(&self, account_id: &str) -> Vec<UsageRecord> {
    self.records
        .read()
        .await
        .iter()
        .filter(|record| record.account_id == account_id)
        .cloned()
        .collect()
}
/// Add aggregated record (typically called after period flush)
pub async fn add_record(&self, record: UsageRecord) {
    // Append under a short-lived write lock.
    self.records.write().await.push(record);
}
/// Get metering stats
pub async fn stats(&self) -> MeteringStats {
let buffer = self.event_buffer.read().await;

View file

@ -232,7 +232,20 @@ impl CircuitBreakerManager {
price: SynorDecimal,
liquidity: Option<SynorDecimal>,
) -> Result<CircuitState> {
self.record_price_at(pair, price, liquidity, Utc::now())
let breaker = self.breakers.entry(pair.to_string())
.or_insert_with(PairCircuitBreaker::new);
// Use the convenience method for real-time price recording
breaker.record_price(price, liquidity);
// Check breakers if currently closed
if breaker.state == CircuitState::Closed {
self.check_triggers(pair)?;
} else {
self.check_recovery(pair)?;
}
Ok(self.get_state(pair))
}
/// Record a price at a specific timestamp (useful for testing)

View file

@ -258,11 +258,15 @@ struct CrossChainPrice {
/// Pending IBC packet awaiting confirmation
#[derive(Debug, Clone)]
struct PendingPacket {
packet: CrossChainPricePacket,
sent_at: DateTime<Utc>,
channel: String,
sequence: u64,
pub struct PendingPacket {
/// The price packet being sent
pub packet: CrossChainPricePacket,
/// When the packet was sent
pub sent_at: DateTime<Utc>,
/// IBC channel
pub channel: String,
/// Sequence number
pub sequence: u64,
}
impl CrossChainOracle {
@ -417,6 +421,58 @@ impl CrossChainOracle {
(Utc::now() - v.received_at).num_seconds() < max_age
});
}
/// Send an IBC price request and track pending packet
///
/// Builds a placeholder `CrossChainPricePacket` (price zero, block 0, no
/// proof or signatures) that is expected to be filled in when the response
/// arrives, appends it to `pending_packets`, and returns a copy.
///
/// NOTE(review): this always returns `Some`; the `Option` return appears
/// reserved for future failure modes — confirm intent.
pub fn send_ibc_request(
    &mut self,
    request: IBCPriceRequest,
    sequence: u64,
) -> Option<PendingPacket> {
    // Create a placeholder packet for the request
    let pending = PendingPacket {
        packet: CrossChainPricePacket {
            source_chain: request.target_chain,
            token: request.token.clone(),
            quote: request.quote.clone(),
            price: Decimal::ZERO, // Will be filled on response
            source_block: 0,
            source_timestamp: Utc::now(),
            proof: None,
            signatures: vec![],
        },
        sent_at: Utc::now(),
        channel: request.channel.clone(),
        sequence,
    };
    self.pending_packets.push(pending.clone());
    Some(pending)
}
/// Confirm a pending packet was received
///
/// Removes and returns the pending packet matching `channel` and
/// `sequence`, or `None` when no such packet is tracked.
pub fn confirm_pending_packet(&mut self, channel: &str, sequence: u64) -> Option<PendingPacket> {
    let idx = self
        .pending_packets
        .iter()
        .position(|p| p.channel == channel && p.sequence == sequence)?;
    // Hand ownership of the matched packet back to the caller.
    Some(self.pending_packets.remove(idx))
}
/// Get pending packets count
///
/// Number of in-flight packets not yet confirmed or cleaned up.
pub fn pending_count(&self) -> usize {
    self.pending_packets.len()
}
/// Cleanup timed out pending packets
///
/// Drops every pending packet sent more than `timeout_secs` seconds ago.
pub fn cleanup_pending(&mut self, timeout_secs: i64) {
    // Capture "now" once so every packet is judged against the same cutoff;
    // the original re-evaluated Utc::now() per element inside the closure.
    let now = Utc::now();
    self.pending_packets
        .retain(|p| (now - p.sent_at).num_seconds() < timeout_secs);
}
}
impl Default for CrossChainOracle {
@ -455,6 +511,11 @@ impl EthereumPriceFetcher {
chainlink_feeds: feeds,
}
}
/// Get the RPC endpoint URL
///
/// Borrowed accessor for the configured Ethereum RPC endpoint.
pub fn rpc_url(&self) -> &str {
    &self.rpc_url
}
}
#[async_trait]
@ -513,6 +574,16 @@ impl CosmosPriceFetcher {
chain_id: chain_id.into(),
}
}
/// Get the light client ID
///
/// Borrowed accessor for the configured IBC light client identifier.
pub fn light_client_id(&self) -> &str {
    &self.light_client_id
}
/// Get the chain ID
///
/// Borrowed accessor for the configured Cosmos chain identifier.
pub fn chain_id(&self) -> &str {
    &self.chain_id
}
}
#[async_trait]
@ -549,6 +620,119 @@ impl ChainPriceFetcher for CosmosPriceFetcher {
}
}
/// Builder for creating a fully-configured CrossChainOracle
pub struct CrossChainOracleBuilder {
    /// Base cross-chain configuration applied to the built oracle
    config: CrossChainConfig,
    /// Ethereum RPC URL; a fetcher is registered only when set
    ethereum_rpc: Option<String>,
    /// Cosmos (light client id, chain id); a fetcher is registered only when set
    cosmos_light_client: Option<(String, String)>,
}
impl CrossChainOracleBuilder {
    /// Create a new builder with default config
    pub fn new() -> Self {
        Self {
            config: CrossChainConfig::default(),
            ethereum_rpc: None,
            cosmos_light_client: None,
        }
    }

    /// Set custom configuration
    pub fn with_config(mut self, config: CrossChainConfig) -> Self {
        self.config = config;
        self
    }

    /// Add Ethereum price fetcher (Chainlink integration)
    pub fn with_ethereum(mut self, rpc_url: impl Into<String>) -> Self {
        self.ethereum_rpc = Some(rpc_url.into());
        self
    }

    /// Add Cosmos/IBC price fetcher
    pub fn with_cosmos(mut self, light_client_id: impl Into<String>, chain_id: impl Into<String>) -> Self {
        self.cosmos_light_client = Some((light_client_id.into(), chain_id.into()));
        self
    }

    /// Build the configured CrossChainOracle
    ///
    /// Registers one fetcher per configured backend (Ethereum first).
    pub fn build(self) -> CrossChainOracle {
        let Self { config, ethereum_rpc, cosmos_light_client } = self;
        let mut oracle = CrossChainOracle::with_config(config);
        if let Some(url) = ethereum_rpc {
            oracle.register_fetcher(Box::new(EthereumPriceFetcher::new(url)));
        }
        if let Some((client_id, chain)) = cosmos_light_client {
            oracle.register_fetcher(Box::new(CosmosPriceFetcher::new(client_id, chain)));
        }
        oracle
    }
}
// Default delegates to `new()`: default config, no fetchers configured.
impl Default for CrossChainOracleBuilder {
    fn default() -> Self {
        Self::new()
    }
}
/// Factory for creating pre-configured cross-chain oracles
pub struct CrossChainOracleFactory;
impl CrossChainOracleFactory {
/// Create a development oracle with mock fetchers
pub fn development() -> CrossChainOracle {
CrossChainOracleBuilder::new()
.with_ethereum("http://localhost:8545")
.with_cosmos("07-tendermint-0", "cosmoshub-4")
.build()
}
/// Create a production oracle with real endpoints
pub fn production(config: CrossChainProductionConfig) -> CrossChainOracle {
let mut builder = CrossChainOracleBuilder::new()
.with_config(config.cross_chain_config);
if let Some(eth_rpc) = config.ethereum_rpc_url {
builder = builder.with_ethereum(eth_rpc);
}
if let Some((light_client, chain_id)) = config.cosmos_light_client {
builder = builder.with_cosmos(light_client, chain_id);
}
builder.build()
}
/// Create an oracle with only Ethereum support
pub fn ethereum_only(rpc_url: impl Into<String>) -> CrossChainOracle {
CrossChainOracleBuilder::new()
.with_ethereum(rpc_url)
.build()
}
/// Create an oracle with only Cosmos/IBC support
pub fn cosmos_only(light_client_id: impl Into<String>, chain_id: impl Into<String>) -> CrossChainOracle {
CrossChainOracleBuilder::new()
.with_cosmos(light_client_id, chain_id)
.build()
}
}
/// Configuration for production cross-chain oracle
///
/// Consumed by `CrossChainOracleFactory::production`; each `None`/unset
/// field simply skips that backend.
#[derive(Debug, Clone, Default)]
pub struct CrossChainProductionConfig {
    /// Base cross-chain configuration
    pub cross_chain_config: CrossChainConfig,
    /// Ethereum RPC URL (e.g. Infura/Alchemy)
    pub ethereum_rpc_url: Option<String>,
    /// Cosmos light client ID and chain ID
    pub cosmos_light_client: Option<(String, String)>,
}
#[cfg(test)]
mod tests {
use super::*;

View file

@ -27,8 +27,9 @@ pub use circuit_breaker::{
TriggerReason,
};
pub use cross_chain::{
ChainNetwork, ChainPriceFetcher, CrossChainConfig, CrossChainOracle, CrossChainPricePacket,
IBCPriceRequest, MerkleProof,
ChainNetwork, ChainPriceFetcher, CosmosPriceFetcher, CrossChainConfig, CrossChainOracle,
CrossChainOracleBuilder, CrossChainOracleFactory, CrossChainPricePacket,
CrossChainProductionConfig, EthereumPriceFetcher, IBCPriceRequest, MerkleProof,
};
pub use decentralized::{
AggregationRound, AggregationStrategy, DecentralizedOracle, DecentralizedOracleConfig,
@ -43,8 +44,14 @@ pub use liquidation::{
CollateralAsset, HealthStatus, LendingPosition, LiquidationCalculation, LiquidationEvent,
LiquidationOracle, LiquidationOracleConfig, LiquidationPrice, LiquidationStats,
};
pub use price_feed::{PriceFeed, PriceSource};
pub use twap::TwapCalculator;
pub use price_feed::{
AggregateFeed, ChainlinkFeed, CoinGeckoFeed, InternalDexFeed, MockPriceFeed, PriceFeed,
PriceSource,
};
pub use twap::{
OnChainTwap, OnChainTwapFactory, TwapCalculator, TwapConfig, TwapObservation,
TwapOracleBuilder,
};
use crate::error::{EconomicsError, Result};
use crate::SynorDecimal;
@ -412,6 +419,150 @@ impl Default for PriceOracle {
}
}
/// Builder for creating a fully-configured PriceOracle
pub struct PriceOracleBuilder {
    /// Oracle configuration applied at build time
    config: OracleConfig,
    /// Price feeds to register, in the order they were added
    feeds: Vec<Box<dyn PriceFeed + Send + Sync>>,
}
impl PriceOracleBuilder {
/// Create a new builder with default config
pub fn new() -> Self {
Self {
config: OracleConfig::default(),
feeds: Vec::new(),
}
}
/// Set custom configuration
pub fn with_config(mut self, config: OracleConfig) -> Self {
self.config = config;
self
}
/// Add a mock price feed (for testing)
pub fn with_mock_feed(mut self, base_price: SynorDecimal) -> Self {
use price_feed::MockPriceFeed;
self.feeds.push(Box::new(MockPriceFeed::new(PriceSource::Internal, base_price)));
self
}
/// Add the internal DEX price feed
pub fn with_internal_dex(mut self, endpoint: impl Into<String>) -> Self {
use price_feed::InternalDexFeed;
self.feeds.push(Box::new(InternalDexFeed::new(endpoint)));
self
}
/// Add Chainlink oracle feed
pub fn with_chainlink(mut self, contract_address: impl Into<String>, rpc_url: impl Into<String>) -> Self {
use price_feed::ChainlinkFeed;
self.feeds.push(Box::new(ChainlinkFeed::new(contract_address, rpc_url)));
self
}
/// Add CoinGecko API feed
pub fn with_coingecko(mut self, api_key: Option<String>) -> Self {
use price_feed::CoinGeckoFeed;
self.feeds.push(Box::new(CoinGeckoFeed::new(api_key)));
self
}
/// Add an aggregate feed that combines multiple sources
pub fn with_aggregate_feed(mut self, feeds: Vec<Box<dyn PriceFeed + Send + Sync>>) -> Self {
use price_feed::AggregateFeed;
self.feeds.push(Box::new(AggregateFeed::new(feeds)));
self
}
/// Add a custom price feed
pub fn with_custom_feed(mut self, feed: Box<dyn PriceFeed + Send + Sync>) -> Self {
self.feeds.push(feed);
self
}
/// Build the configured PriceOracle
pub fn build(self) -> PriceOracle {
let mut oracle = PriceOracle::with_config(self.config);
for feed in self.feeds {
oracle.add_feed(feed);
}
oracle
}
}
// Default delegates to `new()`: default config, no feeds registered.
impl Default for PriceOracleBuilder {
    fn default() -> Self {
        Self::new()
    }
}
/// Factory for creating pre-configured price oracles
pub struct OracleFactory;
impl OracleFactory {
/// Create a development/testing oracle with mock feeds
pub fn development(base_price: SynorDecimal) -> PriceOracle {
use price_feed::MockPriceFeed;
let mut oracle = PriceOracle::new();
// Add multiple mock feeds with slight variations for testing
oracle.add_feed(Box::new(MockPriceFeed::new(PriceSource::Internal, base_price)));
oracle.add_feed(Box::new(MockPriceFeed::new(PriceSource::SynorDex, base_price)));
oracle
}
/// Create a production oracle with all available feeds
pub fn production(config: ProductionOracleConfig) -> PriceOracle {
use price_feed::{ChainlinkFeed, CoinGeckoFeed, InternalDexFeed};
let mut oracle = PriceOracle::with_config(config.oracle_config);
// Add internal DEX feed
if let Some(dex_endpoint) = config.dex_endpoint {
oracle.add_feed(Box::new(InternalDexFeed::new(dex_endpoint)));
}
// Add Chainlink feed
if let Some((contract, rpc)) = config.chainlink {
oracle.add_feed(Box::new(ChainlinkFeed::new(contract, rpc)));
}
// Add CoinGecko feed
if config.coingecko_enabled {
oracle.add_feed(Box::new(CoinGeckoFeed::new(config.coingecko_api_key)));
}
oracle
}
/// Create an aggregate oracle that combines multiple feeds for resilience
pub fn aggregate(feeds: Vec<Box<dyn PriceFeed + Send + Sync>>) -> PriceOracle {
use price_feed::AggregateFeed;
let mut oracle = PriceOracle::new();
oracle.add_feed(Box::new(AggregateFeed::new(feeds)));
oracle
}
}
/// Configuration for production oracle
///
/// Consumed by `OracleFactory::production`; unset options simply skip the
/// corresponding feed.
#[derive(Debug, Clone, Default)]
pub struct ProductionOracleConfig {
    /// Base oracle configuration
    pub oracle_config: OracleConfig,
    /// Internal DEX endpoint URL
    pub dex_endpoint: Option<String>,
    /// Chainlink contract address and RPC URL
    pub chainlink: Option<(String, String)>,
    /// Enable CoinGecko feed
    pub coingecko_enabled: bool,
    /// CoinGecko API key (optional, for higher rate limits)
    pub coingecko_api_key: Option<String>,
}
/// Status of a trading pair
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PairStatus {

View file

@ -316,6 +316,109 @@ impl OnChainTwap {
_ => None,
}
}
    /// Get the current number of observations
    ///
    /// May be smaller than `cardinality` while the observation buffer is
    /// still filling.
    pub fn observation_count(&self) -> usize {
        self.observations.len()
    }
    /// Get the cardinality (max observations)
    ///
    /// Grown via `increase_cardinality` followed by
    /// `apply_cardinality_growth`.
    pub fn cardinality(&self) -> usize {
        self.cardinality
    }
    /// Get the next cardinality (pending growth)
    ///
    /// Staged by `increase_cardinality`; promoted into `cardinality` when
    /// `apply_cardinality_growth` runs.
    pub fn cardinality_next(&self) -> usize {
        self.cardinality_next
    }
    /// Increase the oracle cardinality (allows more observations to be stored)
    /// Similar to Uniswap V3's increaseObservationCardinalityNext
    ///
    /// The growth is only staged here (in `cardinality_next`) and takes
    /// effect when `apply_cardinality_growth` is called. Requests that do
    /// not exceed the current `cardinality_next` are ignored, so cardinality
    /// can never shrink through this method.
    pub fn increase_cardinality(&mut self, new_cardinality: usize) {
        if new_cardinality > self.cardinality_next {
            self.cardinality_next = new_cardinality;
        }
    }
    /// Apply the pending cardinality increase (called during next observation)
    ///
    /// No-op when no growth is pending.
    pub fn apply_cardinality_growth(&mut self) {
        if self.cardinality_next > self.cardinality {
            // Pre-reserve so the observation buffer does not reallocate while
            // it grows toward the new capacity.
            self.observations.reserve(self.cardinality_next - self.cardinality);
            self.cardinality = self.cardinality_next;
        }
    }
}
/// Factory for creating on-chain TWAP oracles
///
/// Stateless namespace type; each constructor only differs in the
/// cardinality (observation-buffer size) it requests.
pub struct OnChainTwapFactory;
impl OnChainTwapFactory {
    /// Cardinality used by [`standard`](Self::standard).
    pub const STANDARD_CARDINALITY: usize = 24;
    /// Cardinality used by [`high_frequency`](Self::high_frequency):
    /// 6 per hour * 24 hours.
    pub const HIGH_FREQUENCY_CARDINALITY: usize = 144;
    /// Cardinality used by [`low_frequency`](Self::low_frequency).
    pub const LOW_FREQUENCY_CARDINALITY: usize = 12;

    /// Create a standard TWAP oracle with default cardinality (24 observations)
    pub fn standard() -> OnChainTwap {
        Self::custom(Self::STANDARD_CARDINALITY)
    }
    /// Create a high-frequency TWAP oracle (more observations)
    pub fn high_frequency() -> OnChainTwap {
        Self::custom(Self::HIGH_FREQUENCY_CARDINALITY)
    }
    /// Create a low-frequency TWAP oracle (fewer observations)
    pub fn low_frequency() -> OnChainTwap {
        Self::custom(Self::LOW_FREQUENCY_CARDINALITY)
    }
    /// Create a custom TWAP oracle with specified cardinality
    pub fn custom(cardinality: usize) -> OnChainTwap {
        OnChainTwap::new(cardinality)
    }
}
/// Builder for configuring and creating TWAP observation systems
///
/// Collects a cardinality and optional seed observations, then produces an
/// `OnChainTwap` via [`build`](Self::build).
pub struct TwapOracleBuilder {
    cardinality: usize,
    initial_observations: Vec<TwapObservation>,
}
impl TwapOracleBuilder {
    /// Create a new builder with default cardinality
    ///
    /// Defaults: cardinality 24, no seed observations.
    pub fn new() -> Self {
        Self {
            cardinality: 24,
            initial_observations: Vec::new(),
        }
    }
    /// Set the cardinality (max observations)
    pub fn with_cardinality(mut self, cardinality: usize) -> Self {
        self.cardinality = cardinality;
        self
    }
    /// Add an initial observation
    pub fn with_observation(
        mut self,
        timestamp: DateTime<Utc>,
        price_cumulative: SynorDecimal,
        spl_cumulative: SynorDecimal,
    ) -> Self {
        let observation = TwapObservation {
            timestamp,
            price_cumulative,
            seconds_per_liquidity_cumulative: spl_cumulative,
        };
        self.initial_observations.push(observation);
        self
    }
    /// Build the OnChainTwap oracle
    ///
    /// Seed observations are inserted in the order they were added.
    /// NOTE(review): observations are written into the oracle directly,
    /// bypassing any cardinality enforcement — confirm callers never seed
    /// more than `cardinality` entries.
    pub fn build(self) -> OnChainTwap {
        let mut twap = OnChainTwap::new(self.cardinality);
        twap.observations.extend(self.initial_observations);
        twap
    }
}
impl Default for TwapOracleBuilder {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]

View file

@ -5,8 +5,8 @@
mod discounts;
mod tiers;
pub use discounts::{Discount, DiscountType};
pub use tiers::PricingTier;
pub use discounts::{find_best_volume_discount, standard_volume_discounts, Discount, DiscountType};
pub use tiers::{compare_tiers, PricingTier, TierComparison};
use crate::error::{EconomicsError, Result};
use crate::{ResourceUnit, ServiceType, SynorDecimal};
@ -312,6 +312,45 @@ impl PricingEngine {
(discounted, applied)
}
    /// Get volume discount for an amount
    /// Returns the best applicable volume discount based on spending amount
    ///
    /// Delegates to the module-level `find_best_volume_discount`; `None`
    /// means no discount tier applies to `amount`.
    pub fn get_volume_discount(&self, amount: SynorDecimal) -> Option<Discount> {
        find_best_volume_discount(amount)
    }
    /// Get all standard volume discounts
    ///
    /// Takes `&self` only for API symmetry; the returned tiers come from the
    /// module-level `standard_volume_discounts` and do not depend on engine
    /// state.
    pub fn get_volume_discount_tiers(&self) -> Vec<Discount> {
        standard_volume_discounts()
    }
    /// Compare current tier with alternatives based on monthly usage
    /// Returns a recommendation if switching tiers would save money
    ///
    /// Returns `None` when `current_tier_name` matches no configured tier
    /// (the `?` on the lookup) or when `compare_tiers` finds nothing better.
    pub fn recommend_tier(
        &self,
        current_tier_name: &str,
        monthly_usage: SynorDecimal,
    ) -> Option<TierComparison> {
        let current = self.tiers.iter().find(|t| t.name == current_tier_name)?;
        compare_tiers(current, &self.tiers, monthly_usage)
    }
/// Calculate cost with automatic volume discount
pub fn calculate_cost_with_volume_discount(
&self,
service_type: ServiceType,
resource_unit: ResourceUnit,
amount: Decimal,
) -> Result<(SynorDecimal, Option<Discount>)> {
let base_cost = self.calculate_cost(service_type, resource_unit, amount)?;
if let Some(discount) = self.get_volume_discount(base_cost) {
let discount_amount = discount.calculate_discount(base_cost);
Ok((base_cost - discount_amount, Some(discount)))
} else {
Ok((base_cost, None))
}
}
/// Get pricing summary for display
pub fn get_pricing_summary(&self) -> PricingSummary {
PricingSummary {

View file

@ -174,6 +174,22 @@ impl EdgeCompute {
self.enabled
}
    /// Get the compute endpoint URL.
    ///
    /// Borrowed view; callers clone if they need ownership.
    pub fn endpoint(&self) -> &str {
        &self.endpoint
    }
    /// Get the default timeout duration.
    ///
    /// Returned by value (`Duration` is `Copy`).
    pub fn default_timeout(&self) -> Duration {
        self.default_timeout
    }
    /// Set a custom timeout duration.
    ///
    /// Builder-style setter: consumes `self` and returns it with the new
    /// default timeout applied.
    pub fn with_timeout(mut self, timeout: Duration) -> Self {
        self.default_timeout = timeout;
        self
    }
/// Execute an edge function.
pub async fn execute(
&self,

View file

@ -1,3 +1,8 @@
// Module-level allow for Zeroize macro false positives
// The public_key field IS used via public_key() getter, but Zeroize macro
// generates intermediate assignments that trigger warnings.
#![allow(unused_assignments)]
//! Ring Signatures (LSAG - Linkable Spontaneous Anonymous Group)
//!
//! Ring signatures allow a signer to sign on behalf of a group (ring) without
@ -137,6 +142,8 @@ impl BorshDeserialize for RingPublicKey {
}
/// A private key for ring signing
// Allow needed for Zeroize macro false positives - public_key used via public_key() getter
#[allow(unused_assignments)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct RingPrivateKey {

View file

@ -1,3 +1,8 @@
// Module-level allow for Zeroize macro false positives
// The view_key and spend_key fields ARE used via getters and meta_address(),
// but Zeroize macro generates intermediate assignments that trigger warnings.
#![allow(unused_assignments)]
//! Stealth Addresses
//!
//! Stealth addresses provide receiver privacy by allowing senders to generate
@ -197,6 +202,10 @@ impl StealthMetaAddress {
}
/// A stealth keypair (private) - held by the recipient
// The allow is needed because Zeroize macro generates intermediate assignments
// that trigger false positive warnings. The view_key and spend_key are used
// via view_key() and spend_key() getters and in meta_address().
#[allow(unused_assignments)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct StealthKeyPair {
@ -320,6 +329,8 @@ impl StealthKeyPair {
}
/// A view-only wallet - can detect payments but not spend
// Allow needed for Zeroize macro false positives - fields used via getters
#[allow(unused_assignments)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct ViewOnlyWallet {

View file

@ -249,6 +249,11 @@ impl GatewayHandler {
size <= self.max_content_size
}
    /// Run periodic cleanup of stale rate limit entries
    ///
    /// Thin async wrapper around the rate limiter's own `cleanup`; intended
    /// to be driven by a periodic maintenance task.
    pub async fn cleanup_rate_limiter(&self) {
        self.rate_limiter.cleanup().await;
    }
/// Handle an HTTP request (main entry point for HTTP frameworks)
pub async fn handle(&self, request: &GatewayRequest) -> HttpResponse {
// Check rate limit

View file

@ -35,6 +35,8 @@ impl Default for ResolverConfig {
/// Resolves content from the storage network
pub struct ContentResolver {
/// Resolver configuration
config: ResolverConfig,
/// Storage node endpoints
nodes: Vec<String>,
/// Node health status
@ -57,14 +59,34 @@ struct NodeHealth {
impl ContentResolver {
/// Create a new resolver
/// Create a new resolver with node list
pub fn new(nodes: Vec<String>) -> Self {
let config = ResolverConfig {
nodes: nodes.clone(),
..Default::default()
};
Self {
config,
nodes,
node_health: Arc::new(RwLock::new(HashMap::new())),
}
}
    /// Create a new resolver with configuration
    ///
    /// The node list is copied out of `config.nodes` so it can be consulted
    /// directly without going back through the config.
    pub fn with_config(config: ResolverConfig) -> Self {
        let nodes = config.nodes.clone();
        Self {
            config,
            nodes,
            node_health: Arc::new(RwLock::new(HashMap::new())),
        }
    }
    /// Get the resolver configuration
    ///
    /// Borrowed view of the active configuration.
    pub fn config(&self) -> &ResolverConfig {
        &self.config
    }
/// Resolve content by CID
pub async fn resolve(&self, cid: &ContentId) -> Result<Vec<u8>> {
if self.nodes.is_empty() {
@ -88,15 +110,32 @@ impl ContentResolver {
// Fetch metadata to determine chunk count
let metadata = self.fetch_metadata(cid, &providers).await?;
// Validate metadata matches CID
if metadata.size != cid.size {
return Err(Error::ReassemblyFailed(format!(
"Metadata size {} doesn't match CID size {}",
metadata.size, cid.size
)));
}
// Fetch all chunks and reassemble
let mut all_data = Vec::with_capacity(cid.size as usize);
let mut all_data = Vec::with_capacity(metadata.size as usize);
for chunk_index in 0..metadata.chunk_count {
let chunk_data = self.fetch_chunk(cid, chunk_index, &providers).await?;
// Validate chunk size (except possibly the last chunk)
if chunk_index < metadata.chunk_count - 1 && chunk_data.len() != metadata.chunk_size {
return Err(Error::ReassemblyFailed(format!(
"Chunk {} size {} doesn't match expected {}",
chunk_index, chunk_data.len(), metadata.chunk_size
)));
}
all_data.extend_from_slice(&chunk_data);
}
// Trim to exact size (last chunk might have padding)
all_data.truncate(cid.size as usize);
all_data.truncate(metadata.size as usize);
// Verify CID matches
if !cid.verify(&all_data) {
@ -131,12 +170,20 @@ impl ContentResolver {
/// Fetch content from a single provider
async fn fetch_content_from_provider(
&self,
_provider: &str,
provider: &str,
_cid: &ContentId,
) -> Result<Vec<u8>> {
let start = std::time::Instant::now();
// TODO: HTTP request to provider
// GET {provider}/content/{cid}
Err(Error::Network("Not implemented".to_string()))
let result: Result<Vec<u8>> = Err(Error::Network("Not implemented".to_string()));
// Update health stats
let latency_ms = start.elapsed().as_millis() as u32;
self.update_health(provider, result.is_ok(), latency_ms).await;
result
}
/// Find providers for a CID
@ -197,13 +244,21 @@ impl ContentResolver {
/// Fetch chunk from a single provider
async fn fetch_chunk_from_provider(
&self,
_provider: &str,
provider: &str,
_cid: &ContentId,
_chunk_index: usize,
) -> Result<Vec<u8>> {
let start = std::time::Instant::now();
// TODO: HTTP request to provider
// GET {provider}/chunks/{cid}/{chunk_index}
Err(Error::Network("Not implemented".to_string()))
let result: Result<Vec<u8>> = Err(Error::Network("Not implemented".to_string()));
// Update health stats
let latency_ms = start.elapsed().as_millis() as u32;
self.update_health(provider, result.is_ok(), latency_ms).await;
result
}
/// Update node health stats
@ -223,6 +278,17 @@ impl ContentResolver {
.as_secs();
}
/// Cleanup stale health entries
pub async fn cleanup(&self, max_age_secs: u64) {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
let mut health = self.node_health.write().await;
health.retain(|_, h| now - h.last_check < max_age_secs);
}
/// Get nodes sorted by health
pub async fn sorted_nodes(&self) -> Vec<String> {
let health = self.node_health.read().await;

View file

@ -105,6 +105,16 @@ impl ProofSubmitter {
}
}
    /// Get the prover configuration
    ///
    /// Borrowed view; callers clone if they need ownership.
    pub fn config(&self) -> &ProverConfig {
        &self.config
    }
    /// Check if a challenge is within the deadline buffer
    ///
    /// Returns `true` only when strictly more than `deadline_buffer` blocks
    /// remain before `deadline_block`. `saturating_sub` makes an already
    /// passed deadline (`current_block >= deadline_block`) count as 0 blocks
    /// remaining — i.e. `false` — instead of underflowing.
    pub fn is_within_deadline(&self, deadline_block: u64, current_block: u64) -> bool {
        deadline_block.saturating_sub(current_block) > self.config.deadline_buffer
    }
/// Start monitoring for challenges
pub async fn start(&mut self) -> Result<()> {
// TODO: Subscribe to L1 events for storage challenges

View file

@ -474,6 +474,21 @@ pub struct ChainState {
pub total_work: Vec<u8>,
}
/// Virtual UTXO state (selected tips view).
///
/// Persisted via `MetadataStore::{get,set}_virtual_state` using Borsh
/// serialization, so field order is part of the on-disk format.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct VirtualState {
    /// Selected parent tips (virtual parent set).
    pub parents: Vec<BlockId>,
    /// Virtual ghost DAG data (blue score).
    pub virtual_blue_score: u64,
    /// Virtual mergeset blue hashes.
    pub virtual_mergeset_blues: Vec<BlockId>,
    /// Virtual mergeset red hashes.
    pub virtual_mergeset_reds: Vec<BlockId>,
    /// Multiset hash of the virtual UTXO set.
    pub utxo_multiset_hash: [u8; 32],
}
impl MetadataStore {
/// Creates a new metadata store.
pub fn new(db: Arc<Database>) -> Self {
@ -552,6 +567,24 @@ impl MetadataStore {
self.db.put(cf::METADATA, Self::KEY_GENESIS, &bytes)
}
/// Gets the virtual state (UTXO set state).
pub fn get_virtual_state(&self) -> Result<Option<VirtualState>, DbError> {
match self.db.get(cf::METADATA, Self::KEY_VIRTUAL_STATE)? {
Some(bytes) => {
let state = VirtualState::try_from_slice(&bytes)
.map_err(|e| DbError::Deserialization(e.to_string()))?;
Ok(Some(state))
}
None => Ok(None),
}
}
    /// Sets the virtual state.
    ///
    /// Borsh-serializes `state` and stores it under `KEY_VIRTUAL_STATE` in
    /// the metadata column family; counterpart of `get_virtual_state`.
    pub fn set_virtual_state(&self, state: &VirtualState) -> Result<(), DbError> {
        let bytes = borsh::to_vec(state).map_err(|e| DbError::Serialization(e.to_string()))?;
        self.db.put(cf::METADATA, Self::KEY_VIRTUAL_STATE, &bytes)
    }
/// Gets a generic metadata value.
pub fn get_raw(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
self.db.get(cf::METADATA, key)

View file

@ -168,6 +168,11 @@ impl PropertyChecker {
}
}
    /// Get the prover configuration
    ///
    /// Borrowed view; callers clone if they need ownership.
    pub fn config(&self) -> &ProverConfig {
        &self.config
    }
/// Checks an invariant holds in all states.
pub fn check_invariant(
&self,

View file

@ -144,6 +144,16 @@ impl Verifier {
}
}
    /// Get the prover configuration
    ///
    /// Borrowed view; callers clone if they need ownership.
    pub fn config(&self) -> &ProverConfig {
        &self.config
    }
    /// Get the symbolic executor
    ///
    /// Borrowed view of the executor this verifier drives.
    pub fn executor(&self) -> &SymbolicExecutor {
        &self.executor
    }
/// Verifies a contract against its specifications.
pub fn verify(&self, ctx: &VerificationContext) -> VerifierResult<VerificationReport> {
let mut report = VerificationReport::new(&ctx.contract_name);

View file

@ -51,6 +51,59 @@ impl SmtContext {
}
}
    /// Get the timeout in milliseconds
    ///
    /// This value is also echoed into the script header by `to_smtlib`.
    pub fn timeout_ms(&self) -> u64 {
        self.timeout_ms
    }
    /// Add a variable declaration
    ///
    /// Emits a zero-arity `declare-fun` (i.e. a constant) of the given sort.
    /// NOTE(review): `name` and `sort` are spliced in verbatim — there is no
    /// escaping or duplicate checking; callers must supply valid SMT-LIB
    /// symbols.
    pub fn declare(&mut self, name: &str, sort: &str) {
        self.declarations.push(format!("(declare-fun {} () {})", name, sort));
    }
    /// Add an assertion
    ///
    /// Wraps the expression in `(assert …)` and records it for inclusion in
    /// the next `to_smtlib` output.
    pub fn assert(&mut self, expr: &SmtExpr) {
        self.assertions.push(format!("(assert {})", expr.as_str()));
    }
    /// Get all declarations
    ///
    /// Slice view of the raw `(declare-fun …)` strings in insertion order.
    pub fn declarations(&self) -> &[String] {
        &self.declarations
    }
    /// Get all assertions
    ///
    /// Slice view of the raw `(assert …)` strings in insertion order.
    pub fn assertions(&self) -> &[String] {
        &self.assertions
    }
/// Generate SMT-LIB script
pub fn to_smtlib(&self) -> String {
let mut script = String::new();
script.push_str("; SMT-LIB2 script\n");
script.push_str(&format!("; Timeout: {}ms\n", self.timeout_ms));
script.push_str("(set-logic ALL)\n");
for decl in &self.declarations {
script.push_str(decl);
script.push('\n');
}
for assertion in &self.assertions {
script.push_str(assertion);
script.push('\n');
}
script.push_str("(check-sat)\n");
script.push_str("(get-model)\n");
script
}
    /// Clear all declarations and assertions
    ///
    /// The timeout setting is left untouched; only the recorded script
    /// content is discarded.
    pub fn reset(&mut self) {
        self.declarations.clear();
        self.assertions.clear();
    }
/// Encodes a symbolic state as SMT constraints.
pub fn encode_state(&self, state: &SymbolicState) -> VerifierResult<Vec<SmtExpr>> {
let mut constraints = Vec::new();