synor/sdk/rust/src/tensor.rs
Gulshan Yadav 59a7123535 feat(sdk): implement Phase 1 SDKs for Wallet, RPC, and Storage
Implements comprehensive SDK support for three core services across
four programming languages (JavaScript/TypeScript, Python, Go, Rust).

## New SDKs

### Wallet SDK
- Key management (create, import, export)
- Transaction signing
- Message signing and verification
- Balance and UTXO queries
- Stealth address support

### RPC SDK
- Block and transaction queries
- Chain state information
- Fee estimation
- Mempool information
- WebSocket subscriptions for real-time updates

### Storage SDK
- Content upload and download
- Pinning operations
- CAR file support
- Directory management
- Gateway URL generation

## Shared Infrastructure

- JSON Schema definitions for all 11 services
- Common type definitions (Address, Amount, UTXO, etc.)
- Unified error handling patterns
- Builder patterns for configuration

## Package Updates

- JavaScript: Updated to @synor/sdk with module exports
- Python: Updated to synor-sdk with websockets dependency
- Go: Added gorilla/websocket dependency
- Rust: Added base64, urlencoding, multipart support

## Fixes

- Fixed Tensor Default trait implementation
- Fixed ProcessorType enum casing
2026-01-27 00:46:24 +05:30


//! Multi-dimensional tensor for compute operations.
use crate::Precision;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::f64::consts::PI;
/// Multi-dimensional tensor.
///
/// # Examples
///
/// ```
/// use synor_compute::Tensor;
///
/// // Create a 2D tensor
/// let matrix = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
///
/// // Create random tensor
/// let random = Tensor::rand(&[512, 512]);
///
/// // Operations
/// let mean = random.mean();
/// let transposed = matrix.transpose();
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
pub struct Tensor {
shape: Vec<usize>,
data: Vec<f64>,
#[serde(default)]
dtype: Precision,
}
impl Tensor {
/// Create a new tensor with the given shape and data.
pub fn new(shape: &[usize], data: Vec<f64>) -> Self {
let expected_size: usize = shape.iter().product();
assert_eq!(
data.len(),
expected_size,
"Data size {} does not match shape {:?}",
data.len(),
shape
);
Self {
shape: shape.to_vec(),
data,
dtype: Precision::FP32,
}
}
/// Set the dtype of the tensor, returning it for builder-style chaining.
pub fn with_dtype(mut self, dtype: Precision) -> Self {
self.dtype = dtype;
self
}
/// Get the shape of the tensor.
pub fn shape(&self) -> &[usize] {
&self.shape
}
/// Get the data of the tensor.
pub fn data(&self) -> &[f64] {
&self.data
}
/// Get the dtype of the tensor.
pub fn dtype(&self) -> Precision {
self.dtype
}
/// Get the total number of elements.
pub fn size(&self) -> usize {
self.data.len()
}
/// Get the number of dimensions.
pub fn ndim(&self) -> usize {
self.shape.len()
}
/// Get element at indices.
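///
/// Indices are interpreted in row-major order: for a `[2, 3]` tensor,
/// `get(&[1, 2])` returns the last element of the second row.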
pub fn get(&self, indices: &[usize]) -> f64 {
assert_eq!(
indices.len(),
self.shape.len(),
"Index dimensions must match tensor dimensions"
);
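// Compute the flat, row-major offset: the last dimension varies fastest.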
let mut idx = 0;
let mut stride = 1;
for i in (0..self.shape.len()).rev() {
idx += indices[i] * stride;
stride *= self.shape[i];
}
self.data[idx]
}
// Factory methods
/// Create a tensor filled with zeros.
pub fn zeros(shape: &[usize]) -> Self {
let size: usize = shape.iter().product();
Self::new(shape, vec![0.0; size])
}
/// Create a tensor filled with ones.
pub fn ones(shape: &[usize]) -> Self {
let size: usize = shape.iter().product();
Self::new(shape, vec![1.0; size])
}
/// Create a tensor with uniform random values in [0, 1).
pub fn rand(shape: &[usize]) -> Self {
let size: usize = shape.iter().product();
let mut rng = rand::thread_rng();
let data: Vec<f64> = (0..size).map(|_| rng.gen()).collect();
Self::new(shape, data)
}
/// Create a tensor with standard normal random values.
pub fn randn(shape: &[usize]) -> Self {
let size: usize = shape.iter().product();
let mut rng = rand::thread_rng();
let data: Vec<f64> = (0..size)
.map(|_| {
// Box-Muller transform: two independent U(0, 1) samples produce one
// standard-normal sample. Using 1 - u1 keeps the ln() argument in (0, 1],
// avoiding ln(0) when the RNG returns exactly 0.
let u1: f64 = rng.gen();
let u2: f64 = rng.gen();
(-2.0 * (1.0 - u1).ln()).sqrt() * (2.0 * PI * u2).cos()
})
.collect();
Self::new(shape, data)
}
/// Create an identity matrix.
pub fn eye(n: usize) -> Self {
let mut data = vec![0.0; n * n];
for i in 0..n {
data[i * n + i] = 1.0;
}
Self::new(&[n, n], data)
}
/// Create a 1D tensor of values from `start` (inclusive) to `end` (exclusive), stepping by `step`.
pub fn arange(start: f64, end: f64, step: f64) -> Self {
assert!(step != 0.0, "arange step must be non-zero");
let size = ((end - start) / step).ceil() as usize;
let data: Vec<f64> = (0..size).map(|i| start + i as f64 * step).collect();
Self::new(&[size], data)
}
/// Create a 1D tensor of `num` evenly spaced values from `start` to `end`, inclusive.
pub fn linspace(start: f64, end: f64, num: usize) -> Self {
assert!(num >= 2, "linspace requires at least 2 points");
let step = (end - start) / (num - 1) as f64;
let data: Vec<f64> = (0..num).map(|i| start + i as f64 * step).collect();
Self::new(&[num], data)
}
// Operations
/// Reshape tensor to new shape.
pub fn reshape(&self, new_shape: &[usize]) -> Self {
let new_size: usize = new_shape.iter().product();
assert_eq!(
new_size,
self.size(),
"Cannot reshape tensor of size {} to shape {:?}",
self.size(),
new_shape
);
Self {
shape: new_shape.to_vec(),
data: self.data.clone(),
dtype: self.dtype,
}
}
/// Transpose 2D tensor.
pub fn transpose(&self) -> Self {
assert_eq!(self.ndim(), 2, "Transpose only supported for 2D tensors");
let rows = self.shape[0];
let cols = self.shape[1];
let mut transposed = vec![0.0; self.size()];
for i in 0..rows {
for j in 0..cols {
transposed[j * rows + i] = self.data[i * cols + j];
}
}
Self::new(&[cols, rows], transposed).with_dtype(self.dtype)
}
// Reductions
/// Compute mean of all elements.
pub fn mean(&self) -> f64 {
self.data.iter().sum::<f64>() / self.size() as f64
}
/// Compute sum of all elements.
pub fn sum(&self) -> f64 {
self.data.iter().sum()
}
/// Compute standard deviation.
pub fn std(&self) -> f64 {
let mean = self.mean();
let variance: f64 = self.data.iter().map(|x| (x - mean).powi(2)).sum::<f64>()
/ self.size() as f64;
variance.sqrt()
}
/// Find maximum value.
pub fn max(&self) -> f64 {
self.data.iter().cloned().fold(f64::NEG_INFINITY, f64::max)
}
/// Find minimum value.
pub fn min(&self) -> f64 {
self.data.iter().cloned().fold(f64::INFINITY, f64::min)
}
// Activations
/// Apply ReLU activation.
pub fn relu(&self) -> Self {
let data: Vec<f64> = self.data.iter().map(|x| x.max(0.0)).collect();
Self {
shape: self.shape.clone(),
data,
dtype: self.dtype,
}
}
/// Apply sigmoid activation.
pub fn sigmoid(&self) -> Self {
let data: Vec<f64> = self.data.iter().map(|x| 1.0 / (1.0 + (-x).exp())).collect();
Self {
shape: self.shape.clone(),
data,
dtype: self.dtype,
}
}
/// Apply softmax activation.
pub fn softmax(&self) -> Self {
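// Subtract the maximum before exponentiating for numerical stability;
// softmax is invariant to a constant shift of its inputs.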
let max_val = self.max();
let exp_values: Vec<f64> = self.data.iter().map(|x| (x - max_val).exp()).collect();
let sum: f64 = exp_values.iter().sum();
let data: Vec<f64> = exp_values.iter().map(|x| x / sum).collect();
Self {
shape: self.shape.clone(),
data,
dtype: self.dtype,
}
}
/// Convert to nested vector (for 1D and 2D tensors).
pub fn to_nested_vec(&self) -> Vec<Vec<f64>> {
match self.ndim() {
1 => vec![self.data.clone()],
2 => {
let rows = self.shape[0];
let cols = self.shape[1];
(0..rows)
.map(|i| self.data[i * cols..(i + 1) * cols].to_vec())
.collect()
}
_ => panic!("to_nested_vec only supports 1D and 2D tensors"),
}
}
}
impl std::fmt::Display for Tensor {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Tensor(shape={:?}, dtype={:?})",
self.shape, self.dtype
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tensor_creation() {
let t = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
assert_eq!(t.shape(), &[2, 3]);
assert_eq!(t.size(), 6);
assert_eq!(t.ndim(), 2);
}
#[test]
fn test_tensor_zeros() {
let t = Tensor::zeros(&[3, 3]);
assert!(t.data().iter().all(|&x| x == 0.0));
}
#[test]
fn test_tensor_transpose() {
let t = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
let transposed = t.transpose();
assert_eq!(transposed.shape(), &[3, 2]);
}
#[test]
fn test_tensor_mean() {
let t = Tensor::new(&[4], vec![1.0, 2.0, 3.0, 4.0]);
assert!((t.mean() - 2.5).abs() < 1e-10);
}
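// Additional sanity checks (a sketch beyond the original tests): row-major
// indexing via get(), and softmax normalizing to a probability distribution.
#[test]
fn test_tensor_get_row_major() {
let t = Tensor::new(&[2, 3], vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
assert_eq!(t.get(&[0, 0]), 1.0);
assert_eq!(t.get(&[1, 2]), 6.0);
}
#[test]
fn test_tensor_softmax_sums_to_one() {
let t = Tensor::new(&[3], vec![1.0, 2.0, 3.0]);
assert!((t.softmax().sum() - 1.0).abs() < 1e-10);
}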
}