# Synor Compute SDK for Rust

Access distributed heterogeneous compute at 90% cost reduction.

## Installation

Add to `Cargo.toml`:

```toml
[dependencies]
synor-compute = "0.1"
tokio = { version = "1", features = ["full"] }
```

## Quick Start
```rust
use synor_compute::{SynorCompute, Tensor, Precision, ProcessorType};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = SynorCompute::new("your-api-key");

    // Matrix multiplication on GPU
    let a = Tensor::rand(&[512, 512]);
    let b = Tensor::rand(&[512, 512]);

    let result = client.matmul(&a, &b)
        .precision(Precision::FP16)
        .processor(ProcessorType::Gpu)
        .send()
        .await?;

    if result.is_success() {
        println!("Time: {}ms", result.execution_time_ms.unwrap_or(0));
        println!("Cost: ${}", result.cost.unwrap_or(0.0));
    }

    Ok(())
}
```
## Tensor Operations

```rust
// Create tensors
let zeros = Tensor::zeros(&[3, 3]);
let ones = Tensor::ones(&[2, 2]);
let random = Tensor::rand(&[10, 10]); // Uniform [0, 1)
let randn = Tensor::randn(&[100]);    // Normal distribution
let eye = Tensor::eye(3);             // Identity matrix

// From data
let data = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
let tensor = Tensor::new(&[2, 3], data);

// Ranges
let range = Tensor::arange(0.0, 10.0, 1.0);
let linspace = Tensor::linspace(0.0, 1.0, 100);

// Operations
let reshaped = tensor.reshape(&[3, 2]);
let transposed = tensor.transpose();

// Math
let mean = tensor.mean();
let sum = tensor.sum();
let std = tensor.std();

// Activations
let relu = tensor.relu();
let sigmoid = tensor.sigmoid();
let softmax = tensor.softmax();
```
## Builder Pattern API

```rust
// Matrix multiplication with options
let result = client.matmul(&a, &b)
    .precision(Precision::FP16)
    .processor(ProcessorType::Gpu)
    .priority(Priority::High)
    .send()
    .await?;

// 2D Convolution
let result = client.conv2d(&input, &kernel)
    .stride((1, 1))
    .padding((1, 1))
    .precision(Precision::FP32)
    .send()
    .await?;

// Attention
let result = client.attention(&query, &key, &value)
    .num_heads(8)
    .flash(true)
    .precision(Precision::FP16)
    .send()
    .await?;
```
## LLM Inference

```rust
// Single response
let response = client.inference("llama-3-70b", "Explain quantum computing")
    .send()
    .await?;
println!("{}", response.result.unwrap_or_default());

// Streaming with futures
use futures::StreamExt;

let mut stream = client.inference_stream("llama-3-70b", "Write a poem").await?;
while let Some(token) = stream.next().await {
    print!("{}", token?);
}
```
## Configuration

```rust
use synor_compute::Config;

let config = Config::new("your-api-key")
    .base_url("https://api.synor.io/compute/v1")
    .default_processor(ProcessorType::Gpu)
    .default_precision(Precision::FP16)
    .timeout_secs(30)
    .debug(true);

let client = SynorCompute::with_config(config);
```
## Error Handling

```rust
use synor_compute::{Error, JobStatus, Result};

async fn compute() -> Result<()> {
    let result = client.matmul(&a, &b).send().await?;

    match result.status {
        JobStatus::Completed => println!("Success!"),
        JobStatus::Failed => {
            if let Some(err) = result.error {
                eprintln!("Job failed: {}", err);
            }
        }
        _ => {}
    }
    Ok(())
}

// Pattern matching on errors
match client.matmul(&a, &b).send().await {
    Ok(result) => println!("Result: {:?}", result),
    Err(Error::Api { status_code, message }) => {
        eprintln!("API error {}: {}", status_code, message);
    }
    Err(Error::InvalidArgument(msg)) => {
        eprintln!("Invalid argument: {}", msg);
    }
    Err(e) => eprintln!("Other error: {}", e),
}
```
## Types

```rust
// Processor types
ProcessorType::Cpu
ProcessorType::Gpu
ProcessorType::Tpu
ProcessorType::Npu
ProcessorType::Lpu
ProcessorType::Fpga
ProcessorType::Auto // Automatic selection

// Precision levels
Precision::FP64
Precision::FP32
Precision::FP16
Precision::BF16
Precision::INT8
Precision::INT4

// Job status
JobStatus::Pending
JobStatus::Running
JobStatus::Completed
JobStatus::Failed
JobStatus::Cancelled

// Priority
Priority::Low
Priority::Normal
Priority::High
Priority::Critical
```
## Features

Enable optional features in `Cargo.toml`:

```toml
[dependencies]
synor-compute = { version = "0.1", features = ["serde", "rayon"] }
```

- `serde` - Serialization support (enabled by default)
- `rayon` - Parallel tensor operations
## Testing

```bash
cargo test
```
## License

MIT