synor/sdk/swift/README.md
Gulshan Yadav 162227dc71 docs(sdk): add comprehensive documentation for all 12 SDKs
Add README.md documentation for:
- Main SDK overview with quick start guides
- JavaScript/TypeScript SDK
- Python SDK
- Go SDK
- Rust SDK
- Java SDK
- Kotlin SDK
- Swift SDK
- Flutter/Dart SDK
- C SDK
- C++ SDK
- C#/.NET SDK
- Ruby SDK

Each README includes:
- Installation instructions
- Quick start examples
- Tensor operations
- Matrix operations (matmul, conv2d, attention)
- LLM inference (single and streaming)
- Configuration options
- Error handling
- Type definitions
2026-01-11 18:05:03 +05:30

227 lines
4.3 KiB
Markdown

# Synor Compute SDK for Swift
Access distributed heterogeneous compute at up to 90% cost reduction.
## Installation
### Swift Package Manager
Add to `Package.swift`:
```swift
dependencies: [
    .package(url: "https://github.com/synor/compute-sdk-swift", from: "0.1.0")
]
```
### Xcode
File > Add Packages > Enter URL:
`https://github.com/synor/compute-sdk-swift`
## Quick Start
```swift
import SynorCompute
let client = SynorCompute(apiKey: "your-api-key")
// Matrix multiplication on GPU
let a = Tensor.random(shape: [512, 512])
let b = Tensor.random(shape: [512, 512])
Task {
    let result = try await client.matmul(a, b,
        precision: .fp16,
        processor: .gpu
    )
    if result.isSuccess {
        print("Time: \(result.executionTimeMs ?? 0)ms")
        print("Cost: $\(result.cost ?? 0)")
    }
}
```
## Tensor Operations
```swift
// Create tensors
let zeros = Tensor.zeros(shape: [3, 3])
let ones = Tensor.ones(shape: [2, 2])
let random = Tensor.random(shape: [10, 10])
let randn = Tensor.randn(shape: [100])
let eye = Tensor.eye(size: 3)
// From array
let data: [Float] = [1, 2, 3, 4, 5, 6]
let tensor = Tensor(data: data, shape: [2, 3])
// Operations
let reshaped = tensor.reshape(to: [3, 2])
let transposed = tensor.transpose()
// Math
let mean = tensor.mean()
let sum = tensor.sum()
let std = tensor.std()
```
## Async/Await API
```swift
// Matrix multiplication
let result = try await client.matmul(a, b,
    precision: .fp16,
    processor: .gpu,
    strategy: .speed
)

// Convolution
let conv = try await client.conv2d(input, kernel,
    stride: (1, 1),
    padding: (1, 1)
)

// Attention
let attention = try await client.attention(query, key, value,
    numHeads: 8,
    flash: true
)
```
## LLM Inference
```swift
// Single response
let response = try await client.inference(
    model: "llama-3-70b",
    prompt: "Explain quantum computing",
    maxTokens: 512,
    temperature: 0.7
)
print(response.result ?? "")

// Streaming with AsyncSequence
for try await chunk in client.inferenceStream(
    model: "llama-3-70b",
    prompt: "Write a poem"
) {
    print(chunk, terminator: "")
}
```
## Configuration
```swift
let config = SynorConfig(
    apiKey: "your-api-key",
    baseUrl: "https://api.synor.io/compute/v1",
    defaultProcessor: .gpu,
    defaultPrecision: .fp16,
    timeout: 30,
    debug: true
)
let client = SynorCompute(config: config)
```
## SwiftUI Integration
```swift
import SwiftUI
import SynorCompute
struct ComputeView: View {
    @StateObject private var vm = ComputeViewModel()

    var body: some View {
        VStack {
            if vm.isLoading {
                ProgressView()
            } else if let result = vm.result {
                Text("Result: \(result)")
            }
            Button("Compute") {
                Task { await vm.compute() }
            }
        }
    }
}

@MainActor
class ComputeViewModel: ObservableObject {
    @Published var result: String?
    @Published var isLoading = false

    private let client = SynorCompute(apiKey: "your-api-key")

    func compute() async {
        isLoading = true
        defer { isLoading = false }
        do {
            let response = try await client.inference(
                model: "llama-3-70b",
                prompt: "Hello"
            )
            result = response.result
        } catch {
            result = "Error: \(error.localizedDescription)"
        }
    }
}
```
## Error Handling
```swift
do {
    let result = try await client.matmul(a, b)
} catch let error as SynorError {
    switch error {
    case .apiError(let statusCode, let message):
        print("API Error \(statusCode): \(message)")
    case .networkError(let underlying):
        print("Network error: \(underlying)")
    case .invalidArgument(let message):
        print("Invalid argument: \(message)")
    }
} catch {
    print("Unexpected error: \(error)")
}
```
## Types
```swift
// Processor types
enum ProcessorType: String, Codable {
    case cpu, gpu, tpu, npu, lpu, fpga, auto
}

// Precision
enum Precision: String, Codable {
    case fp64, fp32, fp16, bf16, int8, int4
}

// Job status
enum JobStatus: String, Codable {
    case pending, running, completed, failed, cancelled
}
```
## Requirements
- iOS 15.0+ / macOS 12.0+ / tvOS 15.0+ / watchOS 8.0+
- Swift 5.9+
## Testing
```bash
swift test
```
## License
MIT