synor/sdk/flutter/lib/src/tensor.dart
Gulshan Yadav 62ec3c92da feat(sdk): add Flutter/Dart SDK for Synor Compute
Complete SDK implementation for Flutter and Dart applications:

lib/src/types.dart:
- Precision, ProcessorType, Priority, JobStatus enums
- SynorConfig for client configuration
- MatMulOptions, Conv2dOptions, AttentionOptions, InferenceOptions
- PricingInfo and UsageStats data classes
- SynorException for error handling

lib/src/tensor.dart:
- Full Tensor class with shape, dtype, and data
- Factory constructors: zeros, ones, rand, randn, eye, linspace, arange
- Operations: reshape, transpose, flatten
- Statistics: sum, mean, std, min, max, argmin, argmax
- Element-wise: add, sub, mul, div, scalar ops
- Activations: relu, sigmoid, tanh, softmax
- JSON serialization with base64-encoded binary data

lib/src/job.dart:
- JobResult with status, result, timing, and cost
- Job class with WebSocket streaming and HTTP polling
- JobStatusUpdate for real-time progress tracking
- JobBatch for parallel job management

lib/src/client.dart:
- SynorCompute main client
- Operations: matmul, conv2d, attention, elementwise, reduce
- LLM inference with streaming support
- Tensor upload/download/delete
- Job management: submit, cancel, list
- Pricing and usage statistics

Platform support: Android, iOS, Linux, macOS, Web, Windows
2026-01-11 14:27:55 +05:30

520 lines
13 KiB
Dart

/// Tensor implementation for Synor Compute SDK
library synor_compute.tensor;
import 'dart:convert';
import 'dart:math' as math;
import 'dart:typed_data';
import 'types.dart';
/// Multi-dimensional tensor for compute operations.
///
/// Elements are stored in row-major (C) order in a flat [Float64List].
/// [dtype] is a serialization tag only; in-memory storage is always 64-bit
/// floats. Tensors produced by [reshape] share their buffer with the source
/// tensor, so mutating one's [data] is visible through the other.
class Tensor {
  /// Tensor shape (dimensions), outermost axis first.
  final List<int> shape;

  /// Underlying element storage in row-major order.
  final Float64List data;

  /// Data type tag used for API serialization.
  final DType dtype;

  /// Unique identifier (assigned by server), if any.
  final String? id;

  /// Creates a tensor with the given [shape], copying [data].
  ///
  /// Throws [ArgumentError] if the element count does not match [shape].
  Tensor({
    required this.shape,
    required List<double> data,
    this.dtype = DType.float64,
    this.id,
  }) : data = Float64List.fromList(data) {
    _checkSize(shape, this.data.length);
  }

  /// Creates a tensor that adopts [data] without copying it.
  ///
  /// Throws [ArgumentError] if the element count does not match [shape].
  Tensor.fromTypedData({
    required this.shape,
    required this.data,
    this.dtype = DType.float64,
    this.id,
  }) {
    _checkSize(shape, data.length);
  }

  /// Validates that [length] elements exactly fill [shape].
  static void _checkSize(List<int> shape, int length) {
    final expectedSize = shape.fold<int>(1, (a, b) => a * b);
    if (length != expectedSize) {
      throw ArgumentError(
        'Data length $length does not match shape $shape '
        '(expected $expectedSize elements)',
      );
    }
  }

  /// Creates a tensor filled with zeros.
  factory Tensor.zeros(List<int> shape, {DType dtype = DType.float64}) =>
      Tensor.full(shape, 0.0, dtype: dtype);

  /// Creates a tensor filled with ones.
  factory Tensor.ones(List<int> shape, {DType dtype = DType.float64}) =>
      Tensor.full(shape, 1.0, dtype: dtype);

  /// Creates a tensor filled with a specific [value].
  factory Tensor.full(
    List<int> shape,
    double value, {
    DType dtype = DType.float64,
  }) {
    final size = shape.fold<int>(1, (a, b) => a * b);
    return Tensor(
      shape: shape,
      data: List.filled(size, value),
      dtype: dtype,
    );
  }

  /// Creates a tensor with random values from the uniform distribution [0, 1).
  factory Tensor.rand(List<int> shape, {DType dtype = DType.float64}) {
    final size = shape.fold<int>(1, (a, b) => a * b);
    final random = math.Random();
    return Tensor(
      shape: shape,
      data: List.generate(size, (_) => random.nextDouble()),
      dtype: dtype,
    );
  }

  /// Creates a tensor with random values from a normal distribution
  /// with the given [mean] and standard deviation [std].
  factory Tensor.randn(
    List<int> shape, {
    double mean = 0.0,
    double std = 1.0,
    DType dtype = DType.float64,
  }) {
    final size = shape.fold<int>(1, (a, b) => a * b);
    final random = math.Random();
    // Box-Muller transform for normal distribution.
    double nextGaussian() {
      // nextDouble() is in [0, 1); 1 - nextDouble() is in (0, 1], so
      // math.log never receives zero (which would yield -Infinity samples).
      final u1 = 1.0 - random.nextDouble();
      final u2 = random.nextDouble();
      return math.sqrt(-2 * math.log(u1)) * math.cos(2 * math.pi * u2);
    }

    return Tensor(
      shape: shape,
      data: List.generate(size, (_) => mean + std * nextGaussian()),
      dtype: dtype,
    );
  }

  /// Creates an [n] x [n] identity matrix.
  factory Tensor.eye(int n, {DType dtype = DType.float64}) {
    final data = List.filled(n * n, 0.0);
    for (var i = 0; i < n; i++) {
      data[i * n + i] = 1.0; // diagonal entries
    }
    return Tensor(shape: [n, n], data: data, dtype: dtype);
  }

  /// Creates a 1D tensor of [steps] evenly spaced values from [start]
  /// to [end] inclusive.
  ///
  /// With `steps == 1` the result is just `[start]`. Throws
  /// [ArgumentError] if [steps] is less than 1.
  factory Tensor.linspace(
    double start,
    double end,
    int steps, {
    DType dtype = DType.float64,
  }) {
    if (steps < 1) {
      throw ArgumentError('Steps must be at least 1');
    }
    if (steps == 1) {
      return Tensor(shape: [1], data: [start], dtype: dtype);
    }
    final step = (end - start) / (steps - 1);
    return Tensor(
      shape: [steps],
      data: List.generate(steps, (i) => start + i * step),
      dtype: dtype,
    );
  }

  /// Creates a 1D tensor of values from [start] (inclusive) toward [end]
  /// (exclusive), advancing by [step].
  ///
  /// Negative steps count downward (e.g. `arange(5, 0, step: -1)` gives
  /// `[5, 4, 3, 2, 1]`). Throws [ArgumentError] if [step] is zero.
  factory Tensor.arange(
    double start,
    double end, {
    double step = 1.0,
    DType dtype = DType.float64,
  }) {
    if (step == 0.0) {
      throw ArgumentError('Step must be non-zero');
    }
    // Count-based generation (start + i * step) avoids the cumulative
    // floating-point drift of repeatedly adding `step`.
    final span = (end - start) / step;
    final count = span > 0 ? span.ceil() : 0;
    return Tensor(
      shape: [count],
      data: List.generate(count, (i) => start + i * step),
      dtype: dtype,
    );
  }

  /// Creates a tensor from a JSON map produced by [toJson] (or a
  /// compatible server payload).
  ///
  /// `data` may be either a base64 string of packed float64 bytes or a
  /// (possibly nested) numeric list. Throws [ArgumentError] on any other
  /// format or on a binary payload whose length is not a multiple of 8.
  factory Tensor.fromJson(Map<String, dynamic> json) {
    final shape = (json['shape'] as List).cast<int>();
    final rawData = json['data'];
    List<double> data;
    if (rawData is String) {
      // Base64-encoded binary data: packed 64-bit floats.
      final bytes = base64Decode(rawData);
      if (bytes.lengthInBytes % 8 != 0) {
        throw ArgumentError(
          'Binary tensor data length ${bytes.lengthInBytes} is not a '
          'multiple of 8 bytes',
        );
      }
      // Respect the decoded view's offset and length rather than assuming
      // it spans its entire underlying buffer.
      data = bytes.buffer
          .asFloat64List(bytes.offsetInBytes, bytes.lengthInBytes ~/ 8)
          .toList();
    } else if (rawData is List) {
      data = _flattenList(rawData);
    } else {
      throw ArgumentError('Invalid tensor data format');
    }
    return Tensor(
      shape: shape,
      data: data,
      dtype: DType.fromString(json['dtype'] as String? ?? 'float64'),
      id: json['id'] as String?,
    );
  }

  /// Flattens a nested numeric list to 1D.
  ///
  /// Throws [ArgumentError] on non-numeric leaves instead of silently
  /// dropping them (which would surface later as a confusing size
  /// mismatch against the declared shape).
  static List<double> _flattenList(List<dynamic> nested) {
    final result = <double>[];
    void flatten(dynamic item) {
      if (item is List) {
        for (final e in item) {
          flatten(e);
        }
      } else if (item is num) {
        result.add(item.toDouble());
      } else {
        throw ArgumentError('Non-numeric tensor element: $item');
      }
    }

    flatten(nested);
    return result;
  }

  /// Number of dimensions.
  int get ndim => shape.length;

  /// Total number of elements.
  int get size => data.length;

  /// Number of bytes of element storage.
  int get nbytes => data.lengthInBytes;

  /// Gets the element at [index] (for 1D tensors only).
  ///
  /// Throws [StateError] for multi-dimensional tensors; use [at] instead.
  double operator [](int index) {
    if (ndim != 1) {
      throw StateError('Use at() for multi-dimensional indexing');
    }
    return data[index];
  }

  /// Gets the element at a multi-dimensional index.
  ///
  /// Throws [ArgumentError] if the number of [indices] differs from
  /// [ndim], and [RangeError] if any index is out of bounds.
  double at(List<int> indices) {
    if (indices.length != ndim) {
      throw ArgumentError(
        'Expected $ndim indices, got ${indices.length}',
      );
    }
    // Row-major linearization: walk axes from innermost to outermost,
    // accumulating strides.
    var flatIndex = 0;
    var stride = 1;
    for (var i = ndim - 1; i >= 0; i--) {
      if (indices[i] < 0 || indices[i] >= shape[i]) {
        throw RangeError('Index ${indices[i]} out of bounds for axis $i '
            'with size ${shape[i]}');
      }
      flatIndex += indices[i] * stride;
      stride *= shape[i];
    }
    return data[flatIndex];
  }

  /// Returns a view of this tensor with [newShape] (same buffer, no copy).
  ///
  /// Throws [ArgumentError] if the element counts differ.
  Tensor reshape(List<int> newShape) {
    final newSize = newShape.fold<int>(1, (a, b) => a * b);
    if (newSize != size) {
      throw ArgumentError(
        'Cannot reshape tensor of size $size to shape $newShape '
        '(size $newSize)',
      );
    }
    return Tensor.fromTypedData(
      shape: newShape,
      data: data,
      dtype: dtype,
      id: id,
    );
  }

  /// Flattens the tensor to 1D (shares the buffer, like [reshape]).
  Tensor flatten() => reshape([size]);

  /// Transposes the tensor by swapping its last two dimensions.
  ///
  /// Tensors with fewer than 2 dimensions are returned unchanged. Leading
  /// dimensions are treated as a batch; each trailing matrix is transposed
  /// independently into a freshly allocated buffer.
  Tensor transpose() {
    if (ndim < 2) {
      return this;
    }
    final newShape = List<int>.from(shape);
    final tmp = newShape[ndim - 1];
    newShape[ndim - 1] = newShape[ndim - 2];
    newShape[ndim - 2] = tmp;
    final newData = Float64List(size);
    final rows = shape[ndim - 2];
    final cols = shape[ndim - 1];
    final batchSize = size ~/ (rows * cols);
    for (var b = 0; b < batchSize; b++) {
      final offset = b * rows * cols;
      for (var i = 0; i < rows; i++) {
        for (var j = 0; j < cols; j++) {
          // (i, j) in the source becomes (j, i) in the result.
          newData[offset + j * rows + i] = data[offset + i * cols + j];
        }
      }
    }
    return Tensor.fromTypedData(
      shape: newShape,
      data: newData,
      dtype: dtype,
    );
  }

  /// Sum of all elements (0.0 for an empty tensor).
  double sum() => data.fold(0.0, (a, b) => a + b);

  /// Mean of all elements (NaN for an empty tensor).
  double mean() => sum() / size;

  /// Population standard deviation of all elements.
  double std() {
    final m = mean();
    final variance = data.fold(0.0, (sum, x) => sum + (x - m) * (x - m)) / size;
    return math.sqrt(variance);
  }

  /// Minimum value. Throws [StateError] if the tensor is empty.
  double min() => data.reduce(math.min);

  /// Maximum value. Throws [StateError] if the tensor is empty.
  double max() => data.reduce(math.max);

  /// Flat index of the minimum value (first occurrence on ties).
  int argmin() {
    var minIdx = 0;
    var minVal = data[0];
    for (var i = 1; i < size; i++) {
      if (data[i] < minVal) {
        minVal = data[i];
        minIdx = i;
      }
    }
    return minIdx;
  }

  /// Flat index of the maximum value (first occurrence on ties).
  int argmax() {
    var maxIdx = 0;
    var maxVal = data[0];
    for (var i = 1; i < size; i++) {
      if (data[i] > maxVal) {
        maxVal = data[i];
        maxIdx = i;
      }
    }
    return maxIdx;
  }

  /// Element-wise addition. Shapes must match exactly.
  Tensor add(Tensor other) => _zipWith(other, (a, b) => a + b);

  /// Element-wise subtraction. Shapes must match exactly.
  Tensor sub(Tensor other) => _zipWith(other, (a, b) => a - b);

  /// Element-wise multiplication. Shapes must match exactly.
  Tensor mul(Tensor other) => _zipWith(other, (a, b) => a * b);

  /// Element-wise division. Shapes must match exactly.
  Tensor div(Tensor other) => _zipWith(other, (a, b) => a / b);

  /// Combines this tensor with [other] element-wise using [op].
  Tensor _zipWith(Tensor other, double Function(double, double) op) {
    _checkShapesMatch(other);
    final result = Float64List(size);
    for (var i = 0; i < size; i++) {
      result[i] = op(data[i], other.data[i]);
    }
    return Tensor.fromTypedData(shape: shape, data: result, dtype: dtype);
  }

  /// Adds [scalar] to every element.
  Tensor addScalar(double scalar) => map((x) => x + scalar);

  /// Multiplies every element by [scalar].
  Tensor mulScalar(double scalar) => map((x) => x * scalar);

  /// Applies [fn] element-wise, returning a new tensor.
  Tensor map(double Function(double) fn) {
    final result = Float64List(size);
    for (var i = 0; i < size; i++) {
      result[i] = fn(data[i]);
    }
    return Tensor.fromTypedData(shape: shape, data: result, dtype: dtype);
  }

  /// ReLU activation: max(x, 0) element-wise.
  Tensor relu() => map((x) => x > 0 ? x : 0);

  /// Sigmoid activation: 1 / (1 + e^-x) element-wise.
  Tensor sigmoid() => map((x) => 1.0 / (1.0 + math.exp(-x)));

  /// Hyperbolic tangent activation, element-wise.
  Tensor tanh() => map(math.tanh);

  /// Softmax over a 1D tensor, or over the last axis (rows) of a 2D tensor.
  ///
  /// Subtracts the row maximum before exponentiating for numerical
  /// stability. Throws [UnsupportedError] for tensors with more than
  /// 2 dimensions.
  Tensor softmax() {
    if (ndim == 1) {
      final maxVal = max();
      final expData = data.map((x) => math.exp(x - maxVal)).toList();
      final sumExp = expData.fold(0.0, (a, b) => a + b);
      return Tensor(
        shape: shape,
        data: expData.map((x) => x / sumExp).toList(),
        dtype: dtype,
      );
    } else if (ndim == 2) {
      final rows = shape[0];
      final cols = shape[1];
      final result = Float64List(size);
      for (var i = 0; i < rows; i++) {
        // Row maximum for the stability shift.
        var maxVal = double.negativeInfinity;
        for (var j = 0; j < cols; j++) {
          final v = data[i * cols + j];
          if (v > maxVal) maxVal = v;
        }
        var sumExp = 0.0;
        for (var j = 0; j < cols; j++) {
          final exp = math.exp(data[i * cols + j] - maxVal);
          result[i * cols + j] = exp;
          sumExp += exp;
        }
        for (var j = 0; j < cols; j++) {
          result[i * cols + j] /= sumExp;
        }
      }
      return Tensor.fromTypedData(shape: shape, data: result, dtype: dtype);
    }
    throw UnsupportedError('Softmax only supported for 1D and 2D tensors');
  }

  /// Throws [ArgumentError] unless [other] has exactly this tensor's shape.
  void _checkShapesMatch(Tensor other) {
    if (shape.length != other.shape.length) {
      throw ArgumentError('Shape mismatch: $shape vs ${other.shape}');
    }
    for (var i = 0; i < shape.length; i++) {
      if (shape[i] != other.shape[i]) {
        throw ArgumentError('Shape mismatch: $shape vs ${other.shape}');
      }
    }
  }

  /// Converts to a JSON map for API serialization.
  ///
  /// Element data is emitted as a base64 string of packed float64 bytes
  /// (host byte order), the format [Tensor.fromJson] accepts.
  Map<String, dynamic> toJson() => {
        'shape': shape,
        // Use the view's offset and length: `data` may be a view into a
        // larger buffer, in which case `buffer.asUint8List()` alone would
        // serialize unrelated bytes.
        'data': base64Encode(
          data.buffer.asUint8List(data.offsetInBytes, data.lengthInBytes),
        ),
        'dtype': dtype.value,
        if (id != null) 'id': id,
      };

  /// Converts to a nested list representation mirroring [shape].
  List<dynamic> toNestedList() {
    if (ndim == 1) {
      return data.toList();
    }
    // Recursively slice the flat buffer: at each axis the stride is the
    // product of all inner dimensions.
    List<dynamic> buildNested(int dim, int offset) {
      if (dim == ndim - 1) {
        return data.sublist(offset, offset + shape[dim]).toList();
      }
      final stride =
          shape.sublist(dim + 1).fold<int>(1, (a, b) => a * b);
      return List.generate(
        shape[dim],
        (i) => buildNested(dim + 1, offset + i * stride),
      );
    }

    return buildNested(0, 0);
  }

  @override
  String toString() {
    // Print full contents only for small tensors.
    if (size <= 20) {
      return 'Tensor(shape: $shape, data: ${toNestedList()})';
    }
    return 'Tensor(shape: $shape, dtype: ${dtype.value})';
  }

  /// Deep equality over shape and elements; [dtype] and [id] are ignored.
  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    if (other is! Tensor) return false;
    if (shape.length != other.shape.length) return false;
    for (var i = 0; i < shape.length; i++) {
      if (shape[i] != other.shape[i]) return false;
    }
    for (var i = 0; i < size; i++) {
      if (data[i] != other.data[i]) return false;
    }
    return true;
  }

  /// Hash consistent with [operator ==]: element-wise over shape and data.
  ///
  /// `Object.hash(shape, data)` would hash the lists by identity, giving
  /// equal tensors different hash codes and breaking Set/Map membership.
  @override
  int get hashCode =>
      Object.hash(Object.hashAll(shape), Object.hashAll(data));
}