Expands SDK support to 8 additional languages/frameworks: - Java SDK with Maven/OkHttp/Jackson - Kotlin SDK with Gradle/Ktor/kotlinx.serialization - Swift SDK with Swift Package Manager/async-await - C SDK with CMake/libcurl - C++ SDK with CMake/Modern C++20 - C# SDK with .NET 8.0/HttpClient - Ruby SDK with Bundler/Faraday - Rust SDK with Cargo/reqwest/tokio All SDKs include: - Tensor operations (matmul, conv2d, attention) - LLM inference with streaming support - Model registry, pricing, and usage APIs - Builder patterns where idiomatic - Full type safety
212 lines
4.8 KiB
Ruby
# frozen_string_literal: true
|
|
|
|
module SynorCompute
  # Multi-dimensional tensor for compute operations.
  #
  # Data is stored flat in row-major order and frozen on construction, so
  # every operation returns a new Tensor rather than mutating in place.
  #
  # @example
  #   # Create a 2D tensor
  #   matrix = Tensor.new([2, 3], [1, 2, 3, 4, 5, 6])
  #
  #   # Create random tensor
  #   random = Tensor.rand([512, 512])
  #
  #   # Operations
  #   mean = random.mean
  #   transposed = matrix.transpose
  #
  class Tensor
    attr_reader :shape, :data, :dtype

    # @param shape [Array<Integer>] dimension sizes
    # @param data [Array<Numeric>] flat row-major element data
    # @param dtype [Object] precision marker; Precision is declared elsewhere
    #   in this gem (default evaluated lazily on each call)
    # @raise [ArgumentError] if data length does not match the shape's element count
    def initialize(shape, data, dtype: Precision::FP32)
      expected_size = shape.reduce(1, :*)
      unless data.size == expected_size
        raise ArgumentError, "Data size #{data.size} does not match shape #{shape}"
      end

      # Defensive copies, frozen: tensors are immutable value objects.
      @shape = shape.dup.freeze
      @data = data.dup.freeze
      @dtype = dtype
    end

    # @return [Integer] total number of elements
    def size
      @data.size
    end

    # @return [Integer] number of dimensions
    def ndim
      @shape.size
    end

    # Element lookup by multi-dimensional index (row-major).
    #
    # @raise [ArgumentError] when the number of indices differs from ndim
    # @raise [IndexError] when an index falls outside its dimension
    #   (previously a negative index silently wrapped around via Array#[])
    def [](*indices)
      raise ArgumentError, "Index dimensions must match tensor dimensions" unless indices.size == @shape.size

      idx = 0
      stride = 1
      (@shape.size - 1).downto(0) do |i|
        unless indices[i] >= 0 && indices[i] < @shape[i]
          raise IndexError, "Index #{indices[i]} out of range for dimension #{i} (size #{@shape[i]})"
        end

        idx += indices[i] * stride
        stride *= @shape[i]
      end
      @data[idx]
    end

    # --- Factory methods ---

    # Build a tensor from a flat (1D) or nested (2D) Ruby array.
    # NOTE(review): for 2D input, rows are assumed to be of equal length.
    def self.of(data)
      if data.first.is_a?(Array)
        # 2D array: shape comes from the outer/inner lengths
        rows = data.size
        cols = data.first.size
        flat = data.flatten
        new([rows, cols], flat)
      else
        # 1D array
        new([data.size], data)
      end
    end

    # Accept both zeros(2, 3) and zeros([2, 3]) call styles.
    def self.normalize_shape(shape)
      shape.size == 1 && shape.first.is_a?(Array) ? shape.first : shape
    end
    private_class_method :normalize_shape

    def self.zeros(*shape)
      shape = normalize_shape(shape)
      new(shape, Array.new(shape.reduce(1, :*), 0.0))
    end

    def self.ones(*shape)
      shape = normalize_shape(shape)
      new(shape, Array.new(shape.reduce(1, :*), 1.0))
    end

    # Uniformly random values in [0, 1).
    def self.rand(*shape)
      shape = normalize_shape(shape)
      new(shape, Array.new(shape.reduce(1, :*)) { Random.rand })
    end

    # Standard-normal random values via the Box-Muller transform.
    def self.randn(*shape)
      shape = normalize_shape(shape)
      new(shape, Array.new(shape.reduce(1, :*)) do
        # 1.0 - rand lies in (0, 1], so Math.log never receives zero.
        # (Random.rand alone can return 0.0, which made log yield -Infinity.)
        u1 = 1.0 - Random.rand
        u2 = Random.rand
        Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math::PI * u2)
      end)
    end

    # n-by-n identity matrix.
    def self.eye(n)
      data = Array.new(n * n, 0.0)
      n.times { |i| data[i * n + i] = 1.0 }
      new([n, n], data)
    end

    # Evenly spaced values in the half-open interval [start, stop).
    #
    # @raise [ArgumentError] when step is zero
    def self.arange(start, stop, step = 1.0)
      raise ArgumentError, "step must be nonzero" if step.zero?

      # Force float division: with integer arguments, (stop - start) / step
      # truncated before .ceil and produced too few elements
      # (e.g. arange(0, 10, 3) gave 3 elements instead of 4).
      size = ((stop - start) / step.to_f).ceil
      size = 0 if size.negative?
      data = Array.new(size) { |i| start + i * step }
      new([size], data)
    end

    # num evenly spaced values covering [start, stop] inclusive.
    #
    # @raise [ArgumentError] when num < 1
    def self.linspace(start, stop, num)
      raise ArgumentError, "num must be >= 1" if num < 1
      # Single sample: the old (stop - start) / (num - 1) divided by zero
      # and yielded NaN; follow NumPy's convention and return [start].
      return new([1], [start.to_f]) if num == 1

      step = (stop - start).to_f / (num - 1)
      data = Array.new(num) { |i| start + i * step }
      new([num], data)
    end

    # --- Operations ---

    # Same data viewed under a new shape (accepts splat or array form).
    #
    # @raise [ArgumentError] if the element counts differ
    def reshape(*new_shape)
      new_shape = new_shape.first if new_shape.size == 1 && new_shape.first.is_a?(Array)
      new_size = new_shape.reduce(1, :*)
      raise ArgumentError, "Cannot reshape tensor of size #{size} to #{new_shape}" unless new_size == size

      Tensor.new(new_shape, @data.dup, dtype: @dtype)
    end

    # Matrix transpose.
    #
    # @raise [RuntimeError] for non-2D tensors
    def transpose
      raise "Transpose only supported for 2D tensors" unless ndim == 2

      rows, cols = @shape
      transposed = Array.new(size)
      rows.times do |i|
        cols.times do |j|
          transposed[j * rows + i] = @data[i * cols + j]
        end
      end
      Tensor.new([cols, rows], transposed, dtype: @dtype)
    end

    # --- Reductions ---

    # @return [Float] arithmetic mean of all elements
    def mean
      @data.sum / @data.size.to_f
    end

    # @return [Numeric] sum of all elements
    def sum
      @data.sum
    end

    # Population standard deviation (divides by N, not N - 1).
    def std
      m = mean
      variance = @data.map { |x| (x - m)**2 }.sum / @data.size
      Math.sqrt(variance)
    end

    def max
      @data.max
    end

    def min
      @data.min
    end

    # --- Activations ---

    # Element-wise rectified linear unit: max(0, x).
    def relu
      Tensor.new(@shape, @data.map { |x| [0, x].max }, dtype: @dtype)
    end

    # Element-wise logistic sigmoid: 1 / (1 + e^-x).
    def sigmoid
      Tensor.new(@shape, @data.map { |x| 1.0 / (1.0 + Math.exp(-x)) }, dtype: @dtype)
    end

    # Softmax over all elements; subtracting the max keeps exp from overflowing.
    def softmax
      max_val = max
      exp_values = @data.map { |x| Math.exp(x - max_val) }
      sum = exp_values.sum
      Tensor.new(@shape, exp_values.map { |x| x / sum }, dtype: @dtype)
    end

    # --- Conversion ---

    # @return [Array] plain Ruby array (flat for 1D, array-of-rows for 2D)
    # @raise [RuntimeError] for tensors with more than 2 dimensions
    def to_nested_array
      case ndim
      when 1
        @data.dup
      when 2
        rows, cols = @shape
        Array.new(rows) { |i| @data[i * cols, cols] }
      else
        raise "to_nested_array only supports 1D and 2D tensors"
      end
    end

    # @return [Hash] serializable representation of this tensor
    def to_h
      {
        shape: @shape,
        data: @data,
        dtype: @dtype.to_s
      }
    end

    # Value equality over shape, data, and dtype.
    def ==(other)
      return false unless other.is_a?(Tensor)

      @shape == other.shape && @data == other.data && @dtype == other.dtype
    end
    alias eql? ==

    # Consistent with #eql? so tensors can be used as Hash keys.
    def hash
      [@shape, @data, @dtype].hash
    end

    def to_s
      "Tensor(shape=#{@shape}, dtype=#{@dtype})"
    end
  end
end
|