Expands SDK support to 8 additional languages/frameworks:
- Java SDK with Maven/OkHttp/Jackson
- Kotlin SDK with Gradle/Ktor/kotlinx.serialization
- Swift SDK with Swift Package Manager/async-await
- C SDK with CMake/libcurl
- C++ SDK with CMake/Modern C++20
- C# SDK with .NET 8.0/HttpClient
- Ruby SDK with Bundler/Faraday
- Rust SDK with Cargo/reqwest/tokio

All SDKs include:
- Tensor operations (matmul, conv2d, attention)
- LLM inference with streaming support
- Model registry, pricing, and usage APIs
- Builder patterns where idiomatic
- Full type safety
48 lines
1.3 KiB
Ruby
# frozen_string_literal: true

require_relative "synor_compute/version"
require_relative "synor_compute/types"
require_relative "synor_compute/tensor"
require_relative "synor_compute/client"
|
# Synor Compute SDK for Ruby
#
# Access distributed heterogeneous compute resources (CPU, GPU, TPU, NPU, LPU, FPGA, DSP)
# for AI/ML workloads at 90% cost reduction compared to traditional cloud.
#
# @example Quick Start
#   require 'synor_compute'
#
#   # Create client
#   client = SynorCompute::Client.new(api_key: 'your-api-key')
#
#   # Matrix multiplication on GPU
#   a = SynorCompute::Tensor.rand([512, 512])
#   b = SynorCompute::Tensor.rand([512, 512])
#   result = client.matmul(a, b, processor: :gpu, precision: :fp16)
#
#   if result.success?
#     puts "Time: #{result.execution_time_ms}ms"
#   end
#
#   # LLM inference
#   response = client.inference('llama-3-70b', 'Explain quantum computing')
#   puts response.result
#
#   # Streaming inference
#   client.inference_stream('llama-3-70b', 'Write a poem') do |token|
#     print token
#   end
#
|
module SynorCompute
  # Base class for every exception raised by this SDK, so callers can
  # `rescue SynorCompute::Error` to catch anything the gem raises.
  class Error < StandardError; end

  # Raised when the API responds with an error. Carries the HTTP status
  # code of the failed response alongside the human-readable message.
  class ApiError < Error
    # @return [Integer, nil] HTTP status code of the failed response,
    #   or nil when no status was available
    attr_reader :status_code

    # @param message [String] human-readable error description
    # @param status_code [Integer, nil] HTTP status, when known
    def initialize(message, status_code: nil)
      @status_code = status_code
      super(message)
    end
  end

  # Raised when an operation is attempted on a client that has already
  # been closed.
  class ClientClosedError < Error; end
end
|