synor/sdk/cpp/src/synor_compute.cpp
Gulshan Yadav 3aff77a799 feat(sdk): add consumer SDKs for Java, Kotlin, Swift, C, C++, C#, Ruby, and Rust
Expands SDK support to 8 additional languages/frameworks:
- Java SDK with Maven/OkHttp/Jackson
- Kotlin SDK with Gradle/Ktor/kotlinx.serialization
- Swift SDK with Swift Package Manager/async-await
- C SDK with CMake/libcurl
- C++ SDK with CMake/Modern C++20
- C# SDK with .NET 8.0/HttpClient
- Ruby SDK with Bundler/Faraday
- Rust SDK with Cargo/reqwest/tokio

All SDKs include:
- Tensor operations (matmul, conv2d, attention)
- LLM inference with streaming support
- Model registry, pricing, and usage APIs
- Builder patterns where idiomatic
- Full type safety
2026-01-11 17:46:22 +05:30

303 lines
9.6 KiB
C++

/**
* Synor Compute SDK - C++ Implementation
*/
#include "synor/compute.hpp"
#include <cmath>
#include <random>
#include <numeric>
#include <algorithm>
#include <stdexcept>
#include <sstream>
namespace synor {
// ============ Tensor Implementation ============
/**
 * Construct a tensor from an explicit shape and a flat row-major buffer.
 *
 * @param shape  Dimension sizes; every entry must be non-negative.
 * @param data   Element data; length must equal the product of the dims.
 * @param dtype  Nominal precision tag (storage is always double here).
 * @throws std::invalid_argument if a dimension is negative or the data
 *         length does not match the shape's element count.
 */
Tensor::Tensor(std::vector<int> shape, std::vector<double> data, Precision dtype)
    : shape_(std::move(shape)), data_(std::move(data)), dtype_(dtype) {
    size_t expected = 1;
    for (int dim : shape_) {
        // A negative dimension would wrap in size_t arithmetic and make the
        // size check below meaningless, so reject it explicitly.
        if (dim < 0) {
            throw std::invalid_argument("Tensor dimensions must be non-negative");
        }
        expected *= static_cast<size_t>(dim);
    }
    if (data_.size() != expected) {
        throw std::invalid_argument("Data size does not match shape");
    }
}
/**
 * Construct a tensor by copying elements from a non-owning span.
 *
 * @param shape  Dimension sizes; every entry must be non-negative.
 * @param data   Row-major element view; its length must equal the product
 *               of the dims. The data is copied, so the span may dangle
 *               after construction.
 * @param dtype  Nominal precision tag.
 * @throws std::invalid_argument if a dimension is negative or the data
 *         length does not match the shape's element count.
 */
Tensor::Tensor(std::vector<int> shape, std::span<const double> data, Precision dtype)
    : shape_(std::move(shape)), data_(data.begin(), data.end()), dtype_(dtype) {
    size_t expected = 1;
    for (int dim : shape_) {
        // Negative dims wrap in size_t arithmetic; fail fast instead.
        if (dim < 0) {
            throw std::invalid_argument("Tensor dimensions must be non-negative");
        }
        expected *= static_cast<size_t>(dim);
    }
    if (data_.size() != expected) {
        throw std::invalid_argument("Data size does not match shape");
    }
}
/// Create a tensor of the given shape with every element set to 0.0.
Tensor Tensor::zeros(std::vector<int> shape, Precision dtype) {
    size_t count = 1;
    for (int dim : shape) {
        count *= dim;
    }
    std::vector<double> filled(count, 0.0);
    return Tensor(std::move(shape), std::move(filled), dtype);
}
/// Create a tensor of the given shape with every element set to 1.0.
Tensor Tensor::ones(std::vector<int> shape, Precision dtype) {
    size_t count = 1;
    for (int dim : shape) {
        count *= dim;
    }
    std::vector<double> filled(count, 1.0);
    return Tensor(std::move(shape), std::move(filled), dtype);
}
/// Tensor of uniform random values in [0, 1).
/// NOTE(review): a fresh random_device/mt19937 is seeded on every call
/// (matching the original), so results are non-deterministic across calls.
Tensor Tensor::rand(std::vector<int> shape, Precision dtype) {
    size_t count = 1;
    for (int dim : shape) {
        count *= dim;
    }
    std::random_device seed_source;
    std::mt19937 engine(seed_source());
    std::uniform_real_distribution<> uniform(0.0, 1.0);
    std::vector<double> values;
    values.reserve(count);
    for (size_t i = 0; i < count; ++i) {
        values.push_back(uniform(engine));
    }
    return Tensor(std::move(shape), std::move(values), dtype);
}
/// Tensor of standard-normal random values (mean 0, stddev 1).
/// NOTE(review): freshly seeded on every call, matching the original.
Tensor Tensor::randn(std::vector<int> shape, Precision dtype) {
    size_t count = 1;
    for (int dim : shape) {
        count *= dim;
    }
    std::random_device seed_source;
    std::mt19937 engine(seed_source());
    std::normal_distribution<> gaussian(0.0, 1.0);
    std::vector<double> values;
    values.reserve(count);
    for (size_t i = 0; i < count; ++i) {
        values.push_back(gaussian(engine));
    }
    return Tensor(std::move(shape), std::move(values), dtype);
}
/// n-by-n identity matrix: 1.0 on the main diagonal, 0.0 elsewhere.
Tensor Tensor::eye(int n, Precision dtype) {
    std::vector<double> cells(n * n, 0.0);
    for (int row = 0; row < n; ++row) {
        // Row-major offset of the diagonal element in row `row`.
        cells[row * n + row] = 1.0;
    }
    return Tensor({n, n}, std::move(cells), dtype);
}
/**
 * Evenly spaced values over the half-open interval [start, end).
 *
 * @param start First value.
 * @param end   Exclusive upper bound.
 * @param step  Spacing between consecutive values; must be non-zero.
 * @return 1-D tensor; empty when the interval is empty (e.g. end <= start
 *         with a positive step).
 * @throws std::invalid_argument if step is zero — the original divided by
 *         zero and cast the resulting inf/NaN to int (undefined behavior).
 */
Tensor Tensor::arange(double start, double end, double step) {
    if (step == 0.0) {
        throw std::invalid_argument("arange step must be non-zero");
    }
    double span = (end - start) / step;
    // Clamp to zero so a direction mismatch produces an empty tensor
    // instead of feeding a negative size to std::vector (UB).
    int size = span > 0.0 ? static_cast<int>(std::ceil(span)) : 0;
    std::vector<double> data(size);
    for (int i = 0; i < size; i++) {
        data[i] = start + i * step;
    }
    return Tensor({size}, std::move(data));
}
/**
 * `num` evenly spaced values over the closed interval [start, end],
 * including both endpoints (NumPy linspace semantics).
 *
 * @param start First value.
 * @param end   Last value.
 * @param num   Number of samples; must be non-negative.
 * @return 1-D tensor of `num` values; [start] when num == 1 (the original
 *         divided by num - 1 == 0 here), empty when num == 0.
 * @throws std::invalid_argument if num is negative (the original passed
 *         a negative count to std::vector — undefined behavior).
 */
Tensor Tensor::linspace(double start, double end, int num) {
    if (num < 0) {
        throw std::invalid_argument("linspace num must be non-negative");
    }
    if (num == 0) {
        return Tensor({0}, std::vector<double>{});
    }
    if (num == 1) {
        // A single sample is just the start point; avoids 0/0 in `step`.
        return Tensor({1}, std::vector<double>{start});
    }
    std::vector<double> data(num);
    double step = (end - start) / (num - 1);
    for (int i = 0; i < num; i++) {
        data[i] = start + i * step;
    }
    return Tensor({num}, std::move(data));
}
/// Return a tensor with the same data but a new shape; the element count
/// must be unchanged.
Tensor Tensor::reshape(std::vector<int> new_shape) const {
    const size_t target = std::accumulate(
        new_shape.begin(), new_shape.end(), size_t{1},
        [](size_t acc, int dim) { return acc * dim; });
    if (target != size()) {
        throw std::invalid_argument("Cannot reshape tensor to incompatible size");
    }
    return Tensor(std::move(new_shape), data_, dtype_);
}
/// Swap the two axes of a 2-D tensor. Throws std::invalid_argument for
/// any other rank.
Tensor Tensor::transpose() const {
    if (ndim() != 2) {
        throw std::invalid_argument("Transpose only supported for 2D tensors");
    }
    const int rows = shape_[0];
    const int cols = shape_[1];
    std::vector<double> flipped(data_.size());
    // Walk the source row-major and scatter each element into its
    // transposed position (column-major relative to the source).
    size_t src = 0;
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c, ++src) {
            flipped[c * rows + r] = data_[src];
        }
    }
    return Tensor({cols, rows}, std::move(flipped), dtype_);
}
/// Arithmetic mean of all elements. An empty tensor yields NaN
/// (0.0 / 0), matching the original implementation.
double Tensor::mean() const {
    double total = 0.0;
    for (double v : data_) {
        total += v;
    }
    return total / static_cast<double>(data_.size());
}
/// Sum of all elements (left-to-right accumulation, same as the
/// std::accumulate it replaces).
double Tensor::sum() const {
    double total = 0.0;
    for (double v : data_) {
        total += v;
    }
    return total;
}
/// Population standard deviation (divides by N, not N - 1).
double Tensor::std() const {
    const double mu = mean();
    double squared_error = 0.0;
    for (double v : data_) {
        const double delta = v - mu;
        squared_error += delta * delta;
    }
    return std::sqrt(squared_error / data_.size());
}
/**
 * Largest element of the tensor.
 * @throws std::invalid_argument on an empty tensor — the original
 *         dereferenced max_element's end iterator in that case (UB).
 */
double Tensor::max() const {
    if (data_.empty()) {
        throw std::invalid_argument("max() called on empty tensor");
    }
    return *std::max_element(data_.begin(), data_.end());
}
/**
 * Smallest element of the tensor.
 * @throws std::invalid_argument on an empty tensor — the original
 *         dereferenced min_element's end iterator in that case (UB).
 */
double Tensor::min() const {
    if (data_.empty()) {
        throw std::invalid_argument("min() called on empty tensor");
    }
    return *std::min_element(data_.begin(), data_.end());
}
/// Elementwise ReLU: negative values become 0.0, others pass through.
Tensor Tensor::relu() const {
    std::vector<double> clipped;
    clipped.reserve(data_.size());
    for (double v : data_) {
        clipped.push_back(v > 0.0 ? v : 0.0);
    }
    return Tensor(shape_, std::move(clipped), dtype_);
}
/// Elementwise logistic sigmoid: 1 / (1 + e^-x).
Tensor Tensor::sigmoid() const {
    std::vector<double> squashed;
    squashed.reserve(data_.size());
    for (double v : data_) {
        squashed.push_back(1.0 / (1.0 + std::exp(-v)));
    }
    return Tensor(shape_, std::move(squashed), dtype_);
}
/// Numerically stable softmax over all elements: subtracts the global max
/// before exponentiating, then normalizes so the result sums to 1.
Tensor Tensor::softmax() const {
    const double peak = max();
    std::vector<double> probs;
    probs.reserve(data_.size());
    double total = 0.0;
    for (double v : data_) {
        probs.push_back(std::exp(v - peak));
        total += probs.back();
    }
    for (double& p : probs) {
        p /= total;
    }
    return Tensor(shape_, std::move(probs), dtype_);
}
/// Element access by multi-dimensional index, e.g. t({i, j, k}).
/// Row-major flattening: the stride grows from the innermost dimension
/// outward. Individual indices are NOT bounds-checked, matching the
/// original behavior.
double Tensor::operator()(std::initializer_list<int> indices) const {
    if (indices.size() != shape_.size()) {
        throw std::invalid_argument("Index dimensions must match tensor dimensions");
    }
    size_t flat = 0;
    size_t weight = 1;
    const int* cursor = indices.end();
    for (auto dim = shape_.rbegin(); dim != shape_.rend(); ++dim) {
        --cursor;
        flat += *cursor * weight;
        weight *= *dim;
    }
    return data_[flat];
}
/// Two tensors are equal when shape, element data, and precision tag all
/// match exactly (bitwise-exact double comparison).
bool Tensor::operator==(const Tensor& other) const {
    if (shape_ != other.shape_) {
        return false;
    }
    if (data_ != other.data_) {
        return false;
    }
    return dtype_ == other.dtype_;
}
// ============ ModelInfo Implementation ============
/// Human-readable parameter count: "7B", "350M", "125K", or the raw
/// number below one thousand. "Unknown" when no count is recorded.
std::string ModelInfo::formatted_parameters() const {
    if (!parameters) return "Unknown";
    const int64_t count = *parameters;
    struct Scale {
        int64_t divisor;
        const char* suffix;
    };
    static constexpr Scale kScales[] = {
        {1'000'000'000, "B"},
        {1'000'000, "M"},
        {1'000, "K"},
    };
    for (const auto& scale : kScales) {
        if (count >= scale.divisor) {
            // Integer division truncates: 1.5e9 renders as "1B",
            // matching the original implementation.
            return std::to_string(count / scale.divisor) + scale.suffix;
        }
    }
    return std::to_string(count);
}
// ============ Utility Functions ============
/// Lowercase wire name for a processor type; "unknown" for any value
/// outside the enumeration.
std::string_view to_string(ProcessorType type) {
    if (type == ProcessorType::CPU) return "cpu";
    if (type == ProcessorType::GPU) return "gpu";
    if (type == ProcessorType::TPU) return "tpu";
    if (type == ProcessorType::NPU) return "npu";
    if (type == ProcessorType::LPU) return "lpu";
    if (type == ProcessorType::FPGA) return "fpga";
    if (type == ProcessorType::DSP) return "dsp";
    if (type == ProcessorType::WebGPU) return "webgpu";
    if (type == ProcessorType::WASM) return "wasm";
    if (type == ProcessorType::Auto) return "auto";
    return "unknown";
}
/// Lowercase wire name for a precision; "unknown" for any value outside
/// the enumeration.
std::string_view to_string(Precision precision) {
    if (precision == Precision::FP64) return "fp64";
    if (precision == Precision::FP32) return "fp32";
    if (precision == Precision::FP16) return "fp16";
    if (precision == Precision::BF16) return "bf16";
    if (precision == Precision::INT8) return "int8";
    if (precision == Precision::INT4) return "int4";
    return "unknown";
}
/// Lowercase wire name for a job priority; "unknown" for any value
/// outside the enumeration.
std::string_view to_string(Priority priority) {
    if (priority == Priority::Critical) return "critical";
    if (priority == Priority::High) return "high";
    if (priority == Priority::Normal) return "normal";
    if (priority == Priority::Low) return "low";
    if (priority == Priority::Background) return "background";
    return "unknown";
}
/// Lowercase wire name for a job status; "unknown" for any value outside
/// the enumeration.
std::string_view to_string(JobStatus status) {
    if (status == JobStatus::Pending) return "pending";
    if (status == JobStatus::Queued) return "queued";
    if (status == JobStatus::Running) return "running";
    if (status == JobStatus::Completed) return "completed";
    if (status == JobStatus::Failed) return "failed";
    if (status == JobStatus::Cancelled) return "cancelled";
    return "unknown";
}
/// Lowercase snake_case wire name for a model category; "unknown" for any
/// value outside the enumeration.
std::string_view to_string(ModelCategory category) {
    if (category == ModelCategory::LLM) return "llm";
    if (category == ModelCategory::Embedding) return "embedding";
    if (category == ModelCategory::ImageGeneration) return "image_generation";
    if (category == ModelCategory::ImageClassification) return "image_classification";
    if (category == ModelCategory::ObjectDetection) return "object_detection";
    if (category == ModelCategory::SpeechToText) return "speech_to_text";
    if (category == ModelCategory::TextToSpeech) return "text_to_speech";
    if (category == ModelCategory::Code) return "code";
    if (category == ModelCategory::Custom) return "custom";
    return "unknown";
}
std::optional<ProcessorType> processor_from_string(std::string_view str) {
if (str == "cpu") return ProcessorType::CPU;
if (str == "gpu") return ProcessorType::GPU;
if (str == "tpu") return ProcessorType::TPU;
if (str == "npu") return ProcessorType::NPU;
if (str == "lpu") return ProcessorType::LPU;
if (str == "fpga") return ProcessorType::FPGA;
if (str == "dsp") return ProcessorType::DSP;
if (str == "webgpu") return ProcessorType::WebGPU;
if (str == "wasm") return ProcessorType::WASM;
if (str == "auto") return ProcessorType::Auto;
return std::nullopt;
}
std::optional<Precision> precision_from_string(std::string_view str) {
if (str == "fp64") return Precision::FP64;
if (str == "fp32") return Precision::FP32;
if (str == "fp16") return Precision::FP16;
if (str == "bf16") return Precision::BF16;
if (str == "int8") return Precision::INT8;
if (str == "int4") return Precision::INT4;
return std::nullopt;
}
} // namespace synor