/*
 * synor/sdk/c/include/synor_compute.h
 *
 * From commit 3aff77a799 by Gulshan Yadav (2026-01-11 17:46:22 +05:30):
 * feat(sdk): add consumer SDKs for Java, Kotlin, Swift, C, C++, C#, Ruby, and Rust
 *
 * Expands SDK support to 8 additional languages/frameworks:
 * - Java SDK with Maven/OkHttp/Jackson
 * - Kotlin SDK with Gradle/Ktor/kotlinx.serialization
 * - Swift SDK with Swift Package Manager/async-await
 * - C SDK with CMake/libcurl
 * - C++ SDK with CMake/Modern C++20
 * - C# SDK with .NET 8.0/HttpClient
 * - Ruby SDK with Bundler/Faraday
 * - Rust SDK with Cargo/reqwest/tokio
 *
 * All SDKs include:
 * - Tensor operations (matmul, conv2d, attention)
 * - LLM inference with streaming support
 * - Model registry, pricing, and usage APIs
 * - Builder patterns where idiomatic
 * - Full type safety
 */
/**
 * Synor Compute SDK - C Client
 *
 * Access distributed heterogeneous compute resources (CPU, GPU, TPU, NPU, LPU, FPGA, DSP)
 * for AI/ML workloads at 90% cost reduction compared to traditional cloud.
 *
 * Example:
 * ```c
 * #include <synor_compute.h>
 * #include <inttypes.h>
 *
 * int main() {
 *     // Create client
 *     synor_client_t* client = synor_create("your-api-key");
 *
 *     // Create tensors
 *     int shape[] = {512, 512};
 *     synor_tensor_t* a = synor_tensor_rand(shape, 2);
 *     synor_tensor_t* b = synor_tensor_rand(shape, 2);
 *
 *     // Matrix multiplication on GPU
 *     synor_matmul_options_t opts = {
 *         .precision = SYNOR_PRECISION_FP16,
 *         .processor = SYNOR_PROCESSOR_GPU
 *     };
 *     synor_job_result_t* result = synor_matmul(client, a, b, &opts);
 *
 *     if (result->status == SYNOR_STATUS_COMPLETED) {
 *         // execution_time_ms is int64_t, so use the PRId64 format macro
 *         printf("Execution time: %" PRId64 "ms\n", result->execution_time_ms);
 *     }
 *
 *     // Cleanup
 *     synor_job_result_free(result);
 *     synor_tensor_free(a);
 *     synor_tensor_free(b);
 *     synor_destroy(client);
 *
 *     return 0;
 * }
 * ```
 */
#ifndef SYNOR_COMPUTE_H
#define SYNOR_COMPUTE_H
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/* ============ Version ============ */
#define SYNOR_VERSION_MAJOR 0
#define SYNOR_VERSION_MINOR 1
#define SYNOR_VERSION_PATCH 0
#define SYNOR_VERSION "0.1.0"
/* ============ Enums ============ */

/** Processor types for heterogeneous computing. */
typedef enum {
    SYNOR_PROCESSOR_CPU = 0,  /**< General-purpose CPU */
    SYNOR_PROCESSOR_GPU,      /**< Graphics processing unit */
    SYNOR_PROCESSOR_TPU,      /**< Tensor processing unit */
    SYNOR_PROCESSOR_NPU,      /**< Neural processing unit */
    SYNOR_PROCESSOR_LPU,      /**< Language processing unit */
    SYNOR_PROCESSOR_FPGA,     /**< Field-programmable gate array */
    SYNOR_PROCESSOR_DSP,      /**< Digital signal processor */
    SYNOR_PROCESSOR_WEBGPU,   /**< WebGPU backend */
    SYNOR_PROCESSOR_WASM,     /**< WebAssembly backend */
    SYNOR_PROCESSOR_AUTO      /**< Let the service select a processor */
} synor_processor_t;

/** Precision levels for compute operations. */
typedef enum {
    SYNOR_PRECISION_FP64 = 0, /**< 64-bit floating point */
    SYNOR_PRECISION_FP32,     /**< 32-bit floating point */
    SYNOR_PRECISION_FP16,     /**< 16-bit floating point */
    SYNOR_PRECISION_BF16,     /**< bfloat16 */
    SYNOR_PRECISION_INT8,     /**< 8-bit integer (quantized) */
    SYNOR_PRECISION_INT4      /**< 4-bit integer (quantized) */
} synor_precision_t;

/** Task priority levels, ordered from most to least urgent. */
typedef enum {
    SYNOR_PRIORITY_CRITICAL = 0,
    SYNOR_PRIORITY_HIGH,
    SYNOR_PRIORITY_NORMAL,
    SYNOR_PRIORITY_LOW,
    SYNOR_PRIORITY_BACKGROUND
} synor_priority_t;

/** Job lifecycle status. */
typedef enum {
    SYNOR_STATUS_PENDING = 0, /**< Submitted, not yet accepted */
    SYNOR_STATUS_QUEUED,      /**< Accepted, waiting for a processor */
    SYNOR_STATUS_RUNNING,     /**< Currently executing */
    SYNOR_STATUS_COMPLETED,   /**< Finished successfully */
    SYNOR_STATUS_FAILED,      /**< Finished with an error (see synor_job_result_t.error) */
    SYNOR_STATUS_CANCELLED    /**< Cancelled before completion */
} synor_status_t;

/** Model categories used for registry filtering. */
typedef enum {
    SYNOR_MODEL_LLM = 0,
    SYNOR_MODEL_EMBEDDING,
    SYNOR_MODEL_IMAGE_GENERATION,
    SYNOR_MODEL_IMAGE_CLASSIFICATION,
    SYNOR_MODEL_OBJECT_DETECTION,
    SYNOR_MODEL_SPEECH_TO_TEXT,
    SYNOR_MODEL_TEXT_TO_SPEECH,
    SYNOR_MODEL_CODE,
    SYNOR_MODEL_CUSTOM
} synor_model_category_t;

/** Error codes returned by API functions that report status directly. */
typedef enum {
    SYNOR_OK = 0,                 /**< Success */
    SYNOR_ERROR_INVALID_ARGUMENT, /**< A parameter was NULL or out of range */
    SYNOR_ERROR_OUT_OF_MEMORY,    /**< Allocation failure */
    SYNOR_ERROR_NETWORK,          /**< Transport-level failure */
    SYNOR_ERROR_API,              /**< Server returned an error */
    SYNOR_ERROR_TIMEOUT,          /**< Request exceeded the configured timeout */
    SYNOR_ERROR_CLIENT_CLOSED     /**< Client was already destroyed/closed */
} synor_error_t;
/* ============ Types ============ */

/**
 * Opaque client handle.
 * Create with synor_create()/synor_create_with_config(); release with synor_destroy().
 */
typedef struct synor_client synor_client_t;

/**
 * Dense tensor container.
 *
 * NOTE(review): element storage is always `double` on the host regardless of
 * `dtype`; presumably `dtype` only selects the wire/compute precision — confirm
 * with the implementation.
 */
typedef struct {
    int* shape;              /**< Array of `ndim` dimension sizes. */
    int ndim;                /**< Number of dimensions in `shape`. */
    double* data;            /**< Element storage; `size` values. */
    size_t size;             /**< Total element count (presumably the product of `shape` — confirm). */
    synor_precision_t dtype; /**< Requested compute precision. */
} synor_tensor_t;

/**
 * Result of a submitted compute/inference job.
 * Free with synor_job_result_free().
 *
 * NOTE(review): `result_tensor` and `result_string` appear to be mutually
 * exclusive depending on job type (tensor op vs. inference) — confirm.
 */
typedef struct {
    char* job_id;                  /**< Server-assigned job identifier. */
    synor_status_t status;         /**< Final job status. */
    synor_tensor_t* result_tensor; /**< Tensor output for tensor operations. */
    char* result_string;           /**< Text output for inference jobs. */
    char* error;                   /**< Error message (relevant when status is SYNOR_STATUS_FAILED). */
    int64_t execution_time_ms;     /**< Execution time in milliseconds. */
    synor_processor_t processor;   /**< Processor the job ran on. */
    double cost;                   /**< Cost charged for the job. */
} synor_job_result_t;

/**
 * Client configuration.
 * Obtain library defaults with synor_config_default(), then override fields.
 */
typedef struct {
    const char* api_key;                 /**< API key for authentication. */
    const char* base_url;                /**< Service endpoint URL. */
    synor_processor_t default_processor; /**< Processor used when options omit one. */
    synor_precision_t default_precision; /**< Precision used when options omit one. */
    synor_priority_t default_priority;   /**< Priority used when options omit one. */
    int timeout_ms;                      /**< Request timeout in milliseconds. */
    bool debug;                          /**< Enable debug logging. */
} synor_config_t;

/** Matrix multiplication options (see synor_matmul). */
typedef struct {
    synor_precision_t precision;
    synor_processor_t processor;
    synor_priority_t priority;
} synor_matmul_options_t;

/** Convolution options (see synor_conv2d). */
typedef struct {
    int stride[2];               /**< Per-axis stride — assumed (height, width) order; confirm. */
    int padding[2];              /**< Per-axis padding — assumed (height, width) order; confirm. */
    synor_precision_t precision;
    synor_processor_t processor;
} synor_conv2d_options_t;

/** Attention options (see synor_attention). */
typedef struct {
    int num_heads;               /**< Number of attention heads. */
    bool flash;                  /**< Presumably enables a FlashAttention-style kernel — confirm. */
    synor_precision_t precision;
    synor_processor_t processor;
} synor_attention_options_t;

/** LLM inference sampling options (see synor_inference). */
typedef struct {
    int max_tokens;              /**< Maximum number of tokens to generate. */
    double temperature;          /**< Sampling temperature. */
    double top_p;                /**< Nucleus sampling threshold. */
    int top_k;                   /**< Top-k sampling cutoff. */
    synor_processor_t processor;
} synor_inference_options_t;

/** Model registry entry. Free with synor_model_info_free() / synor_model_info_array_free(). */
typedef struct {
    char* id;                               /**< Model identifier. */
    char* name;                             /**< Human-readable name. */
    char* description;
    synor_model_category_t category;
    int64_t parameters;                     /**< Parameter count. */
    int context_length;                     /**< Context window length in tokens. */
    char* format;                           /**< Model file format. */
    synor_processor_t recommended_processor;
    char* license;
    char* cid;                              /**< Content identifier for the model artifact. */
} synor_model_info_t;

/** Per-processor pricing entry. Free arrays with synor_pricing_info_array_free(). */
typedef struct {
    synor_processor_t processor;
    double price_per_second;     /**< Price per compute-second. */
    int available_units;         /**< Units currently available. */
    double utilization_percent;  /**< Current utilization (0-100). */
    double aws_equivalent_price; /**< Comparable AWS price for reference. */
    double savings_percent;      /**< Savings relative to the AWS equivalent. */
} synor_pricing_info_t;

/** Aggregate account usage statistics. Free with synor_usage_stats_free(). */
typedef struct {
    int total_jobs;
    int completed_jobs;
    int failed_jobs;
    double total_compute_seconds;
    double total_cost;
} synor_usage_stats_t;

/**
 * Stream callback for token-by-token inference output.
 * @param token     The next generated token (lifetime limited to the callback — confirm).
 * @param user_data Opaque pointer supplied to synor_inference_stream().
 */
typedef void (*synor_stream_callback_t)(const char* token, void* user_data);
/* ============ Client Functions ============ */

/**
 * Create a new client with an API key and default configuration.
 * @param api_key API key for authentication
 * @return Client handle or NULL on error; release with synor_destroy()
 */
synor_client_t* synor_create(const char* api_key);

/**
 * Create a new client from an explicit configuration.
 * @param config Configuration structure (start from synor_config_default())
 * @return Client handle or NULL on error; release with synor_destroy()
 */
synor_client_t* synor_create_with_config(const synor_config_t* config);

/**
 * Destroy a client and free its resources.
 * NOTE(review): behavior for NULL is unspecified here — presumably a no-op; confirm.
 * @param client Client handle
 */
void synor_destroy(synor_client_t* client);

/**
 * Fill a configuration structure with library defaults.
 * @param config Configuration structure to fill
 */
void synor_config_default(synor_config_t* config);
/* ============ Matrix Operations ============ */

/**
 * Perform matrix multiplication.
 * @param client Client handle
 * @param a First tensor
 * @param b Second tensor
 * @param options Options (can be NULL for defaults)
 * @return Job result (must be freed with synor_job_result_free)
 */
synor_job_result_t* synor_matmul(
    synor_client_t* client,
    const synor_tensor_t* a,
    const synor_tensor_t* b,
    const synor_matmul_options_t* options
);

/**
 * Perform 2D convolution.
 * @param client Client handle
 * @param input Input tensor
 * @param kernel Convolution kernel tensor
 * @param options Options (can be NULL for defaults)
 * @return Job result (must be freed with synor_job_result_free)
 */
synor_job_result_t* synor_conv2d(
    synor_client_t* client,
    const synor_tensor_t* input,
    const synor_tensor_t* kernel,
    const synor_conv2d_options_t* options
);

/**
 * Perform attention computation over query/key/value tensors.
 * @param client Client handle
 * @param query Query tensor
 * @param key Key tensor
 * @param value Value tensor
 * @param options Options (can be NULL for defaults)
 * @return Job result (must be freed with synor_job_result_free)
 */
synor_job_result_t* synor_attention(
    synor_client_t* client,
    const synor_tensor_t* query,
    const synor_tensor_t* key,
    const synor_tensor_t* value,
    const synor_attention_options_t* options
);
/* ============ LLM Inference ============ */

/**
 * Run inference on a model and block until the job completes.
 * The generated text is returned in synor_job_result_t.result_string.
 * @param client Client handle
 * @param model Model name or CID
 * @param prompt Input prompt
 * @param options Options (can be NULL for defaults)
 * @return Job result (must be freed with synor_job_result_free)
 */
synor_job_result_t* synor_inference(
    synor_client_t* client,
    const char* model,
    const char* prompt,
    const synor_inference_options_t* options
);

/**
 * Run streaming inference, invoking the callback once per generated token.
 * @param client Client handle
 * @param model Model name or CID
 * @param prompt Input prompt
 * @param options Options (can be NULL for defaults)
 * @param callback Callback for each token
 * @param user_data User data passed to callback
 * @return Error code (SYNOR_OK on success)
 */
synor_error_t synor_inference_stream(
    synor_client_t* client,
    const char* model,
    const char* prompt,
    const synor_inference_options_t* options,
    synor_stream_callback_t callback,
    void* user_data
);
/* ============ Model Registry ============ */

/**
 * List available models.
 *
 * NOTE(review): passing -1 "for all" through an enum-typed parameter relies on
 * an out-of-range enum value; a dedicated SYNOR_MODEL_ALL enumerator would be
 * cleaner — flagging for the API owner (changing it here would break the ABI).
 *
 * @param client Client handle
 * @param category Filter by category (or -1 for all)
 * @param models Output array (must be freed with synor_model_info_array_free)
 * @param count Output count
 * @return Error code (SYNOR_OK on success)
 */
synor_error_t synor_list_models(
    synor_client_t* client,
    synor_model_category_t category,
    synor_model_info_t** models,
    size_t* count
);

/**
 * Get a model by ID.
 * @param client Client handle
 * @param model_id Model identifier
 * @return Model info (must be freed with synor_model_info_free)
 */
synor_model_info_t* synor_get_model(
    synor_client_t* client,
    const char* model_id
);

/**
 * Search models by free-text query.
 * @param client Client handle
 * @param query Search query string
 * @param models Output array (must be freed with synor_model_info_array_free)
 * @param count Output count
 * @return Error code (SYNOR_OK on success)
 */
synor_error_t synor_search_models(
    synor_client_t* client,
    const char* query,
    synor_model_info_t** models,
    size_t* count
);
/* ============ Pricing & Usage ============ */

/**
 * Get per-processor pricing information.
 * @param client Client handle
 * @param pricing Output array (must be freed with synor_pricing_info_array_free)
 * @param count Output count
 * @return Error code (SYNOR_OK on success)
 */
synor_error_t synor_get_pricing(
    synor_client_t* client,
    synor_pricing_info_t** pricing,
    size_t* count
);

/**
 * Get account usage statistics.
 * @param client Client handle
 * @return Usage stats (must be freed with synor_usage_stats_free)
 */
synor_usage_stats_t* synor_get_usage(synor_client_t* client);

/* ============ Health Check ============ */

/**
 * Check service health.
 * @param client Client handle
 * @return true if healthy, false otherwise
 */
bool synor_health_check(synor_client_t* client);
/* ============ Tensor Functions ============ */

/*
 * All tensor constructors below return a newly allocated tensor that the
 * caller must release with synor_tensor_free() (see the usage example at the
 * top of this header). Presumably they return NULL on allocation failure or
 * invalid arguments — confirm with the implementation.
 */

/**
 * Create a tensor from caller-supplied data.
 * @param shape Array of `ndim` dimension sizes
 * @param ndim Number of dimensions
 * @param data Element values (presumably copied, not borrowed — confirm)
 * @param dtype Requested compute precision
 * @return New tensor (must be freed with synor_tensor_free)
 */
synor_tensor_t* synor_tensor_create(
    const int* shape,
    int ndim,
    const double* data,
    synor_precision_t dtype
);

/** Create a tensor filled with zeros. */
synor_tensor_t* synor_tensor_zeros(const int* shape, int ndim);

/** Create a tensor filled with ones. */
synor_tensor_t* synor_tensor_ones(const int* shape, int ndim);

/** Create a tensor with uniform random values in [0, 1). */
synor_tensor_t* synor_tensor_rand(const int* shape, int ndim);

/** Create a tensor with normally distributed random values. */
synor_tensor_t* synor_tensor_randn(const int* shape, int ndim);

/** Create an n-by-n identity matrix. */
synor_tensor_t* synor_tensor_eye(int n);

/** Reshape a tensor; returns a new tensor, the input is untouched. */
synor_tensor_t* synor_tensor_reshape(const synor_tensor_t* tensor, const int* new_shape, int new_ndim);

/** Transpose a 2D tensor; returns a new tensor, the input is untouched. */
synor_tensor_t* synor_tensor_transpose(const synor_tensor_t* tensor);

/** Arithmetic mean of all elements. */
double synor_tensor_mean(const synor_tensor_t* tensor);

/** Sum of all elements. */
double synor_tensor_sum(const synor_tensor_t* tensor);

/** Standard deviation of all elements. */
double synor_tensor_std(const synor_tensor_t* tensor);

/** Maximum element value. */
double synor_tensor_max(const synor_tensor_t* tensor);

/** Minimum element value. */
double synor_tensor_min(const synor_tensor_t* tensor);

/** Free a tensor and its owned buffers. */
void synor_tensor_free(synor_tensor_t* tensor);
/* ============ Memory Management ============ */

/** Free a job result returned by synor_matmul/synor_conv2d/synor_attention/synor_inference. */
void synor_job_result_free(synor_job_result_t* result);

/** Free a single model info returned by synor_get_model. */
void synor_model_info_free(synor_model_info_t* info);

/** Free a model info array returned by synor_list_models/synor_search_models. */
void synor_model_info_array_free(synor_model_info_t* models, size_t count);

/** Free a pricing info array returned by synor_get_pricing. */
void synor_pricing_info_array_free(synor_pricing_info_t* pricing, size_t count);

/** Free usage stats returned by synor_get_usage. */
void synor_usage_stats_free(synor_usage_stats_t* stats);
/* ============ Utility Functions ============ */

/*
 * The string-conversion helpers below presumably return pointers to static
 * storage that must not be freed by the caller — confirm with the
 * implementation.
 */

/** Human-readable name for an error code. */
const char* synor_error_string(synor_error_t error);

/** Human-readable name for a processor type. */
const char* synor_processor_string(synor_processor_t processor);

/** Human-readable name for a precision level. */
const char* synor_precision_string(synor_precision_t precision);

/** Human-readable name for a job status. */
const char* synor_status_string(synor_status_t status);
#ifdef __cplusplus
}
#endif
#endif /* SYNOR_COMPUTE_H */