// synor/sdk/flutter/lib/synor_compute.dart
//
// Commit cb071a7a3b — feat(sdk/flutter): add dataset upload APIs and
// comprehensive examples (Gulshan Yadav, 2026-01-11 16:47:47 +05:30).
//
// Adds comprehensive dataset management to the Flutter SDK, including:
// - Dataset formats: JSONL, CSV, Parquet, Arrow, HuggingFace, TFRecord,
//   WebDataset, Text, ImageFolder, Custom
// - Dataset types: text completion, instruction tuning, chat, Q&A,
//   classification, NER, vision, audio
// - Upload methods: uploadDataset, uploadDatasetFromFile,
//   createDatasetFromRecords
// - Management APIs: listDatasets, getDataset, deleteDataset
// - Dataset preprocessing: splitting, shuffling, deduplication, tokenization
// - Complete examples showing all formats and use cases

/// Synor Compute SDK for Flutter/Dart
///
/// A high-performance SDK for distributed heterogeneous computing.
/// Supports CPU, GPU, TPU, NPU, LPU, FPGA, DSP, WebGPU, and WASM processors.
///
/// ## Quick Start
///
/// ```dart
/// import 'package:synor_compute/synor_compute.dart';
///
/// void main() async {
///   // Create client
///   final client = SynorCompute(apiKey: 'your-api-key');
///
///   // Matrix multiplication
///   final a = Tensor.rand([512, 512]);
///   final b = Tensor.rand([512, 512]);
///   final result = await client.matmul(a, b, options: MatMulOptions(
///     precision: Precision.fp16,
///     processor: ProcessorType.gpu,
///   ));
///
///   print('Result shape: ${result.result!.shape}');
///   print('Execution time: ${result.executionTimeMs}ms');
///
///   // LLM Inference
///   final response = await client.inference(
///     'llama-3-70b',
///     'Explain quantum computing',
///     options: InferenceOptions(maxTokens: 256),
///   );
///   print(response.result);
///
///   // Streaming inference
///   await for (final token in client.inferenceStream(
///     'llama-3-70b',
///     'Write a haiku about computing',
///   )) {
///     stdout.write(token);
///   }
///
///   // Clean up
///   client.dispose();
/// }
/// ```
///
/// ## Features
///
/// - **Matrix Operations**: matmul, conv2d, attention, elementwise, reduce
/// - **LLM Inference**: Standard and streaming inference
/// - **Tensor Management**: Upload, download, and delete tensors
/// - **Job Management**: Submit, poll, cancel, and list jobs
/// - **Pricing**: Get real-time pricing for all processor types
/// - **Usage Statistics**: Track compute usage and costs
///
/// ## Supported Processors
///
/// | Processor | Best For |
/// |-----------|----------|
/// | CPU | General compute, small batches |
/// | GPU | Large matrix operations, training |
/// | TPU | Tensor operations, inference |
/// | NPU | Neural network inference |
/// | LPU | Large language model inference |
/// | FPGA | Custom operations, low latency |
/// | DSP | Signal processing |
/// | WebGPU | Browser-based compute |
/// | WASM | Portable compute |
library synor_compute;

// Shared enums, configuration, per-operation option objects, result types,
// and the SDK's exception type.
export 'src/types.dart'
    show
        Precision,
        ProcessorType,
        Priority,
        JobStatus,
        BalancingStrategy,
        DType,
        SynorConfig,
        MatMulOptions,
        Conv2dOptions,
        AttentionOptions,
        InferenceOptions,
        PricingInfo,
        UsageStats,
        SynorException,
        // Model types
        ModelCategory,
        ModelFormat,
        MlFramework,
        ModelInfo,
        ModelUploadOptions,
        ModelUploadResult,
        // Training types
        TrainingOptions,
        TrainingResult,
        TrainingProgress,
        // Dataset types
        DatasetFormat,
        DatasetType,
        DatasetUploadOptions,
        DatasetSplit,
        DatasetPreprocessing,
        DatasetUploadResult,
        DatasetInfo;

// Tensor value type used as input/output of compute operations
// (see the Quick Start example above).
export 'src/tensor.dart' show Tensor;

// Job lifecycle: results, status updates, job handles, and batching.
export 'src/job.dart' show JobResult, JobStatusUpdate, Job, JobBatch;

// The main API client entry point.
export 'src/client.dart' show SynorCompute;