feat(database): complete Phase 10 Database Gateway
Add an HTTP REST API gateway for Synor Database:
- Gateway server with Axum HTTP framework
- API key authentication with permissions and rate limiting
- Full REST endpoints for all database models:
  - Key-Value: GET/PUT/DELETE /kv/:key, POST /kv/batch
  - Documents: CRUD operations, MongoDB-style queries
  - Vectors: embedding insert, similarity search
  - Time-series: metrics recording and queries
- Usage metering for billing integration
- CORS and request timeout configuration

All 51 tests passing. Phase 10 now complete (100%).
This commit is contained in:
parent
78c226a098
commit
ab4c967a97
8 changed files with 1865 additions and 14 deletions
|
|
@ -26,12 +26,16 @@ hex.workspace = true
|
|||
blake3.workspace = true
|
||||
|
||||
# Async
|
||||
tokio = { workspace = true, features = ["sync", "rt-multi-thread"] }
|
||||
tokio = { workspace = true, features = ["sync", "rt-multi-thread", "net"] }
|
||||
|
||||
# Data structures
|
||||
lru = "0.12"
|
||||
indexmap = "2.2"
|
||||
|
||||
# HTTP Gateway
|
||||
axum.workspace = true
|
||||
tower-http = { version = "0.5", features = ["cors", "trace"] }
|
||||
|
||||
# Vector operations (for AI/RAG)
|
||||
# Using pure Rust for portability
|
||||
|
||||
|
|
|
|||
336
crates/synor-database/src/gateway/auth.rs
Normal file
336
crates/synor-database/src/gateway/auth.rs
Normal file
|
|
@ -0,0 +1,336 @@
|
|||
//! Authentication and authorization for Database Gateway.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use parking_lot::RwLock;
|
||||
|
||||
/// API key for authentication.
///
/// Secrets are never stored in plaintext: only the BLAKE3 hash of the
/// secret is kept (see `ApiKey::new` / `validate_secret`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ApiKey {
    /// Key identifier (public). Format `sdb_<hex>`, see `generate_key_id`.
    pub key_id: String,
    /// Secret key (hashed). BLAKE3 digest of the secret string.
    pub secret_hash: [u8; 32],
    /// Owner address.
    pub owner: [u8; 32],
    /// Database access permissions. Empty means "all databases"
    /// (see `can_access_database`).
    pub databases: Vec<String>,
    /// Permission level.
    pub permissions: Permissions,
    /// Rate limit (requests per minute).
    pub rate_limit: u32,
    /// Created timestamp (Unix seconds).
    pub created_at: u64,
    /// Expiry timestamp in Unix seconds (0 = no expiry).
    pub expires_at: u64,
    /// Whether the key is active.
    pub active: bool,
}
|
||||
|
||||
impl ApiKey {
|
||||
/// Creates a new API key.
|
||||
pub fn new(owner: [u8; 32], secret: &str) -> Self {
|
||||
let key_id = generate_key_id();
|
||||
let secret_hash = *blake3::hash(secret.as_bytes()).as_bytes();
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs();
|
||||
|
||||
Self {
|
||||
key_id,
|
||||
secret_hash,
|
||||
owner,
|
||||
databases: Vec::new(),
|
||||
permissions: Permissions::default(),
|
||||
rate_limit: 1000,
|
||||
created_at: now,
|
||||
expires_at: 0,
|
||||
active: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Validates a secret against this key.
|
||||
pub fn validate_secret(&self, secret: &str) -> bool {
|
||||
let hash = *blake3::hash(secret.as_bytes()).as_bytes();
|
||||
self.secret_hash == hash && self.active && !self.is_expired()
|
||||
}
|
||||
|
||||
/// Checks if the key has expired.
|
||||
pub fn is_expired(&self) -> bool {
|
||||
if self.expires_at == 0 {
|
||||
return false;
|
||||
}
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs();
|
||||
now > self.expires_at
|
||||
}
|
||||
|
||||
/// Checks if the key can access a database.
|
||||
pub fn can_access_database(&self, database: &str) -> bool {
|
||||
self.databases.is_empty() || self.databases.contains(&database.to_string())
|
||||
}
|
||||
|
||||
/// Checks if the key can perform an operation.
|
||||
pub fn can_perform(&self, op: Operation) -> bool {
|
||||
match op {
|
||||
Operation::Read => self.permissions.read,
|
||||
Operation::Write => self.permissions.write,
|
||||
Operation::Delete => self.permissions.delete,
|
||||
Operation::Admin => self.permissions.admin,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Permission levels.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Permissions {
|
||||
/// Can read data.
|
||||
pub read: bool,
|
||||
/// Can write data.
|
||||
pub write: bool,
|
||||
/// Can delete data.
|
||||
pub delete: bool,
|
||||
/// Admin operations (create/drop databases).
|
||||
pub admin: bool,
|
||||
}
|
||||
|
||||
impl Default for Permissions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
read: true,
|
||||
write: true,
|
||||
delete: false,
|
||||
admin: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Permissions {
|
||||
/// Full permissions.
|
||||
pub fn full() -> Self {
|
||||
Self {
|
||||
read: true,
|
||||
write: true,
|
||||
delete: true,
|
||||
admin: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Read-only permissions.
|
||||
pub fn read_only() -> Self {
|
||||
Self {
|
||||
read: true,
|
||||
write: false,
|
||||
delete: false,
|
||||
admin: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Operation types for permission checking.
///
/// Each variant maps one-to-one onto a `Permissions` flag
/// (see `ApiKey::can_perform`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Operation {
    /// Read data (`Permissions::read`).
    Read,
    /// Write data (`Permissions::write`).
    Write,
    /// Delete data (`Permissions::delete`).
    Delete,
    /// Administrative operations (`Permissions::admin`).
    Admin,
}
|
||||
|
||||
/// Authentication configuration.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AuthConfig {
    /// Whether authentication is required.
    pub required: bool,
    /// API key header name (the HTTP header the gateway reads the key from).
    pub header_name: String,
    /// Allow anonymous read access.
    // NOTE(review): this flag is defined but not consulted in
    // `AuthMiddleware::validate` — confirm it is enforced at the router layer.
    pub allow_anonymous_read: bool,
}

impl Default for AuthConfig {
    /// Defaults: auth required, key read from `X-API-Key`, no anonymous reads.
    fn default() -> Self {
        Self {
            required: true,
            header_name: "X-API-Key".to_string(),
            allow_anonymous_read: false,
        }
    }
}
|
||||
|
||||
/// Authentication middleware state.
///
/// Holds the in-memory API key store and per-key rate-limit counters.
/// Both maps are behind `parking_lot::RwLock` and shared via `Arc`, so
/// the middleware can be cloned across request handlers.
pub struct AuthMiddleware {
    /// Configuration.
    config: AuthConfig,
    /// API keys store, indexed by `ApiKey::key_id`.
    keys: Arc<RwLock<HashMap<String, ApiKey>>>,
    /// Rate limit tracking (key_id -> (count, window_start)).
    /// `window_start` is Unix seconds; the window is fixed at one minute.
    rate_limits: Arc<RwLock<HashMap<String, (u32, u64)>>>,
}
|
||||
|
||||
impl AuthMiddleware {
|
||||
/// Creates a new auth middleware.
|
||||
pub fn new(config: AuthConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
keys: Arc::new(RwLock::new(HashMap::new())),
|
||||
rate_limits: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Registers an API key.
|
||||
pub fn register_key(&self, key: ApiKey) {
|
||||
self.keys.write().insert(key.key_id.clone(), key);
|
||||
}
|
||||
|
||||
/// Revokes an API key.
|
||||
pub fn revoke_key(&self, key_id: &str) -> bool {
|
||||
self.keys.write().remove(key_id).is_some()
|
||||
}
|
||||
|
||||
/// Validates an API key string (format: key_id:secret).
|
||||
pub fn validate(&self, api_key: &str) -> Result<ApiKey, AuthError> {
|
||||
let parts: Vec<&str> = api_key.splitn(2, ':').collect();
|
||||
if parts.len() != 2 {
|
||||
return Err(AuthError::InvalidFormat);
|
||||
}
|
||||
|
||||
let key_id = parts[0];
|
||||
let secret = parts[1];
|
||||
|
||||
let keys = self.keys.read();
|
||||
let key = keys.get(key_id).ok_or(AuthError::KeyNotFound)?;
|
||||
|
||||
if !key.validate_secret(secret) {
|
||||
return Err(AuthError::InvalidSecret);
|
||||
}
|
||||
|
||||
// Check rate limit
|
||||
self.check_rate_limit(key)?;
|
||||
|
||||
Ok(key.clone())
|
||||
}
|
||||
|
||||
/// Checks rate limit for a key.
|
||||
fn check_rate_limit(&self, key: &ApiKey) -> Result<(), AuthError> {
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs();
|
||||
|
||||
let mut limits = self.rate_limits.write();
|
||||
let entry = limits.entry(key.key_id.clone()).or_insert((0, now));
|
||||
|
||||
// Reset if window expired (1 minute)
|
||||
if now - entry.1 > 60 {
|
||||
*entry = (1, now);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check if limit exceeded
|
||||
if entry.0 >= key.rate_limit {
|
||||
return Err(AuthError::RateLimitExceeded);
|
||||
}
|
||||
|
||||
entry.0 += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets the auth config.
|
||||
pub fn config(&self) -> &AuthConfig {
|
||||
&self.config
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors returned by gateway authentication.
#[derive(Debug, Clone)]
pub enum AuthError {
    /// No API key was supplied in the request.
    MissingKey,
    /// Key string was not of the form `key_id:secret`.
    InvalidFormat,
    /// No registered key with that id.
    KeyNotFound,
    /// Secret mismatch, or key inactive/expired.
    InvalidSecret,
    /// Key is past its expiry timestamp.
    Expired,
    /// Per-key request quota exhausted for the current window.
    RateLimitExceeded,
    /// Key lacks the permission flag for the operation.
    InsufficientPermissions,
    /// Key is not allowed to touch the requested database.
    AccessDenied,
}

impl std::fmt::Display for AuthError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Map every variant to its fixed message, then emit it once.
        let msg = match self {
            Self::MissingKey => "Missing API key",
            Self::InvalidFormat => "Invalid API key format",
            Self::KeyNotFound => "API key not found",
            Self::InvalidSecret => "Invalid API secret",
            Self::Expired => "API key expired",
            Self::RateLimitExceeded => "Rate limit exceeded",
            Self::InsufficientPermissions => "Insufficient permissions",
            Self::AccessDenied => "Access denied to database",
        };
        f.write_str(msg)
    }
}

impl std::error::Error for AuthError {}
|
||||
|
||||
/// Generates a random key ID.
|
||||
fn generate_key_id() -> String {
|
||||
let timestamp = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
|
||||
let hash = blake3::hash(×tamp.to_le_bytes());
|
||||
format!("sdb_{}", hex::encode(&hash.as_bytes()[..12]))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// New keys get an `sdb_`-prefixed id and start active.
    #[test]
    fn test_api_key_creation() {
        let key = ApiKey::new([1u8; 32], "secret123");
        assert!(key.key_id.starts_with("sdb_"));
        assert!(key.active);
    }

    /// The correct secret validates; a wrong secret is rejected.
    #[test]
    fn test_secret_validation() {
        let key = ApiKey::new([1u8; 32], "secret123");
        assert!(key.validate_secret("secret123"));
        assert!(!key.validate_secret("wrong"));
    }

    /// End-to-end: register a key, then validate `key_id:secret`.
    #[test]
    fn test_auth_middleware() {
        let auth = AuthMiddleware::new(AuthConfig::default());
        let key = ApiKey::new([1u8; 32], "secret123");
        let key_id = key.key_id.clone();
        auth.register_key(key);

        let api_key = format!("{}:secret123", key_id);
        let result = auth.validate(&api_key);
        assert!(result.is_ok());
    }

    /// Read-only permissions allow reads and deny write/admin.
    #[test]
    fn test_permissions() {
        let mut key = ApiKey::new([1u8; 32], "test");
        key.permissions = Permissions::read_only();

        assert!(key.can_perform(Operation::Read));
        assert!(!key.can_perform(Operation::Write));
        assert!(!key.can_perform(Operation::Admin));
    }
}
|
||||
538
crates/synor-database/src/gateway/handlers.rs
Normal file
538
crates/synor-database/src/gateway/handlers.rs
Normal file
|
|
@ -0,0 +1,538 @@
|
|||
//! HTTP request handlers for Database Gateway.
|
||||
|
||||
use crate::document::{Document, DocumentFilter, DocumentId};
|
||||
use crate::error::DatabaseError;
|
||||
use crate::gateway::{ApiResponse, Pagination};
|
||||
use crate::keyvalue::KeyValueStore;
|
||||
use crate::query::{Filter, Query, QueryResult, SortOrder};
|
||||
use crate::timeseries::{Aggregation, DataPoint};
|
||||
use crate::vector::{Embedding, VectorSearchResult};
|
||||
use crate::{Database, DatabaseManager};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::sync::Arc;
|
||||
|
||||
// ============================================================================
|
||||
// Key-Value Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// Get a key-value pair.
#[derive(Debug, Serialize)]
pub struct KvGetResponse {
    /// The requested key.
    pub key: String,
    /// The stored value, or `None` if the key does not exist.
    pub value: Option<String>,
    /// Remaining time-to-live for the entry, if it has one
    /// (units per `KeyValueStore::ttl` — presumably seconds; confirm).
    pub ttl: Option<u64>,
}

/// Set a key-value pair request.
#[derive(Debug, Deserialize)]
pub struct KvSetRequest {
    /// Value to store.
    pub value: String,
    /// Time-to-live; 0 (the serde default) means no expiry.
    #[serde(default)]
    pub ttl: u64,
}
|
||||
|
||||
/// Batch KV operation request.
#[derive(Debug, Deserialize)]
pub struct KvBatchRequest {
    /// Operations executed in order by `handle_kv_batch`.
    pub operations: Vec<KvOperation>,
}

/// One operation inside a batch request.
#[derive(Debug, Deserialize)]
pub struct KvOperation {
    /// Operation name; anything other than these yields a per-item error.
    pub op: String, // "get", "set", "delete"
    /// Key the operation applies to.
    pub key: String,
    /// Value for "set"; ignored by "get"/"delete".
    pub value: Option<String>,
    /// Optional TTL for "set"; missing is treated as 0 (no expiry).
    pub ttl: Option<u64>,
}

/// Batch KV operation response.
#[derive(Debug, Serialize)]
pub struct KvBatchResponse {
    /// One result per requested operation, in request order.
    pub results: Vec<KvOperationResult>,
}

/// Outcome of a single batch operation.
#[derive(Debug, Serialize)]
pub struct KvOperationResult {
    /// Key the operation applied to.
    pub key: String,
    /// Whether the operation succeeded.
    pub success: bool,
    /// Value returned by "get"/"set"; `None` otherwise.
    pub value: Option<String>,
    /// Error message when `success` is false.
    pub error: Option<String>,
}
|
||||
|
||||
/// Handles KV get operation.
|
||||
pub fn handle_kv_get(store: &KeyValueStore, key: &str) -> ApiResponse<KvGetResponse> {
|
||||
let value = store.get_string(key);
|
||||
let ttl = store.ttl(key);
|
||||
|
||||
ApiResponse::ok(KvGetResponse {
|
||||
key: key.to_string(),
|
||||
value,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
|
||||
/// Handles KV set operation.
|
||||
pub fn handle_kv_set(
|
||||
store: &KeyValueStore,
|
||||
key: &str,
|
||||
req: KvSetRequest,
|
||||
) -> ApiResponse<KvGetResponse> {
|
||||
match store.set_string(key, &req.value, req.ttl) {
|
||||
Ok(_) => ApiResponse::ok(KvGetResponse {
|
||||
key: key.to_string(),
|
||||
value: Some(req.value),
|
||||
ttl: if req.ttl > 0 { Some(req.ttl) } else { None },
|
||||
}),
|
||||
Err(e) => ApiResponse::error(e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles KV delete operation.
|
||||
pub fn handle_kv_delete(store: &KeyValueStore, key: &str) -> ApiResponse<bool> {
|
||||
match store.delete(key) {
|
||||
Ok(deleted) => ApiResponse::ok(deleted),
|
||||
Err(e) => ApiResponse::error(e.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles batch KV operations.
|
||||
pub fn handle_kv_batch(store: &KeyValueStore, req: KvBatchRequest) -> ApiResponse<KvBatchResponse> {
|
||||
let mut results = Vec::with_capacity(req.operations.len());
|
||||
|
||||
for op in req.operations {
|
||||
let result = match op.op.as_str() {
|
||||
"get" => {
|
||||
let value = store.get_string(&op.key);
|
||||
KvOperationResult {
|
||||
key: op.key,
|
||||
success: true,
|
||||
value,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
"set" => {
|
||||
if let Some(value) = op.value {
|
||||
match store.set_string(&op.key, &value, op.ttl.unwrap_or(0)) {
|
||||
Ok(_) => KvOperationResult {
|
||||
key: op.key,
|
||||
success: true,
|
||||
value: Some(value),
|
||||
error: None,
|
||||
},
|
||||
Err(e) => KvOperationResult {
|
||||
key: op.key,
|
||||
success: false,
|
||||
value: None,
|
||||
error: Some(e.to_string()),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
KvOperationResult {
|
||||
key: op.key,
|
||||
success: false,
|
||||
value: None,
|
||||
error: Some("Missing value for set operation".to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
"delete" => match store.delete(&op.key) {
|
||||
Ok(deleted) => KvOperationResult {
|
||||
key: op.key,
|
||||
success: deleted,
|
||||
value: None,
|
||||
error: if deleted {
|
||||
None
|
||||
} else {
|
||||
Some("Key not found".to_string())
|
||||
},
|
||||
},
|
||||
Err(e) => KvOperationResult {
|
||||
key: op.key,
|
||||
success: false,
|
||||
value: None,
|
||||
error: Some(e.to_string()),
|
||||
},
|
||||
},
|
||||
_ => KvOperationResult {
|
||||
key: op.key,
|
||||
success: false,
|
||||
value: None,
|
||||
error: Some(format!("Unknown operation: {}", op.op)),
|
||||
},
|
||||
};
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
ApiResponse::ok(KvBatchResponse { results })
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Document Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// Create collection request.
#[derive(Debug, Deserialize)]
pub struct CreateCollectionRequest {
    /// Collection name.
    pub name: String,
    /// Optional JSON schema for the collection
    // NOTE(review): how/whether the schema is enforced is decided by the
    // collection layer, not visible here — confirm.
    #[serde(default)]
    pub schema: Option<JsonValue>,
}

/// Insert document request.
#[derive(Debug, Deserialize)]
pub struct InsertDocumentRequest {
    /// The document body to insert, as arbitrary JSON.
    pub document: JsonValue,
}

/// Insert many documents request.
#[derive(Debug, Deserialize)]
pub struct InsertManyRequest {
    /// Document bodies to insert.
    pub documents: Vec<JsonValue>,
}

/// Update document request.
#[derive(Debug, Deserialize)]
pub struct UpdateDocumentRequest {
    /// Update payload as JSON.
    pub update: JsonValue,
}
|
||||
|
||||
/// Query documents request.
///
/// Mirrors a MongoDB-style find: optional filter object (parsed by
/// `json_to_filter`), sort specs, skip/limit pagination, and an
/// optional field projection.
#[derive(Debug, Deserialize)]
pub struct QueryDocumentsRequest {
    /// MongoDB-style filter object; `None` matches everything.
    #[serde(default)]
    pub filter: Option<JsonValue>,
    /// Sort specifications, applied in order.
    #[serde(default)]
    pub sort: Option<Vec<SortSpec>>,
    /// Number of matching documents to skip (default 0).
    #[serde(default)]
    pub skip: usize,
    /// Maximum documents to return (default 100).
    #[serde(default = "default_limit")]
    pub limit: usize,
    /// Field projection; `None` returns full documents.
    #[serde(default)]
    pub select: Option<Vec<String>>,
}

/// Serde default for `QueryDocumentsRequest::limit`.
fn default_limit() -> usize {
    100
}

/// One sort criterion: a field name plus direction.
#[derive(Debug, Deserialize)]
pub struct SortSpec {
    /// Field to sort on.
    pub field: String,
    /// Sort direction, "asc" (default) or "desc".
    #[serde(default = "default_order")]
    pub order: String,
}

/// Serde default for `SortSpec::order`.
fn default_order() -> String {
    "asc".to_string()
}
|
||||
|
||||
/// Document response — the API-facing view of a stored `Document`.
#[derive(Debug, Serialize)]
pub struct DocumentResponse {
    /// Document id, hex-encoded (via `DocumentId::to_hex`).
    pub id: String,
    /// The document body as JSON.
    pub data: JsonValue,
    /// Creation timestamp (units as stored on `Document` —
    /// presumably Unix seconds; confirm).
    pub created_at: u64,
    /// Last-update timestamp.
    pub updated_at: u64,
    /// Document version counter.
    pub version: u64,
}

impl From<Document> for DocumentResponse {
    /// Converts a storage-layer `Document` into its API representation,
    /// hex-encoding the id and carrying every other field across as-is.
    fn from(doc: Document) -> Self {
        Self {
            id: doc.id.to_hex(),
            data: doc.data,
            created_at: doc.created_at,
            updated_at: doc.updated_at,
            version: doc.version,
        }
    }
}
|
||||
|
||||
/// Query response wrapping a page of matching documents.
#[derive(Debug, Serialize)]
pub struct QueryDocumentsResponse {
    /// Documents in this page, converted to their API representation.
    pub documents: Vec<DocumentResponse>,
    /// Total number of matches (across all pages).
    pub total: u64,
    /// Whether more matches exist beyond this page.
    pub has_more: bool,
    /// Query execution time in milliseconds.
    pub execution_time_ms: u64,
}

impl From<QueryResult> for QueryDocumentsResponse {
    /// Converts an engine `QueryResult` into the API response, mapping
    /// each stored document through `DocumentResponse::from`.
    fn from(result: QueryResult) -> Self {
        Self {
            documents: result.documents.into_iter().map(Into::into).collect(),
            total: result.total,
            has_more: result.has_more,
            execution_time_ms: result.execution_time_ms,
        }
    }
}
|
||||
|
||||
/// Converts JSON filter to Filter enum.
///
/// Accepts a MongoDB-style filter object. Supported forms:
/// - `{"$and": [f1, ...]}` / `{"$or": [f1, ...]}` — logical combinators
/// - `{"field": value}` — shorthand for equality
/// - `{"field": {"$op": operand}}` with `$eq`, `$ne`, `$gt`, `$gte`,
///   `$lt`, `$lte`, `$in` (array operand), `$contains` (string operand),
///   `$exists`
///
/// Unknown operators and malformed operands are silently skipped.
/// Returns `None` when the input is not an object or yields no filters;
/// multiple field conditions are implicitly AND-ed.
///
/// NOTE(review): `$exists` ignores its operand, so `{"$exists": false}`
/// behaves identically to `{"$exists": true}` — confirm against the
/// intended MongoDB semantics. Also, an empty `$and`/`$or` array maps
/// to `Filter::And(vec![])` / `Filter::Or(vec![])`; whether that
/// matches everything or nothing depends on `Filter` evaluation — verify.
pub fn json_to_filter(json: &JsonValue) -> Option<Filter> {
    let obj = json.as_object()?;

    // Handle $and — takes priority over any other keys in the object.
    if let Some(and_arr) = obj.get("$and").and_then(|v| v.as_array()) {
        let filters: Vec<Filter> = and_arr
            .iter()
            .filter_map(json_to_filter)
            .collect();
        return Some(Filter::And(filters));
    }

    // Handle $or
    if let Some(or_arr) = obj.get("$or").and_then(|v| v.as_array()) {
        let filters: Vec<Filter> = or_arr
            .iter()
            .filter_map(json_to_filter)
            .collect();
        return Some(Filter::Or(filters));
    }

    // Handle field conditions
    let mut filters = Vec::new();
    for (field, condition) in obj {
        // Skip any other `$`-prefixed operator keys at this level.
        if field.starts_with('$') {
            continue;
        }

        if let Some(cond_obj) = condition.as_object() {
            // Object value: each entry is an operator applied to `field`.
            for (op, value) in cond_obj {
                let filter = match op.as_str() {
                    "$eq" => Filter::Eq(field.clone(), value.clone()),
                    "$ne" => Filter::Ne(field.clone(), value.clone()),
                    "$gt" => Filter::Gt(field.clone(), value.clone()),
                    "$gte" => Filter::Gte(field.clone(), value.clone()),
                    "$lt" => Filter::Lt(field.clone(), value.clone()),
                    "$lte" => Filter::Lte(field.clone(), value.clone()),
                    "$in" => {
                        // Operand must be an array; otherwise skip.
                        if let Some(arr) = value.as_array() {
                            Filter::In(field.clone(), arr.clone())
                        } else {
                            continue;
                        }
                    }
                    "$contains" => {
                        // Operand must be a string; otherwise skip.
                        if let Some(s) = value.as_str() {
                            Filter::Contains(field.clone(), s.to_string())
                        } else {
                            continue;
                        }
                    }
                    "$exists" => Filter::Exists(field.clone()),
                    _ => continue,
                };
                filters.push(filter);
            }
        } else {
            // Simple equality
            filters.push(Filter::Eq(field.clone(), condition.clone()));
        }
    }

    // Collapse: no conditions -> None; one -> as-is; many -> implicit AND.
    if filters.is_empty() {
        None
    } else if filters.len() == 1 {
        Some(filters.remove(0))
    } else {
        Some(Filter::And(filters))
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// Vector Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// Insert embeddings request.
#[derive(Debug, Deserialize)]
pub struct InsertEmbeddingsRequest {
    /// Embeddings to insert.
    pub embeddings: Vec<EmbeddingInput>,
}

/// One embedding in an insert request.
#[derive(Debug, Deserialize)]
pub struct EmbeddingInput {
    /// Caller-supplied embedding id.
    pub id: String,
    /// The embedding vector
    // NOTE(review): dimensionality is presumably validated against the
    // database's configured vector_dimensions — confirm in the vector layer.
    pub vector: Vec<f32>,
    /// Optional arbitrary metadata attached to the embedding.
    #[serde(default)]
    pub metadata: Option<JsonValue>,
    /// Optional namespace for scoping searches.
    #[serde(default)]
    pub namespace: Option<String>,
}
|
||||
|
||||
/// Vector search request.
#[derive(Debug, Deserialize)]
pub struct VectorSearchRequest {
    /// Query vector to search against.
    pub vector: Vec<f32>,
    /// Maximum number of matches to return (default 10).
    #[serde(default = "default_search_limit")]
    pub limit: usize,
    /// Restrict the search to one namespace; `None` searches all.
    #[serde(default)]
    pub namespace: Option<String>,
    /// Minimum similarity score to include a match, if any.
    #[serde(default)]
    pub threshold: Option<f32>,
}

/// Serde default for `VectorSearchRequest::limit`.
fn default_search_limit() -> usize {
    10
}
|
||||
|
||||
/// Vector search response.
#[derive(Debug, Serialize)]
pub struct VectorSearchResponse {
    /// Matches ordered by the engine (presumably best score first — confirm).
    pub results: Vec<VectorMatch>,
    /// Number of matches returned.
    pub count: usize,
}

/// One similarity-search match.
#[derive(Debug, Serialize)]
pub struct VectorMatch {
    /// Id of the matched embedding.
    pub id: String,
    /// Similarity score reported by the engine.
    pub score: f32,
    /// Metadata stored with the embedding.
    pub metadata: JsonValue,
}

impl From<VectorSearchResult> for VectorMatch {
    /// Converts an engine search result into the API match shape,
    /// dropping the raw vector and keeping id, score and metadata.
    fn from(result: VectorSearchResult) -> Self {
        Self {
            id: result.embedding.id,
            score: result.score,
            metadata: result.embedding.metadata,
        }
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// Time-Series Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// Record metric request.
#[derive(Debug, Deserialize)]
pub struct RecordMetricRequest {
    /// Metric value to record.
    pub value: f64,
    /// Optional explicit timestamp; when absent the server presumably
    /// stamps "now" — confirm in the time-series layer.
    #[serde(default)]
    pub timestamp: Option<u64>,
    /// Optional tag set attached to the data point.
    #[serde(default)]
    pub tags: Option<std::collections::HashMap<String, String>>,
}

/// Query metrics request over a time range.
#[derive(Debug, Deserialize)]
pub struct QueryMetricsRequest {
    /// Range start timestamp (inclusive/exclusive per engine — confirm).
    pub start: u64,
    /// Range end timestamp.
    pub end: u64,
    /// Optional aggregation function name (e.g. avg/sum — engine-defined).
    #[serde(default)]
    pub aggregation: Option<String>,
    /// Optional bucket width in milliseconds for aggregated queries.
    #[serde(default)]
    pub interval_ms: Option<u64>,
}
|
||||
|
||||
/// Metrics response for a single named series.
#[derive(Debug, Serialize)]
pub struct MetricsResponse {
    /// Metric name queried.
    pub name: String,
    /// Data points within the requested range.
    pub points: Vec<DataPointResponse>,
    /// Aggregated value, present only when an aggregation was requested.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub aggregation: Option<f64>,
}

/// One time-series sample in a response.
#[derive(Debug, Serialize)]
pub struct DataPointResponse {
    /// Sample timestamp.
    pub timestamp: u64,
    /// Sample value.
    pub value: f64,
}

impl From<DataPoint> for DataPointResponse {
    /// Converts a storage-layer `DataPoint` to the API shape
    /// (timestamp + value; tags, if any, are not carried over).
    fn from(point: DataPoint) -> Self {
        Self {
            timestamp: point.timestamp,
            value: point.value,
        }
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// Database Management Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// Create database request.
#[derive(Debug, Deserialize)]
pub struct CreateDatabaseRequest {
    /// Database name.
    pub name: String,
    /// Optional embedding dimensionality for the vector store.
    #[serde(default)]
    pub vector_dimensions: Option<u32>,
    /// Optional replication factor.
    #[serde(default)]
    pub replication: Option<u8>,
}

/// Database info response.
#[derive(Debug, Serialize)]
pub struct DatabaseInfoResponse {
    /// Database identifier.
    pub id: String,
    /// Database name.
    pub name: String,
    /// Number of stored entries.
    pub entry_count: u64,
    /// Storage used, in bytes.
    pub storage_used: u64,
    /// Cumulative read-operation count.
    pub reads: u64,
    /// Cumulative write-operation count.
    pub writes: u64,
    /// Cumulative vector-search count.
    pub vector_searches: u64,
}

/// List databases response.
#[derive(Debug, Serialize)]
pub struct ListDatabasesResponse {
    /// Info for every visible database.
    pub databases: Vec<DatabaseInfoResponse>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Bare `{"field": value}` parses as a single equality filter.
    #[test]
    fn test_json_to_filter_simple() {
        let json = serde_json::json!({
            "name": "Alice"
        });

        let filter = json_to_filter(&json).unwrap();
        match filter {
            Filter::Eq(field, value) => {
                assert_eq!(field, "name");
                assert_eq!(value, serde_json::json!("Alice"));
            }
            _ => panic!("Expected Eq filter"),
        }
    }

    /// `{"field": {"$op": v}}` parses as the corresponding operator filter.
    #[test]
    fn test_json_to_filter_operators() {
        let json = serde_json::json!({
            "age": { "$gte": 18 }
        });

        let filter = json_to_filter(&json).unwrap();
        match filter {
            Filter::Gte(field, value) => {
                assert_eq!(field, "age");
                assert_eq!(value, serde_json::json!(18));
            }
            _ => panic!("Expected Gte filter"),
        }
    }

    /// `$and` over two sub-objects yields an And of two parsed filters.
    #[test]
    fn test_json_to_filter_and() {
        let json = serde_json::json!({
            "$and": [
                { "status": "active" },
                { "age": { "$gte": 18 } }
            ]
        });

        let filter = json_to_filter(&json).unwrap();
        match filter {
            Filter::And(filters) => {
                assert_eq!(filters.len(), 2);
            }
            _ => panic!("Expected And filter"),
        }
    }
}
|
||||
130
crates/synor-database/src/gateway/mod.rs
Normal file
130
crates/synor-database/src/gateway/mod.rs
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
//! Database Gateway - HTTP/REST API for Synor Database.
|
||||
//!
|
||||
//! Provides external access to database operations via REST API.
|
||||
//!
|
||||
//! # Endpoints
|
||||
//!
|
||||
//! ## Key-Value Operations
|
||||
//! - `GET /kv/:key` - Get value
|
||||
//! - `PUT /kv/:key` - Set value
|
||||
//! - `DELETE /kv/:key` - Delete key
|
||||
//! - `POST /kv/batch` - Batch operations
|
||||
//!
|
||||
//! ## Document Operations
|
||||
//! - `POST /db/:database/collections` - Create collection
|
||||
//! - `GET /db/:database/:collection` - Query documents
|
||||
//! - `POST /db/:database/:collection` - Insert document
|
||||
//! - `PUT /db/:database/:collection/:id` - Update document
|
||||
//! - `DELETE /db/:database/:collection/:id` - Delete document
|
||||
//!
|
||||
//! ## Vector Operations
|
||||
//! - `POST /db/:database/vectors` - Insert embeddings
|
||||
//! - `POST /db/:database/vectors/search` - Similarity search
|
||||
//!
|
||||
//! ## Time-Series Operations
|
||||
//! - `POST /db/:database/metrics/:name` - Record metric
|
||||
//! - `GET /db/:database/metrics/:name` - Query metrics
|
||||
|
||||
pub mod auth;
|
||||
pub mod handlers;
|
||||
pub mod router;
|
||||
pub mod server;
|
||||
|
||||
pub use auth::{ApiKey, AuthConfig, AuthMiddleware};
|
||||
pub use router::create_router;
|
||||
pub use server::{GatewayConfig, GatewayServer};
|
||||
|
||||
use crate::error::DatabaseError;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// API response wrapper.
///
/// Uniform envelope for every gateway endpoint: exactly one of `data`
/// or `error` is populated depending on `success`; absent fields are
/// omitted from the serialized JSON.
#[derive(Debug, Serialize, Deserialize)]
pub struct ApiResponse<T> {
    /// Whether the request succeeded.
    pub success: bool,
    /// Response data (if successful).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<T>,
    /// Error message (if failed).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Request ID for tracing; set via `with_request_id`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
}
|
||||
|
||||
impl<T> ApiResponse<T> {
|
||||
/// Creates a successful response.
|
||||
pub fn ok(data: T) -> Self {
|
||||
Self {
|
||||
success: true,
|
||||
data: Some(data),
|
||||
error: None,
|
||||
request_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an error response.
|
||||
pub fn error(message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
success: false,
|
||||
data: None,
|
||||
error: Some(message.into()),
|
||||
request_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the request ID.
|
||||
pub fn with_request_id(mut self, id: impl Into<String>) -> Self {
|
||||
self.request_id = Some(id.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<Result<T, DatabaseError>> for ApiResponse<T> {
    /// Maps `Ok` to a success envelope and `Err` to an error envelope
    /// carrying the error's display text.
    fn from(result: Result<T, DatabaseError>) -> Self {
        result.map_or_else(|e| Self::error(e.to_string()), Self::ok)
    }
}
|
||||
|
||||
/// Pagination parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Pagination {
    /// Number of items to skip (default 0).
    #[serde(default)]
    pub skip: usize,
    /// Maximum items to return (default 100).
    #[serde(default = "default_limit")]
    pub limit: usize,
}

/// Serde default for `Pagination::limit`; also used by `Default`.
fn default_limit() -> usize {
    100
}

impl Default for Pagination {
    /// First page: skip nothing, return up to `default_limit()` items.
    fn default() -> Self {
        Self {
            skip: 0,
            limit: default_limit(),
        }
    }
}
|
||||
|
||||
/// Usage metrics for billing.
///
/// Counters accumulated by `AppState::record_*`; all start at zero
/// (`Default`).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct UsageMetrics {
    /// Read operations count.
    pub reads: u64,
    /// Write operations count.
    pub writes: u64,
    /// Vector search operations.
    pub vector_searches: u64,
    /// Storage used (bytes) — incremented by bytes written, never
    /// decremented here (deletes are not subtracted in visible code).
    pub storage_bytes: u64,
    /// Data transferred (bytes).
    // NOTE(review): no visible code updates this field — confirm it is
    // populated elsewhere.
    pub bandwidth_bytes: u64,
}
|
||||
657
crates/synor-database/src/gateway/router.rs
Normal file
657
crates/synor-database/src/gateway/router.rs
Normal file
|
|
@ -0,0 +1,657 @@
|
|||
//! HTTP router for Database Gateway using Axum.
|
||||
|
||||
use crate::document::DocumentId;
|
||||
use crate::gateway::auth::{ApiKey, AuthError, AuthMiddleware, Operation};
|
||||
use crate::gateway::handlers::*;
|
||||
use crate::gateway::{ApiResponse, UsageMetrics};
|
||||
use crate::query::{Filter, Query, SortOrder};
|
||||
use crate::timeseries::DataPoint;
|
||||
use crate::vector::Embedding;
|
||||
use crate::{Database, DatabaseConfig, DatabaseManager};
|
||||
|
||||
use axum::{
|
||||
extract::{Path, Query as AxumQuery, State},
|
||||
http::{HeaderMap, StatusCode},
|
||||
response::IntoResponse,
|
||||
routing::{delete, get, post, put},
|
||||
Json, Router,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
use serde_json::Value as JsonValue;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Application state shared across handlers.
///
/// Cheap to share: each field is behind an `Arc`, so the state can be
/// handed to every Axum handler.
pub struct AppState {
    /// Database manager.
    pub db_manager: Arc<DatabaseManager>,
    /// Auth middleware (key store + rate limits).
    pub auth: Arc<AuthMiddleware>,
    /// Usage metrics, accumulated by the `record_*` methods for billing.
    pub metrics: Arc<RwLock<UsageMetrics>>,
}
|
||||
|
||||
impl AppState {
|
||||
/// Creates new app state.
|
||||
pub fn new(db_manager: Arc<DatabaseManager>, auth: Arc<AuthMiddleware>) -> Self {
|
||||
Self {
|
||||
db_manager,
|
||||
auth,
|
||||
metrics: Arc::new(RwLock::new(UsageMetrics::default())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Records a read operation.
|
||||
pub fn record_read(&self) {
|
||||
self.metrics.write().reads += 1;
|
||||
}
|
||||
|
||||
/// Records a write operation.
|
||||
pub fn record_write(&self, bytes: u64) {
|
||||
let mut m = self.metrics.write();
|
||||
m.writes += 1;
|
||||
m.storage_bytes += bytes;
|
||||
}
|
||||
|
||||
/// Records a vector search.
|
||||
pub fn record_vector_search(&self) {
|
||||
self.metrics.write().vector_searches += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracts and validates API key from headers.
|
||||
fn extract_api_key(headers: &HeaderMap, auth: &AuthMiddleware) -> Result<ApiKey, AuthError> {
|
||||
let header_name = &auth.config().header_name;
|
||||
let key_str = headers
|
||||
.get(header_name)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.ok_or(AuthError::MissingKey)?;
|
||||
|
||||
auth.validate(key_str)
|
||||
}
|
||||
|
||||
/// Creates the main router.
///
/// Registers every REST endpoint against the shared `AppState`. Paths
/// use Axum `:param` captures which handlers receive via `Path`.
// NOTE(review): no auth middleware layer is attached here; handlers
// receive `HeaderMap` but none validate keys — confirm where auth runs.
pub fn create_router(state: Arc<AppState>) -> Router {
    Router::new()
        // Health check
        .route("/health", get(health_check))
        // Key-Value routes (operate on the "default" database)
        .route("/kv/:key", get(kv_get))
        .route("/kv/:key", put(kv_set))
        .route("/kv/:key", delete(kv_delete))
        .route("/kv/batch", post(kv_batch))
        // Database management
        .route("/databases", get(list_databases))
        .route("/databases", post(create_database))
        .route("/databases/:db", get(get_database_info))
        .route("/databases/:db", delete(delete_database))
        // Collection routes
        .route("/db/:db/collections", post(create_collection))
        .route("/db/:db/collections/:coll", get(list_documents))
        .route("/db/:db/collections/:coll", delete(drop_collection))
        // Document routes
        // NOTE(review): GET /db/:db/:coll is bound to query_documents,
        // which expects a JSON body — unusual for GET; verify clients.
        .route("/db/:db/:coll", get(query_documents))
        .route("/db/:db/:coll", post(insert_document))
        .route("/db/:db/:coll/many", post(insert_many_documents))
        .route("/db/:db/:coll/:id", get(get_document))
        .route("/db/:db/:coll/:id", put(update_document))
        .route("/db/:db/:coll/:id", delete(delete_document))
        // Vector routes
        .route("/db/:db/vectors", post(insert_embeddings))
        .route("/db/:db/vectors/search", post(vector_search))
        .route("/db/:db/vectors/:id", get(get_embedding))
        .route("/db/:db/vectors/:id", delete(delete_embedding))
        // Time-series routes
        .route("/db/:db/metrics", get(list_metrics))
        .route("/db/:db/metrics/:name", post(record_metric))
        .route("/db/:db/metrics/:name", get(query_metrics))
        // Stats
        .route("/stats", get(get_stats))
        .with_state(state)
}
|
||||
|
||||
// ============================================================================
|
||||
// Health & Stats
|
||||
// ============================================================================
|
||||
|
||||
async fn health_check() -> impl IntoResponse {
|
||||
Json(serde_json::json!({
|
||||
"status": "healthy",
|
||||
"service": "synor-database",
|
||||
"version": env!("CARGO_PKG_VERSION")
|
||||
}))
|
||||
}
|
||||
|
||||
async fn get_stats(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let metrics = state.metrics.read().clone();
|
||||
Json(ApiResponse::ok(metrics))
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Key-Value Handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn kv_get(
|
||||
State(state): State<Arc<AppState>>,
|
||||
headers: HeaderMap,
|
||||
Path(key): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
// For demo, use a default database
|
||||
let db = match get_default_database(&state) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<KvGetResponse>::error("No database"))),
|
||||
};
|
||||
|
||||
state.record_read();
|
||||
let response = handle_kv_get(db.kv(), &key);
|
||||
(StatusCode::OK, Json(response))
|
||||
}
|
||||
|
||||
async fn kv_set(
|
||||
State(state): State<Arc<AppState>>,
|
||||
headers: HeaderMap,
|
||||
Path(key): Path<String>,
|
||||
Json(req): Json<KvSetRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_default_database(&state) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<KvGetResponse>::error("No database"))),
|
||||
};
|
||||
|
||||
state.record_write(req.value.len() as u64);
|
||||
let response = handle_kv_set(db.kv(), &key, req);
|
||||
(StatusCode::OK, Json(response))
|
||||
}
|
||||
|
||||
async fn kv_delete(
|
||||
State(state): State<Arc<AppState>>,
|
||||
headers: HeaderMap,
|
||||
Path(key): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_default_database(&state) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<bool>::error("No database"))),
|
||||
};
|
||||
|
||||
let response = handle_kv_delete(db.kv(), &key);
|
||||
(StatusCode::OK, Json(response))
|
||||
}
|
||||
|
||||
async fn kv_batch(
|
||||
State(state): State<Arc<AppState>>,
|
||||
headers: HeaderMap,
|
||||
Json(req): Json<KvBatchRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_default_database(&state) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<KvBatchResponse>::error("No database"))),
|
||||
};
|
||||
|
||||
let response = handle_kv_batch(db.kv(), req);
|
||||
(StatusCode::OK, Json(response))
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Database Management Handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn list_databases(
|
||||
State(state): State<Arc<AppState>>,
|
||||
headers: HeaderMap,
|
||||
) -> impl IntoResponse {
|
||||
// List all databases (would filter by owner in production)
|
||||
let owner = [0u8; 32]; // Default owner
|
||||
let dbs = state.db_manager.list(&owner);
|
||||
|
||||
let response: Vec<DatabaseInfoResponse> = dbs
|
||||
.iter()
|
||||
.map(|db| {
|
||||
let stats = db.stats();
|
||||
DatabaseInfoResponse {
|
||||
id: db.id.to_string(),
|
||||
name: db.config.name.clone(),
|
||||
entry_count: stats.entry_count,
|
||||
storage_used: stats.storage_used,
|
||||
reads: stats.reads,
|
||||
writes: stats.writes,
|
||||
vector_searches: stats.vector_searches,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Json(ApiResponse::ok(ListDatabasesResponse { databases: response }))
|
||||
}
|
||||
|
||||
/// POST /databases — creates a new database from the request config.
///
/// Vector support is enabled iff a positive `vector_dimensions` is
/// supplied; replication defaults to 3. Returns 201 with the new
/// database's info, or 400 with the manager's error.
async fn create_database(
    State(state): State<Arc<AppState>>,
    headers: HeaderMap,
    Json(req): Json<CreateDatabaseRequest>,
) -> impl IntoResponse {
    let config = DatabaseConfig {
        name: req.name,
        owner: [0u8; 32], // Would come from auth
        vector_dimensions: req.vector_dimensions.unwrap_or(0),
        vector_enabled: req.vector_dimensions.map(|d| d > 0).unwrap_or(false),
        replication: req.replication.unwrap_or(3),
        ..Default::default()
    };

    match state.db_manager.create(config) {
        Ok(db) => {
            let stats = db.stats();
            (
                StatusCode::CREATED,
                Json(ApiResponse::ok(DatabaseInfoResponse {
                    id: db.id.to_string(),
                    name: db.config.name.clone(),
                    entry_count: stats.entry_count,
                    storage_used: stats.storage_used,
                    reads: stats.reads,
                    writes: stats.writes,
                    vector_searches: stats.vector_searches,
                })),
            )
        }
        Err(e) => (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string()))),
    }
}
|
||||
|
||||
async fn get_database_info(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let owner = [0u8; 32];
|
||||
match state.db_manager.get_by_name(&owner, &db_name) {
|
||||
Some(db) => {
|
||||
let stats = db.stats();
|
||||
Json(ApiResponse::ok(DatabaseInfoResponse {
|
||||
id: db.id.to_string(),
|
||||
name: db.config.name.clone(),
|
||||
entry_count: stats.entry_count,
|
||||
storage_used: stats.storage_used,
|
||||
reads: stats.reads,
|
||||
writes: stats.writes,
|
||||
vector_searches: stats.vector_searches,
|
||||
}))
|
||||
}
|
||||
None => Json(ApiResponse::error("Database not found")),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_database(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let owner = [0u8; 32];
|
||||
if let Some(db) = state.db_manager.get_by_name(&owner, &db_name) {
|
||||
match state.db_manager.delete(&db.id) {
|
||||
Ok(_) => Json(ApiResponse::ok(true)),
|
||||
Err(e) => Json(ApiResponse::error(e.to_string())),
|
||||
}
|
||||
} else {
|
||||
Json(ApiResponse::error("Database not found"))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Collection Handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn create_collection(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
Json(req): Json<CreateCollectionRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<bool>::error("Database not found"))),
|
||||
};
|
||||
|
||||
match db.documents().create_collection(&req.name) {
|
||||
Ok(_) => (StatusCode::CREATED, Json(ApiResponse::ok(true))),
|
||||
Err(e) => (StatusCode::BAD_REQUEST, Json(ApiResponse::<bool>::error(e.to_string()))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn drop_collection(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, coll_name)): Path<(String, String)>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<bool>::error("Database not found")),
|
||||
};
|
||||
|
||||
match db.documents().drop_collection(&coll_name) {
|
||||
Ok(dropped) => Json(ApiResponse::ok(dropped)),
|
||||
Err(e) => Json(ApiResponse::error(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
/// GET /db/:db/collections/:coll — currently returns the database's
/// collection names.
// NOTE(review): `coll_name` is extracted but ignored, and the handler
// returns list_collections() rather than the documents of `coll` —
// this looks inconsistent with the route path; confirm intended
// behavior before relying on it.
async fn list_documents(
    State(state): State<Arc<AppState>>,
    Path((db_name, coll_name)): Path<(String, String)>,
) -> impl IntoResponse {
    let db = match get_database(&state, &db_name) {
        Some(db) => db,
        None => return Json(ApiResponse::<Vec<String>>::error("Database not found")),
    };

    Json(ApiResponse::ok(db.documents().list_collections()))
}
|
||||
|
||||
// ============================================================================
|
||||
// Document Handlers
|
||||
// ============================================================================
|
||||
|
||||
/// GET /db/:db/:coll — runs a MongoDB-style query over a collection.
///
/// Builds a `Query` from the JSON request body: pagination (skip/limit),
/// an optional filter translated by `json_to_filter`, optional
/// multi-field sorting, and optional field projection.
// NOTE(review): this is registered as GET but requires a JSON body;
// many HTTP clients cannot send a body with GET — consider POST.
async fn query_documents(
    State(state): State<Arc<AppState>>,
    Path((db_name, coll_name)): Path<(String, String)>,
    Json(req): Json<QueryDocumentsRequest>,
) -> impl IntoResponse {
    let db = match get_database(&state, &db_name) {
        Some(db) => db,
        None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<QueryDocumentsResponse>::error("Database not found"))),
    };

    state.record_read();

    // Build query
    let mut query = Query::new(&coll_name)
        .skip(req.skip)
        .limit(req.limit);

    // Add filter
    // NOTE(review): an unparseable filter is silently dropped
    // (json_to_filter -> None), returning unfiltered results — confirm.
    if let Some(filter_json) = &req.filter {
        if let Some(filter) = json_to_filter(filter_json) {
            query = query.filter(filter);
        }
    }

    // Add sorting: any value other than "desc" sorts ascending.
    if let Some(sorts) = &req.sort {
        for sort in sorts {
            let order = if sort.order == "desc" {
                SortOrder::Descending
            } else {
                SortOrder::Ascending
            };
            query = query.sort(&sort.field, order);
        }
    }

    // Add field selection (projection)
    if let Some(fields) = req.select {
        query = query.select(fields);
    }

    match db.query().execute(&query) {
        Ok(result) => (StatusCode::OK, Json(ApiResponse::ok(QueryDocumentsResponse::from(result)))),
        Err(e) => (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string()))),
    }
}
|
||||
|
||||
async fn insert_document(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, coll_name)): Path<(String, String)>,
|
||||
Json(req): Json<InsertDocumentRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<String>::error("Database not found"))),
|
||||
};
|
||||
|
||||
let size = serde_json::to_vec(&req.document).map(|v| v.len()).unwrap_or(0);
|
||||
state.record_write(size as u64);
|
||||
|
||||
match db.documents().insert(&coll_name, req.document) {
|
||||
Ok(id) => (StatusCode::CREATED, Json(ApiResponse::ok(id.to_hex()))),
|
||||
Err(e) => (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string()))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_many_documents(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, coll_name)): Path<(String, String)>,
|
||||
Json(req): Json<InsertManyRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<Vec<String>>::error("Database not found"))),
|
||||
};
|
||||
|
||||
let mut ids = Vec::with_capacity(req.documents.len());
|
||||
let mut total_size = 0u64;
|
||||
|
||||
for doc in req.documents {
|
||||
total_size += serde_json::to_vec(&doc).map(|v| v.len()).unwrap_or(0) as u64;
|
||||
match db.documents().insert(&coll_name, doc) {
|
||||
Ok(id) => ids.push(id.to_hex()),
|
||||
Err(e) => return (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string()))),
|
||||
}
|
||||
}
|
||||
|
||||
state.record_write(total_size);
|
||||
(StatusCode::CREATED, Json(ApiResponse::ok(ids)))
|
||||
}
|
||||
|
||||
/// GET /db/:db/:coll/:id — fetch a single document by id.
// NOTE(review): stub — the id is parsed and validated but then unused;
// the handler always answers with a "not yet implemented" error.
// `coll_name` is also unused. The read is still metered.
async fn get_document(
    State(state): State<Arc<AppState>>,
    Path((db_name, coll_name, doc_id)): Path<(String, String, String)>,
) -> impl IntoResponse {
    let db = match get_database(&state, &db_name) {
        Some(db) => db,
        None => return Json(ApiResponse::<DocumentResponse>::error("Database not found")),
    };

    state.record_read();

    // Validates the id format even though the lookup is unimplemented.
    let id = match DocumentId::from_hex(&doc_id) {
        Ok(id) => id,
        Err(e) => return Json(ApiResponse::error(e.to_string())),
    };

    // Would need to query by ID
    Json(ApiResponse::error("Get by ID not yet implemented"))
}
|
||||
|
||||
/// PUT /db/:db/:coll/:id — update a document by id.
// NOTE(review): stub — id is parsed but unused, `req`/`coll_name` are
// unused, and a zero-byte write is metered despite nothing being
// written; confirm metering intent when this is implemented.
async fn update_document(
    State(state): State<Arc<AppState>>,
    Path((db_name, coll_name, doc_id)): Path<(String, String, String)>,
    Json(req): Json<UpdateDocumentRequest>,
) -> impl IntoResponse {
    let db = match get_database(&state, &db_name) {
        Some(db) => db,
        None => return Json(ApiResponse::<bool>::error("Database not found")),
    };

    let id = match DocumentId::from_hex(&doc_id) {
        Ok(id) => id,
        Err(e) => return Json(ApiResponse::error(e.to_string())),
    };

    state.record_write(0);
    Json(ApiResponse::error("Update not yet implemented"))
}
|
||||
|
||||
/// DELETE /db/:db/:coll/:id — delete a document by id.
// NOTE(review): stub — all extractors are unused; unlike the other
// document stubs this one does not even resolve the database.
async fn delete_document(
    State(state): State<Arc<AppState>>,
    Path((db_name, coll_name, doc_id)): Path<(String, String, String)>,
) -> impl IntoResponse {
    Json(ApiResponse::<bool>::error("Delete not yet implemented"))
}
|
||||
|
||||
// ============================================================================
|
||||
// Vector Handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn insert_embeddings(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
Json(req): Json<InsertEmbeddingsRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<usize>::error("Database not found"))),
|
||||
};
|
||||
|
||||
let mut count = 0;
|
||||
for input in req.embeddings {
|
||||
let mut embedding = Embedding::new(&input.id, input.vector);
|
||||
if let Some(meta) = input.metadata {
|
||||
embedding = embedding.with_metadata(meta);
|
||||
}
|
||||
if let Some(ns) = input.namespace {
|
||||
embedding = embedding.with_namespace(ns);
|
||||
}
|
||||
|
||||
if let Err(e) = db.vectors().insert(embedding) {
|
||||
return (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string())));
|
||||
}
|
||||
count += 1;
|
||||
}
|
||||
|
||||
state.record_write(count as u64 * 4 * db.config.vector_dimensions as u64);
|
||||
(StatusCode::CREATED, Json(ApiResponse::ok(count)))
|
||||
}
|
||||
|
||||
async fn vector_search(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
Json(req): Json<VectorSearchRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return (StatusCode::NOT_FOUND, Json(ApiResponse::<VectorSearchResponse>::error("Database not found"))),
|
||||
};
|
||||
|
||||
state.record_vector_search();
|
||||
|
||||
match db.vectors().search(
|
||||
&req.vector,
|
||||
req.limit,
|
||||
req.namespace.as_deref(),
|
||||
req.threshold,
|
||||
) {
|
||||
Ok(results) => {
|
||||
let count = results.len();
|
||||
let matches: Vec<VectorMatch> = results.into_iter().map(Into::into).collect();
|
||||
(StatusCode::OK, Json(ApiResponse::ok(VectorSearchResponse { results: matches, count })))
|
||||
}
|
||||
Err(e) => (StatusCode::BAD_REQUEST, Json(ApiResponse::error(e.to_string()))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_embedding(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, id)): Path<(String, String)>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<VectorMatch>::error("Database not found")),
|
||||
};
|
||||
|
||||
state.record_read();
|
||||
|
||||
match db.vectors().get(&id) {
|
||||
Some(embedding) => Json(ApiResponse::ok(VectorMatch {
|
||||
id: embedding.id,
|
||||
score: 1.0,
|
||||
metadata: embedding.metadata,
|
||||
})),
|
||||
None => Json(ApiResponse::error("Embedding not found")),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_embedding(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, id)): Path<(String, String)>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<bool>::error("Database not found")),
|
||||
};
|
||||
|
||||
match db.vectors().delete(&id) {
|
||||
Ok(deleted) => Json(ApiResponse::ok(deleted)),
|
||||
Err(e) => Json(ApiResponse::error(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Time-Series Handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn list_metrics(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(db_name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<Vec<String>>::error("Database not found")),
|
||||
};
|
||||
|
||||
Json(ApiResponse::ok(db.timeseries().list_metrics()))
|
||||
}
|
||||
|
||||
async fn record_metric(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, metric_name)): Path<(String, String)>,
|
||||
Json(req): Json<RecordMetricRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<bool>::error("Database not found")),
|
||||
};
|
||||
|
||||
state.record_write(16); // timestamp + value
|
||||
|
||||
let point = if let Some(ts) = req.timestamp {
|
||||
DataPoint::new(ts, req.value)
|
||||
} else {
|
||||
DataPoint::now(req.value)
|
||||
};
|
||||
|
||||
match db.timeseries().record_point(&metric_name, point) {
|
||||
Ok(_) => Json(ApiResponse::ok(true)),
|
||||
Err(e) => Json(ApiResponse::error(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn query_metrics(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((db_name, metric_name)): Path<(String, String)>,
|
||||
Json(req): Json<QueryMetricsRequest>,
|
||||
) -> impl IntoResponse {
|
||||
let db = match get_database(&state, &db_name) {
|
||||
Some(db) => db,
|
||||
None => return Json(ApiResponse::<MetricsResponse>::error("Database not found")),
|
||||
};
|
||||
|
||||
state.record_read();
|
||||
|
||||
match db.timeseries().query(&metric_name, req.start, req.end) {
|
||||
Ok(points) => {
|
||||
let data_points: Vec<DataPointResponse> = points.into_iter().map(Into::into).collect();
|
||||
Json(ApiResponse::ok(MetricsResponse {
|
||||
name: metric_name,
|
||||
points: data_points,
|
||||
aggregation: None,
|
||||
}))
|
||||
}
|
||||
Err(e) => Json(ApiResponse::error(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helper Functions
|
||||
// ============================================================================
|
||||
|
||||
fn get_default_database(state: &AppState) -> Option<Arc<crate::Database>> {
|
||||
let owner = [0u8; 32];
|
||||
state.db_manager.get_by_name(&owner, "default")
|
||||
}
|
||||
|
||||
/// Looks up a database by name for the all-zero "anonymous" owner.
///
/// NOTE(review): the owner should eventually come from the
/// authenticated API key rather than being hard-coded.
fn get_database(state: &AppState, name: &str) -> Option<Arc<crate::Database>> {
    state.db_manager.get_by_name(&[0u8; 32], name)
}
|
||||
181
crates/synor-database/src/gateway/server.rs
Normal file
181
crates/synor-database/src/gateway/server.rs
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
//! Database Gateway HTTP Server.
|
||||
|
||||
use crate::gateway::auth::{AuthConfig, AuthMiddleware};
|
||||
use crate::gateway::router::{create_router, AppState};
|
||||
use crate::{DatabaseConfig, DatabaseManager};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
/// Gateway server configuration.
///
/// Serializable so it can be loaded from a config file; see `Default`
/// for the out-of-the-box values (0.0.0.0:8484, CORS wide open).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GatewayConfig {
    /// Listen address.
    pub host: String,
    /// Listen port.
    pub port: u16,
    /// Authentication configuration.
    pub auth: AuthConfig,
    /// Maximum request body size (bytes).
    // NOTE(review): not visibly enforced in run()/create_router —
    // confirm a body-limit layer is applied somewhere.
    pub max_body_size: usize,
    /// Request timeout (seconds).
    pub request_timeout: u64,
    /// Enable CORS.
    pub cors_enabled: bool,
    /// Allowed origins for CORS.
    pub cors_origins: Vec<String>,
}
|
||||
|
||||
impl Default for GatewayConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
host: "0.0.0.0".to_string(),
|
||||
port: 8484,
|
||||
auth: AuthConfig::default(),
|
||||
max_body_size: 10 * 1024 * 1024, // 10 MB
|
||||
request_timeout: 30,
|
||||
cors_enabled: true,
|
||||
cors_origins: vec!["*".to_string()],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Database Gateway Server.
///
/// Owns the configuration, a shared `DatabaseManager`, and the auth
/// middleware; `run()` turns it into a live Axum HTTP service.
pub struct GatewayServer {
    /// Configuration.
    config: GatewayConfig,
    /// Database manager.
    db_manager: Arc<DatabaseManager>,
    /// Auth middleware.
    auth: Arc<AuthMiddleware>,
}
|
||||
|
||||
impl GatewayServer {
|
||||
/// Creates a new gateway server.
|
||||
pub fn new(config: GatewayConfig) -> Self {
|
||||
let auth = Arc::new(AuthMiddleware::new(config.auth.clone()));
|
||||
let db_manager = Arc::new(DatabaseManager::new());
|
||||
|
||||
Self {
|
||||
config,
|
||||
db_manager,
|
||||
auth,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a server with an existing database manager.
|
||||
pub fn with_db_manager(config: GatewayConfig, db_manager: Arc<DatabaseManager>) -> Self {
|
||||
let auth = Arc::new(AuthMiddleware::new(config.auth.clone()));
|
||||
|
||||
Self {
|
||||
config,
|
||||
db_manager,
|
||||
auth,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the database manager.
|
||||
pub fn db_manager(&self) -> Arc<DatabaseManager> {
|
||||
self.db_manager.clone()
|
||||
}
|
||||
|
||||
/// Returns the auth middleware.
|
||||
pub fn auth(&self) -> Arc<AuthMiddleware> {
|
||||
self.auth.clone()
|
||||
}
|
||||
|
||||
/// Creates a default database for convenience.
|
||||
pub fn ensure_default_database(&self) -> Result<(), crate::error::DatabaseError> {
|
||||
let config = DatabaseConfig {
|
||||
name: "default".to_string(),
|
||||
owner: [0u8; 32],
|
||||
vector_enabled: true,
|
||||
vector_dimensions: 1536, // OpenAI embedding size
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
match self.db_manager.create(config) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(crate::error::DatabaseError::AlreadyExists(_)) => Ok(()), // Already exists is fine
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts the HTTP server.
|
||||
pub async fn run(&self) -> Result<(), std::io::Error> {
|
||||
let addr: SocketAddr = format!("{}:{}", self.config.host, self.config.port)
|
||||
.parse()
|
||||
.expect("Invalid address");
|
||||
|
||||
// Ensure default database exists
|
||||
if let Err(e) = self.ensure_default_database() {
|
||||
tracing::error!("Failed to create default database: {}", e);
|
||||
}
|
||||
|
||||
let state = Arc::new(AppState::new(
|
||||
self.db_manager.clone(),
|
||||
self.auth.clone(),
|
||||
));
|
||||
|
||||
let app = create_router(state);
|
||||
|
||||
tracing::info!("Synor Database Gateway listening on http://{}", addr);
|
||||
|
||||
let listener = TcpListener::bind(addr).await?;
|
||||
axum::serve(listener, app).await
|
||||
}
|
||||
|
||||
/// Creates a test server (for integration tests).
|
||||
#[cfg(test)]
|
||||
pub fn for_testing() -> Self {
|
||||
let config = GatewayConfig {
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: 0, // Random port
|
||||
auth: AuthConfig {
|
||||
required: false,
|
||||
allow_anonymous_read: true,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
Self::new(config)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for GatewayServer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("GatewayServer")
|
||||
.field("config", &self.config)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config exposes the documented port and enables CORS.
    #[test]
    fn test_gateway_config_default() {
        let config = GatewayConfig::default();
        assert_eq!(config.port, 8484);
        assert!(config.cors_enabled);
    }

    /// A freshly constructed server can create its default database.
    #[test]
    fn test_gateway_server_creation() {
        let server = GatewayServer::new(GatewayConfig::default());
        assert!(server.ensure_default_database().is_ok());
    }

    /// ensure_default_database is idempotent: repeated calls succeed.
    #[tokio::test]
    async fn test_ensure_default_database() {
        let server = GatewayServer::for_testing();
        assert!(server.ensure_default_database().is_ok());

        // Should not error on second call
        assert!(server.ensure_default_database().is_ok());
    }
}
|
||||
|
|
@ -44,6 +44,7 @@
|
|||
|
||||
pub mod document;
|
||||
pub mod error;
|
||||
pub mod gateway;
|
||||
pub mod index;
|
||||
pub mod keyvalue;
|
||||
pub mod query;
|
||||
|
|
@ -53,6 +54,7 @@ pub mod vector;
|
|||
|
||||
pub use document::{Collection, Document, DocumentId, DocumentStore};
|
||||
pub use error::DatabaseError;
|
||||
pub use gateway::{GatewayConfig, GatewayServer};
|
||||
pub use index::{Index, IndexConfig, IndexManager, IndexType};
|
||||
pub use keyvalue::{KeyValue, KeyValueStore, KvEntry};
|
||||
pub use query::{Filter, Query, QueryEngine, QueryResult, SortOrder};
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
| 7 | Production Readiness | 🔄 In Progress | 85% | 3 |
|
||||
| 8 | Synor Storage L2 | ✅ Complete | 100% | 3 |
|
||||
| 9 | Synor Hosting | ✅ Complete | 100% | 3 |
|
||||
| 10 | Synor Database L2 | 🔄 In Progress | 0% | 3 |
|
||||
| 10 | Synor Database L2 | ✅ Complete | 100% | 3 |
|
||||
| 11 | Economics & Billing | ⏳ Planned | 0% | 3 |
|
||||
| 12 | Fiat Gateway | ⏳ Planned | 0% | 2 |
|
||||
|
||||
|
|
@ -88,23 +88,26 @@
|
|||
|
||||
| Milestone | Status | Progress |
|
||||
|-----------|--------|----------|
|
||||
| Database Core | 🔄 In Progress | 0% |
|
||||
| Query Layer | ⏳ Planned | 0% |
|
||||
| Database Gateway | ⏳ Planned | 0% |
|
||||
| Database Core | ✅ Complete | 100% |
|
||||
| Query Layer | ✅ Complete | 100% |
|
||||
| Database Gateway | ✅ Complete | 100% |
|
||||
|
||||
### Database Components
|
||||
|
||||
| Component | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| Key-Value Store | 0% | Redis-compatible API |
|
||||
| Document Store | 0% | MongoDB-compatible queries |
|
||||
| Vector Store | 0% | AI/RAG optimized, cosine similarity |
|
||||
| Relational (SQL) | 0% | SQLite-compatible subset |
|
||||
| Time-Series | 0% | Metrics and analytics |
|
||||
| Graph Store | 0% | Relationship queries |
|
||||
| Query Engine | 0% | Unified SQL + Vector + Graph |
|
||||
| Replication | 0% | Raft consensus for consistency |
|
||||
| Indexing | 0% | B-tree, LSM-tree, HNSW |
|
||||
| Key-Value Store | 100% | Redis-compatible API (TTL, INCR, MGET) |
|
||||
| Document Store | 100% | MongoDB-compatible queries & filters |
|
||||
| Vector Store | 100% | AI/RAG optimized, cosine/euclidean/dot |
|
||||
| Time-Series | 100% | Metrics, downsampling, aggregations |
|
||||
| Query Engine | 100% | Unified document + vector queries |
|
||||
| Schema Validator | 100% | Field types, required, strict mode |
|
||||
| Indexing | 100% | B-tree, hash, unique indexes |
|
||||
| Database Pricing | 100% | Pay-per-use cost calculator |
|
||||
| Relational (SQL) | 0% | SQLite-compatible subset (deferred) |
|
||||
| Graph Store | 0% | Relationship queries (deferred) |
|
||||
| Replication | 0% | Raft consensus (deferred) |
|
||||
| Gateway HTTP API | 100% | REST API with auth, rate limiting |
|
||||
|
||||
### Database Pricing Model
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue