From 37596c47caca7853aa25dce8805d83a3a0170138 Mon Sep 17 00:00:00 2001 From: Alastair Ong Date: Wed, 22 Apr 2026 10:52:12 +0100 Subject: [PATCH 1/5] Add order_bytes to OrderSummary API response Return the ABI-encoded order bytes in order list responses so the frontend can decode OrderV4 directly without a separate Raindex hydration round-trip. Matches albion.rest.api which already returns this. Co-Authored-By: Claude Opus 4.6 --- src/routes/orders/mod.rs | 1 + src/types/orders.rs | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/routes/orders/mod.rs b/src/routes/orders/mod.rs index 33eb380..3260a51 100644 --- a/src/routes/orders/mod.rs +++ b/src/routes/orders/mod.rs @@ -236,6 +236,7 @@ pub(crate) fn build_order_summary( Ok(OrderSummary { order_hash: order.order_hash(), owner: order.owner(), + order_bytes: order.order_bytes(), input_token: TokenRef { address: input_token_info.address(), symbol: input_token_info.symbol().unwrap_or_default(), diff --git a/src/types/orders.rs b/src/types/orders.rs index 6d9936f..194df6d 100644 --- a/src/types/orders.rs +++ b/src/types/orders.rs @@ -1,5 +1,5 @@ use crate::types::common::TokenRef; -use alloy::primitives::{Address, FixedBytes}; +use alloy::primitives::{Address, Bytes, FixedBytes}; use rocket::form::{FromForm, FromFormField}; use serde::{Deserialize, Serialize}; use utoipa::{IntoParams, ToSchema}; @@ -46,6 +46,8 @@ pub struct OrderSummary { pub order_hash: FixedBytes<32>, #[schema(value_type = String, example = "0x1234567890abcdef1234567890abcdef12345678")] pub owner: Address, + #[schema(value_type = String, example = "0x01")] + pub order_bytes: Bytes, pub input_token: TokenRef, pub output_token: TokenRef, #[schema(example = "500000")] From 99cd19a7bb58567d2d254e42bf844fb311443780 Mon Sep 17 00:00:00 2001 From: Alastair Ong Date: Wed, 22 Apr 2026 15:59:10 +0100 Subject: [PATCH 2/5] Add GET /v1/trades/token/{address} endpoint for token-filtered trade history Enables the frontend to fetch trades for a 
specific token instead of all trades globally. Uses GetOrdersTokenFilter to find orders where the token appears as input or output, then batch-fetches trades via DirectTradesFetcher with time-range filtering and pagination. Co-Authored-By: Claude Opus 4.6 --- src/main.rs | 3 + src/routes/trades.rs | 256 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 258 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 082822e..abd4e3c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -66,6 +66,7 @@ enum StartupError { routes::orders::get_orders_by_token, routes::admin::put_registry, routes::trades::get_trades_by_tx, + routes::trades::get_trades_by_token, routes::trades::get_trades_by_address, routes::trades::get_taker_trades, routes::trades::post_trades_batch, @@ -134,6 +135,7 @@ pub(crate) fn rocket( let trades_by_tx_cache = routes::trades::trades_by_tx_cache(); let trades_by_order_hash_cache = routes::trades::trades_by_order_hash_cache(); let taker_trades_tx_hash_cache = routes::trades::taker_trades_tx_hash_cache(); + let trades_by_token_cache = routes::trades::trades_by_token_cache(); let orders_by_token_cache = routes::orders::orders_by_token_cache(); let orders_by_owner_cache = routes::orders::orders_by_owner_cache(); @@ -145,6 +147,7 @@ pub(crate) fn rocket( .manage(trades_by_tx_cache) .manage(trades_by_order_hash_cache) .manage(taker_trades_tx_hash_cache) + .manage(trades_by_token_cache) .manage(orders_by_token_cache) .manage(orders_by_owner_cache) .manage(direct_trades_fetcher) diff --git a/src/routes/trades.rs b/src/routes/trades.rs index 12282de..2c5d777 100644 --- a/src/routes/trades.rs +++ b/src/routes/trades.rs @@ -14,7 +14,9 @@ use async_trait::async_trait; use futures::future::join_all; use rain_math_float::Float; use rain_orderbook_common::local_db::OrderbookIdentifier; -use rain_orderbook_common::raindex_client::orders::{GetOrdersFilters, RaindexOrder}; +use rain_orderbook_common::raindex_client::orders::{ + GetOrdersFilters, 
GetOrdersTokenFilter, RaindexOrder, +}; use rain_orderbook_common::raindex_client::trades::RaindexTrade; use rain_orderbook_common::raindex_client::{RaindexClient, RaindexError}; use rocket::serde::json::Json; @@ -59,6 +61,17 @@ pub(crate) fn taker_trades_tx_hash_cache() -> TakerTradesTxHashCache { AppCache::new(TRADES_CACHE_CAPACITY, TAKER_TX_HASH_CACHE_TTL) } +const TRADES_BY_TOKEN_CACHE_TTL: Duration = Duration::from_secs(15); + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct TokenTradesCacheKey(Address, u32, u32, Option, Option); + +type TradesByTokenCache = AppCache; + +pub(crate) fn trades_by_token_cache() -> TradesByTokenCache { + AppCache::new(TRADES_CACHE_CAPACITY, TRADES_BY_TOKEN_CACHE_TTL) +} + #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum TxIndexState { Indexed, @@ -696,6 +709,198 @@ async fn get_cached_trades_by_address( .map_err(ApiError::from) } +async fn process_get_trades_by_token( + ds: &dyn TradesDataSource, + direct_trades: Option<&crate::direct_trades::DirectTradesFetcher>, + token_address: Address, + params: TradesPaginationParams, +) -> Result { + let start = Instant::now(); + + // Find ALL orders (active + inactive) involving this token as input or output + let token_filter = GetOrdersTokenFilter { + inputs: Some(vec![token_address]), + outputs: Some(vec![token_address]), + }; + let all_orders = fetch_all_orders( + ds, + GetOrdersFilters { + tokens: Some(token_filter), + ..Default::default() + }, + ) + .await?; + + let orders_duration_ms = start.elapsed().as_millis() as u64; + tracing::info!( + token = %token_address, + order_count = all_orders.len(), + orders_duration_ms, + "fetched orders for trades-by-token" + ); + + // Reuse the same trade-building logic as trades-by-address + let trades = if let Some(fetcher) = direct_trades { + // Fast path: batch SQLite query via DirectTradesFetcher + let order_hashes: Vec = all_orders.iter().map(|o| o.order_hash()).collect(); + + // Build order_hash → token info lookup + let mut 
token_map: std::collections::HashMap = + std::collections::HashMap::new(); + for order in &all_orders { + if let Ok((input_vault, output_vault)) = super::resolve_io_vaults(order) { + let input_token_info = input_vault.token(); + let output_token_info = output_vault.token(); + token_map.insert( + order.order_hash(), + ( + TokenRef { + address: input_token_info.address(), + symbol: input_token_info.symbol().unwrap_or_default(), + decimals: input_token_info.decimals(), + }, + TokenRef { + address: output_token_info.address(), + symbol: output_token_info.symbol().unwrap_or_default(), + decimals: output_token_info.decimals(), + }, + ), + ); + } + } + + let batch_start = Instant::now(); + match fetcher.batch_fetch(&order_hashes).await { + Ok(batch_result) => { + let batch_duration_ms = batch_start.elapsed().as_millis() as u64; + tracing::info!( + token = %token_address, + order_count = order_hashes.len(), + batch_duration_ms, + "direct batch trades completed for trades-by-token" + ); + + let mut trades = Vec::new(); + for (order_hash, entries) in &batch_result { + let (input_token, output_token) = + token_map.get(order_hash).cloned().unwrap_or_else(|| { + ( + TokenRef { + address: Address::ZERO, + symbol: String::new(), + decimals: 0, + }, + TokenRef { + address: Address::ZERO, + symbol: String::new(), + decimals: 0, + }, + ) + }); + + for entry in entries { + if let Some(start_time) = params.start_time { + if entry.timestamp < start_time { + continue; + } + } + if let Some(end_time) = params.end_time { + if entry.timestamp > end_time { + continue; + } + } + + trades.push(TradeByAddress { + tx_hash: entry.tx_hash, + input_amount: entry.input_amount.clone(), + output_amount: entry.output_amount.clone(), + input_token: input_token.clone(), + output_token: output_token.clone(), + order_hash: Some(*order_hash), + timestamp: entry.timestamp, + block_number: 0, + }); + } + } + trades + } + Err(e) => { + tracing::warn!( + error = %e, + token = %token_address, + "direct batch 
trades failed for trades-by-token; falling back to library" + ); + build_trades_from_library(ds, &all_orders, ¶ms).await? + } + } + } else { + build_trades_from_library(ds, &all_orders, ¶ms).await? + }; + + let mut trades = trades; + trades.sort_by_key(|t| (Reverse(t.timestamp), Reverse(t.block_number))); + + let page = params.page.unwrap_or(1); + let page_size = params.page_size.unwrap_or(20); + let total_trades = trades.len() as u64; + let total_pages = if page_size == 0 { + 0 + } else { + total_trades.div_ceil(u64::from(page_size)) + }; + + let offset = (u64::from(page.saturating_sub(1)) * u64::from(page_size)) as usize; + let paginated = if offset >= trades.len() { + Vec::new() + } else { + let end = std::cmp::min(offset + page_size as usize, trades.len()); + trades[offset..end].to_vec() + }; + + tracing::info!( + token = %token_address, + page, + page_size, + total_trades, + returned_trades = paginated.len(), + total_duration_ms = start.elapsed().as_millis() as u64, + "resolved trades by token" + ); + + Ok(TradesByAddressResponse { + trades: paginated, + pagination: TradesPagination { + page, + page_size, + total_trades, + total_pages, + has_more: u64::from(page) < total_pages, + }, + }) +} + +async fn get_cached_trades_by_token( + cache: &TradesByTokenCache, + ds: &dyn TradesDataSource, + direct_trades: Option<&crate::direct_trades::DirectTradesFetcher>, + token_address: Address, + params: TradesPaginationParams, +) -> Result { + let cache_key = TokenTradesCacheKey( + token_address, + params.page.unwrap_or(1), + params.page_size.unwrap_or(20), + params.start_time, + params.end_time, + ); + cache + .get_or_try_insert(cache_key, || async move { + process_get_trades_by_token(ds, direct_trades, token_address, params).await + }) + .await + .map_err(ApiError::from) +} + async fn process_get_taker_trades( ds: &dyn TradesDataSource, direct_trades: Option<&crate::direct_trades::DirectTradesFetcher>, @@ -855,6 +1060,54 @@ pub async fn get_trades_by_tx( .await } 
+#[utoipa::path( + get, + path = "/v1/trades/token/{address}", + tag = "Trades", + security(("basicAuth" = [])), + params( + ("address" = String, Path, description = "Token address"), + TradesPaginationParams, + ), + responses( + (status = 200, description = "Paginated list of trades for token", body = TradesByAddressResponse), + (status = 400, description = "Bad request", body = ApiErrorResponse), + (status = 401, description = "Unauthorized", body = ApiErrorResponse), + (status = 429, description = "Rate limited", body = ApiErrorResponse), + (status = 500, description = "Internal server error", body = ApiErrorResponse), + ) +)] +#[get("/token/
?")] +pub async fn get_trades_by_token( + _global: GlobalRateLimit, + _key: AuthenticatedKey, + shared_raindex: &State, + trades_by_token_cache: &State, + direct_trades: &State>, + span: TracingSpan, + address: ValidatedAddress, + params: TradesPaginationParams, +) -> Result, ApiError> { + async move { + tracing::info!(address = ?address, params = ?params, "trades by token request received"); + let raindex = shared_raindex.read().await; + let ds = RaindexTradesDataSource { + client: raindex.client(), + }; + let response = get_cached_trades_by_token( + trades_by_token_cache, + &ds, + direct_trades.inner().as_ref(), + address.0, + params, + ) + .await?; + Ok(Json(response)) + } + .instrument(span.0) + .await +} + #[utoipa::path( get, path = "/v1/trades/{address}", @@ -1118,6 +1371,7 @@ pub async fn post_trades_batch( pub fn routes() -> Vec { rocket::routes![ + get_trades_by_token, get_trades_by_tx, get_taker_trades, get_trades_by_address, From b3dba2fdbe32da971aad88bfcb53d7cf1b956b73 Mon Sep 17 00:00:00 2001 From: Alastair Ong Date: Wed, 22 Apr 2026 17:13:34 +0100 Subject: [PATCH 3/5] Return simulated maxOutput from quote in order summaries Add max_output field to OrderSummary populated from the on-chain quote simulation's formattedMaxOutput. Falls back to output_vault_balance when no quote is available. This allows the frontend depth chart to show accurate per-epoch availability for DCA/strategy orders instead of the full vault balance. 
Co-Authored-By: Claude Opus 4.6 --- src/routes/orders/get_by_owner.rs | 23 ++++++++++++---- src/routes/orders/get_by_token.rs | 23 ++++++++++++---- src/routes/orders/mod.rs | 46 ++++++++++++++++++++++--------- src/types/orders.rs | 4 +++ 4 files changed, 71 insertions(+), 25 deletions(-) diff --git a/src/routes/orders/get_by_owner.rs b/src/routes/orders/get_by_owner.rs index d5c3a2c..8246023 100644 --- a/src/routes/orders/get_by_owner.rs +++ b/src/routes/orders/get_by_owner.rs @@ -1,6 +1,6 @@ use super::{ - build_order_summary, build_pagination, OrdersListDataSource, RaindexOrdersListDataSource, - DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, + build_order_summary, build_pagination, extract_quote_fields, OrdersListDataSource, + QuoteFields, RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, }; use crate::auth::AuthenticatedKey; use crate::cache::AppCache; @@ -78,14 +78,25 @@ pub(crate) async fn process_get_orders_by_owner( let quotes_stage_duration_ms = quotes_stage_start.elapsed().as_millis(); // Map quote results back to original order positions - let mut io_ratios: Vec = vec!["-".into(); orders.len()]; + let mut quote_fields: Vec = (0..orders.len()) + .map(|_| QuoteFields { + io_ratio: "-".into(), + max_output: None, + }) + .collect(); for (qi, &original_idx) in quotable_indices.iter().enumerate() { - io_ratios[original_idx] = super::quote_result_to_io_ratio(&orders[original_idx], quote_results.get(qi).cloned().unwrap_or_else(|| Err(ApiError::Internal("missing quote".into())))); + quote_fields[original_idx] = extract_quote_fields( + &orders[original_idx], + quote_results + .get(qi) + .cloned() + .unwrap_or_else(|| Err(ApiError::Internal("missing quote".into()))), + ); } let mut summaries = Vec::with_capacity(orders.len()); - for (order, io_ratio) in orders.iter().zip(io_ratios.iter()) { - summaries.push(build_order_summary(order, io_ratio)?); + for (order, fields) in orders.iter().zip(quote_fields.iter()) { + summaries.push(build_order_summary(order, fields)?); 
} let pagination = build_pagination(total_count, page_num.into(), effective_page_size.into()); diff --git a/src/routes/orders/get_by_token.rs b/src/routes/orders/get_by_token.rs index addf858..01add0b 100644 --- a/src/routes/orders/get_by_token.rs +++ b/src/routes/orders/get_by_token.rs @@ -1,6 +1,6 @@ use super::{ - build_order_summary, build_pagination, OrdersListDataSource, RaindexOrdersListDataSource, - DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, + build_order_summary, build_pagination, extract_quote_fields, OrdersListDataSource, + QuoteFields, RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, }; use crate::auth::AuthenticatedKey; use crate::cache::AppCache; @@ -96,14 +96,25 @@ pub(crate) async fn process_get_orders_by_token( let quotes_stage_duration_ms = quotes_stage_start.elapsed().as_millis(); // Map quote results back to original order positions - let mut io_ratios: Vec = vec!["-".into(); orders.len()]; + let mut quote_fields: Vec = (0..orders.len()) + .map(|_| QuoteFields { + io_ratio: "-".into(), + max_output: None, + }) + .collect(); for (qi, &original_idx) in quotable_indices.iter().enumerate() { - io_ratios[original_idx] = super::quote_result_to_io_ratio(&orders[original_idx], quote_results.get(qi).cloned().unwrap_or_else(|| Err(ApiError::Internal("missing quote".into())))); + quote_fields[original_idx] = extract_quote_fields( + &orders[original_idx], + quote_results + .get(qi) + .cloned() + .unwrap_or_else(|| Err(ApiError::Internal("missing quote".into()))), + ); } let mut summaries = Vec::with_capacity(orders.len()); - for (order, io_ratio) in orders.iter().zip(io_ratios.iter()) { - summaries.push(build_order_summary(order, io_ratio)?); + for (order, fields) in orders.iter().zip(quote_fields.iter()) { + summaries.push(build_order_summary(order, fields)?); } let pagination = build_pagination(total_count, page_num.into(), effective_page_size.into()); diff --git a/src/routes/orders/mod.rs b/src/routes/orders/mod.rs index 3260a51..1a5d442 100644 --- 
a/src/routes/orders/mod.rs +++ b/src/routes/orders/mod.rs @@ -223,15 +223,27 @@ impl<'a> OrdersListDataSource for RaindexOrdersListDataSource<'a> { } } +/// Extracted quote fields for building order summaries. +pub(crate) struct QuoteFields { + pub io_ratio: String, + /// Simulated max output from on-chain quote. None when quote failed or unavailable. + pub max_output: Option, +} + pub(crate) fn build_order_summary( order: &RaindexOrder, - io_ratio: &str, + quote: &QuoteFields, ) -> Result { let (input, output) = super::resolve_io_vaults(order)?; let input_token_info = input.token(); let output_token_info = output.token(); let created_at: u64 = order.timestamp_added().try_into().unwrap_or(0); + let vault_balance = output.formatted_balance(); + let max_output = quote + .max_output + .clone() + .unwrap_or_else(|| vault_balance.clone()); Ok(OrderSummary { order_hash: order.order_hash(), @@ -247,30 +259,38 @@ pub(crate) fn build_order_summary( symbol: output_token_info.symbol().unwrap_or_default(), decimals: output_token_info.decimals(), }, - output_vault_balance: output.formatted_balance(), - io_ratio: io_ratio.to_string(), + output_vault_balance: vault_balance, + max_output, + io_ratio: quote.io_ratio.clone(), created_at, orderbook_id: order.orderbook(), }) } -pub(crate) fn quote_result_to_io_ratio( +pub(crate) fn extract_quote_fields( order: &RaindexOrder, quotes_result: OrderQuoteResult, -) -> String { +) -> QuoteFields { match quotes_result { - Ok(quotes) => quotes - .first() - .and_then(|quote| quote.data.as_ref()) - .map(|quote| quote.formatted_ratio.clone()) - .unwrap_or_else(|| "-".into()), + Ok(quotes) => { + let data = quotes.first().and_then(|quote| quote.data.as_ref()); + QuoteFields { + io_ratio: data + .map(|d| d.formatted_ratio.clone()) + .unwrap_or_else(|| "-".into()), + max_output: data.map(|d| d.formatted_max_output.clone()), + } + } Err(err) => { tracing::warn!( order_hash = ?order.order_hash(), error = ?err, "quote fetch failed; using fallback 
io_ratio" ); - "-".into() + QuoteFields { + io_ratio: "-".into(), + max_output: None, + } } } } @@ -309,8 +329,8 @@ pub(crate) fn build_orders_list_response( let mut summaries = Vec::with_capacity(orders.len()); for (order, quotes_result) in orders.iter().zip(quote_results) { - let io_ratio = quote_result_to_io_ratio(order, quotes_result); - summaries.push(build_order_summary(order, &io_ratio)?); + let quote = extract_quote_fields(order, quotes_result); + summaries.push(build_order_summary(order, "e)?); } Ok(OrdersListResponse { diff --git a/src/types/orders.rs b/src/types/orders.rs index 194df6d..c27ff94 100644 --- a/src/types/orders.rs +++ b/src/types/orders.rs @@ -52,6 +52,10 @@ pub struct OrderSummary { pub output_token: TokenRef, #[schema(example = "500000")] pub output_vault_balance: String, + /// Simulated max output from on-chain quote (smaller than vault balance for DCA/strategy orders). + /// Falls back to output_vault_balance when no quote is available. + #[schema(example = "100")] + pub max_output: String, #[schema(example = "0.0005")] pub io_ratio: String, #[schema(example = 1718452800)] From 7a279a9165e0b04aa8886ec10344ce3c156866c1 Mon Sep 17 00:00:00 2001 From: Alastair Ong Date: Thu, 23 Apr 2026 11:01:36 +0100 Subject: [PATCH 4/5] Add detailed health endpoint with raindex sync status reporting The existing /health endpoint only returns {"status":"ok"} without checking actual system health. The new GET /health/detailed endpoint reports app DB connectivity, raindex DB connectivity, and per-orderbook sync progress (last synced block, updated_at timestamp, latest trade age). This enables diagnosing sync stalls like the Apr 17 stoppage. 
Co-Authored-By: Claude Opus 4.6 --- src/main.rs | 1 + src/routes/health.rs | 312 ++++++++++++++++++++++++++++++++++++++++++- src/types/health.rs | 70 ++++++++++ 3 files changed, 380 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index abd4e3c..4413d56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -54,6 +54,7 @@ enum StartupError { #[openapi( paths( routes::health::get_health, + routes::health::get_health_detailed, routes::tokens::get_tokens, routes::swap::post_swap_quote, routes::swap::post_swap_calldata, diff --git a/src/routes/health.rs b/src/routes/health.rs index d08d2ee..ed89ea6 100644 --- a/src/routes/health.rs +++ b/src/routes/health.rs @@ -1,8 +1,13 @@ +use crate::db::DbPool; use crate::error::ApiError; use crate::fairings::TracingSpan; -use crate::types::health::HealthResponse; +use crate::raindex::SharedRaindexProvider; +use crate::types::health::{ + DbStatus, DetailedHealthResponse, HealthResponse, OrderbookSyncInfo, RaindexDbStatus, +}; use rocket::serde::json::Json; -use rocket::Route; +use rocket::{Route, State}; +use tokio::task::spawn_blocking; use tracing::Instrument; #[utoipa::path( @@ -25,6 +30,307 @@ pub async fn get_health(span: TracingSpan) -> Result, ApiEr .await } +#[utoipa::path( + get, + path = "/health/detailed", + tag = "Health", + responses( + (status = 200, description = "Detailed service health including sync status", body = DetailedHealthResponse), + ) +)] +#[get("/health/detailed")] +pub async fn get_health_detailed( + span: TracingSpan, + pool: &State, + shared_raindex: &State, +) -> Result, ApiError> { + async move { + tracing::info!("detailed health check request received"); + + // 1. Check app database connectivity + let app_db = check_app_db(pool).await; + + // 2. Check raindex database and sync status + let raindex_db = check_raindex_db(shared_raindex).await; + + // 3. 
Determine overall status + let status = if app_db.connected && raindex_db.connected && !raindex_db.orderbooks.is_empty() + { + "ok".to_string() + } else if app_db.connected || raindex_db.connected { + "degraded".to_string() + } else { + "error".to_string() + }; + + Ok(Json(DetailedHealthResponse { + status, + app_db, + raindex_db, + })) + } + .instrument(span.0) + .await +} + +async fn check_app_db(pool: &DbPool) -> DbStatus { + match sqlx::query("SELECT 1").execute(pool).await { + Ok(_) => DbStatus { + connected: true, + error: None, + }, + Err(e) => { + tracing::warn!(error = %e, "app database health check failed"); + DbStatus { + connected: false, + error: Some(e.to_string()), + } + } + } +} + +async fn check_raindex_db(shared_raindex: &SharedRaindexProvider) -> RaindexDbStatus { + let raindex = shared_raindex.read().await; + + let db_path = match raindex.db_path() { + Some(path) => path, + None => { + return RaindexDbStatus { + connected: false, + error: Some("no local db path configured".into()), + db_path: None, + orderbooks: vec![], + }; + } + }; + + let db_path_str = db_path.display().to_string(); + + if !db_path.exists() { + return RaindexDbStatus { + connected: false, + error: Some("raindex database file does not exist".into()), + db_path: Some(db_path_str), + orderbooks: vec![], + }; + } + + // Get all configured orderbooks to know which chain_id + address combos to query + let orderbooks = match raindex.client().get_all_orderbooks() { + Ok(obs) => obs, + Err(e) => { + tracing::warn!(error = %e, "failed to get orderbooks from raindex config"); + return RaindexDbStatus { + connected: false, + error: Some(format!("failed to read orderbook config: {e}")), + db_path: Some(db_path_str), + orderbooks: vec![], + }; + } + }; + + // Open a read-only connection to raindex.db and query sync_status + latest trades + let db_path_clone = db_path.clone(); + let orderbook_configs: Vec<(u32, String)> = orderbooks + .values() + .map(|ob| (ob.network.chain_id, 
format!("{:#x}", ob.address))) + .collect(); + + let query_result = spawn_blocking(move || { + query_raindex_sync_status(&db_path_clone, &orderbook_configs) + }) + .await; + + match query_result { + Ok(Ok(orderbook_infos)) => RaindexDbStatus { + connected: true, + error: None, + db_path: Some(db_path_str), + orderbooks: orderbook_infos, + }, + Ok(Err(e)) => { + tracing::warn!(error = %e, "raindex db sync status query failed"); + RaindexDbStatus { + connected: false, + error: Some(e), + db_path: Some(db_path_str), + orderbooks: vec![], + } + } + Err(e) => { + tracing::warn!(error = %e, "raindex db query task panicked"); + RaindexDbStatus { + connected: false, + error: Some("query task failed".into()), + db_path: Some(db_path_str), + orderbooks: vec![], + } + } + } +} + +fn query_raindex_sync_status( + db_path: &std::path::Path, + orderbook_configs: &[(u32, String)], +) -> Result, String> { + let conn = rusqlite::Connection::open_with_flags( + db_path, + rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX, + ) + .map_err(|e| format!("failed to open raindex db: {e}"))?; + + conn.busy_timeout(std::time::Duration::from_secs(2)) + .map_err(|e| format!("failed to set busy_timeout: {e}"))?; + + let mut results = Vec::new(); + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + for (chain_id, ob_address) in orderbook_configs { + let mut info = OrderbookSyncInfo { + chain_id: *chain_id, + orderbook_address: ob_address.clone(), + last_synced_block: 0, + updated_at: None, + latest_trade_timestamp: None, + latest_trade_age: None, + }; + + // Query sync_status table + match conn.query_row( + "SELECT last_synced_block, updated_at FROM sync_status WHERE chain_id = ?1 AND orderbook_address = ?2", + rusqlite::params![*chain_id as i64, ob_address], + |row| { + let block: i64 = row.get(0)?; + let updated_at: Option = row.get(1)?; + Ok((block, updated_at)) + }, + ) { + Ok((block, 
updated_at)) => { + info.last_synced_block = block as u64; + info.updated_at = updated_at; + } + Err(rusqlite::Error::QueryReturnedNoRows) => { + // sync_status row doesn't exist yet — sync hasn't started + tracing::info!( + chain_id = chain_id, + orderbook = %ob_address, + "no sync_status row found" + ); + } + Err(e) => { + tracing::warn!( + error = %e, + chain_id = chain_id, + orderbook = %ob_address, + "failed to query sync_status" + ); + } + } + + // Query latest trade timestamp from take_orders table + match conn.query_row( + "SELECT MAX(block_timestamp) FROM take_orders WHERE chain_id = ?1 AND orderbook_address = ?2", + rusqlite::params![*chain_id as i64, ob_address], + |row| { + let ts: Option = row.get(0)?; + Ok(ts) + }, + ) { + Ok(Some(ts)) if ts > 0 => { + let ts_u64 = ts as u64; + info.latest_trade_timestamp = Some(ts_u64); + info.latest_trade_age = Some(format_age(now, ts_u64)); + } + Ok(_) => {} + Err(e) => { + tracing::warn!( + error = %e, + chain_id = chain_id, + "failed to query latest trade timestamp" + ); + } + } + + results.push(info); + } + + Ok(results) +} + +fn format_age(now_secs: u64, timestamp_secs: u64) -> String { + if timestamp_secs > now_secs { + return "just now".to_string(); + } + + let diff = now_secs - timestamp_secs; + + if diff < 60 { + format!("{diff}s ago") + } else if diff < 3600 { + let minutes = diff / 60; + format!("{minutes}m ago") + } else if diff < 86400 { + let hours = diff / 3600; + let minutes = (diff % 3600) / 60; + if minutes > 0 { + format!("{hours}h {minutes}m ago") + } else { + format!("{hours}h ago") + } + } else { + let days = diff / 86400; + let hours = (diff % 86400) / 3600; + if hours > 0 { + format!("{days}d {hours}h ago") + } else { + format!("{days}d ago") + } + } +} + pub fn routes() -> Vec { - rocket::routes![get_health] + rocket::routes![get_health, get_health_detailed] +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_age_seconds() { + assert_eq!(format_age(1000, 970), "30s 
ago"); + } + + #[test] + fn test_format_age_minutes() { + assert_eq!(format_age(1000, 700), "5m ago"); + } + + #[test] + fn test_format_age_hours_and_minutes() { + assert_eq!(format_age(10000, 2200), "2h 10m ago"); + } + + #[test] + fn test_format_age_days_and_hours() { + assert_eq!(format_age(200000, 10000), "2d 4h ago"); + } + + #[test] + fn test_format_age_future_timestamp() { + assert_eq!(format_age(1000, 2000), "just now"); + } + + #[test] + fn test_format_age_exact_hour() { + assert_eq!(format_age(3600, 0), "1h ago"); + } + + #[test] + fn test_format_age_exact_day() { + assert_eq!(format_age(86400, 0), "1d ago"); + } } diff --git a/src/types/health.rs b/src/types/health.rs index a2d66f1..3e2c50f 100644 --- a/src/types/health.rs +++ b/src/types/health.rs @@ -6,3 +6,73 @@ pub struct HealthResponse { #[schema(example = "ok")] pub status: String, } + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct DetailedHealthResponse { + /// Overall API status: "ok", "degraded", or "error" + #[schema(example = "ok")] + pub status: String, + + /// st0x application database connectivity + pub app_db: DbStatus, + + /// raindex local database connectivity and sync status + pub raindex_db: RaindexDbStatus, +} + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct DbStatus { + /// Whether the database is reachable + #[schema(example = true)] + pub connected: bool, + + /// Error message if not connected + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct RaindexDbStatus { + /// Whether the raindex database file exists and is readable + #[schema(example = true)] + pub connected: bool, + + /// Error message if not connected + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, + + /// Path to the raindex database file + #[serde(skip_serializing_if = "Option::is_none")] + pub db_path: Option, + + /// Per-orderbook sync status from 
the sync_status table + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub orderbooks: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct OrderbookSyncInfo { + /// Chain ID (e.g. 8453 for Base) + #[schema(example = 8453)] + pub chain_id: u32, + + /// Orderbook contract address + #[schema(example = "0xd2938e7c9fe3597f78832ce780feb61945c377d7")] + pub orderbook_address: String, + + /// Last block number synced by raindex + #[schema(example = 12345678)] + pub last_synced_block: u64, + + /// Timestamp when sync_status was last updated (ISO 8601) + #[serde(skip_serializing_if = "Option::is_none")] + pub updated_at: Option, + + /// Timestamp of the most recent trade in the database + #[serde(skip_serializing_if = "Option::is_none")] + pub latest_trade_timestamp: Option, + + /// Human-readable age of the latest trade (e.g. "2h 15m ago") + #[serde(skip_serializing_if = "Option::is_none")] + pub latest_trade_age: Option, +} From e77b2a11fb46d5511aa95bb1410fc9034cc0bed3 Mon Sep 17 00:00:00 2001 From: Alastair Ong Date: Tue, 28 Apr 2026 20:24:50 +0100 Subject: [PATCH 5/5] Snapshot: deployed preview state to be split into per-topic PRs This commit captures the working-tree state currently deployed to api.preview.st0x.io, prior to splitting it into single-topic PRs. Includes: - cache_warmer + supporting caches (block_number, limit_ratio, stale_price_skip, swap_quote) - market_calendar - ops tooling (docs/ops.md, scripts/smoke.sh, uptimerobot-setup.sh) - RPC override config - direct_trades_fetcher extensions - Health detailed endpoint additions for cache_warmer status - Various route + infra adjustments NOTE: lib/rain.orderbook submodule has local modifications that are not committed inside the submodule itself; the submodule pointer in this commit is unchanged. Those upstream changes need separate handling before any per-topic PR can build cleanly against main. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .gitignore | 1 + Cargo.lock | 38 +- Cargo.toml | 2 + config/rest-api.toml | 16 + docs/ops.md | 92 ++++ flake.nix | 8 + os.nix | 9 +- scripts/smoke.sh | 145 +++++++ scripts/uptimerobot-setup.sh | 108 +++++ src/cache.rs | 2 + src/cache_warmer.rs | 132 ++++++ src/config.rs | 20 + src/db/pool.rs | 2 +- src/direct_trades.rs | 339 +++++++++++++-- src/main.rs | 53 ++- src/market_calendar.rs | 93 ++++ src/raindex/block_cache.rs | 199 +++++++++ src/raindex/config.rs | 209 ++++++++- src/raindex/mod.rs | 4 +- src/routes/admin.rs | 6 +- src/routes/health.rs | 109 ++++- src/routes/orders/get_by_owner.rs | 26 +- src/routes/orders/get_by_token.rs | 26 +- src/routes/orders/limit_cache.rs | 100 +++++ src/routes/orders/mod.rs | 594 +++++++++++++++++++++++++- src/routes/orders/stale_price_skip.rs | 59 +++ src/routes/swap/mod.rs | 17 +- src/routes/swap/quote.rs | 85 +++- src/routes/tokens.rs | 20 +- src/routes/trades.rs | 150 ++++++- src/test_helpers.rs | 28 +- src/types/health.rs | 36 ++ 32 files changed, 2605 insertions(+), 123 deletions(-) create mode 100644 docs/ops.md create mode 100755 scripts/smoke.sh create mode 100755 scripts/uptimerobot-setup.sh create mode 100644 src/cache_warmer.rs create mode 100644 src/market_calendar.rs create mode 100644 src/raindex/block_cache.rs create mode 100644 src/routes/orders/limit_cache.rs create mode 100644 src/routes/orders/stale_price_skip.rs diff --git a/.gitignore b/.gitignore index 7ae1d6a..ab9480f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ logs/ data/* !data/.gitkeep docs/book/ +.claude/ diff --git a/Cargo.lock b/Cargo.lock index 01cdca2..75350c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1967,6 +1967,16 @@ dependencies = [ "windows-link", ] +[[package]] +name = "chrono-tz" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3" +dependencies = [ + "chrono", + "phf 
0.12.1", +] + [[package]] name = "ciborium" version = "0.2.2" @@ -6049,17 +6059,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", - "phf_shared", + "phf_shared 0.11.3", "serde", ] +[[package]] +name = "phf" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" +dependencies = [ + "phf_shared 0.12.1", +] + [[package]] name = "phf_generator" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared", + "phf_shared 0.11.3", "rand 0.8.5", ] @@ -6070,7 +6089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ "phf_generator", - "phf_shared", + "phf_shared 0.11.3", "proc-macro2", "quote", "syn 2.0.104", @@ -6085,6 +6104,15 @@ dependencies = [ "siphasher", ] +[[package]] +name = "phf_shared" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -7172,7 +7200,7 @@ checksum = "942fe4724cf552fd28db6b0a2ca5b79e884d40dd8288a4027ed1e9090e0c6f49" dependencies = [ "bitvec", "once_cell", - "phf", + "phf 0.11.3", "revm-primitives", "serde", ] @@ -8960,6 +8988,8 @@ dependencies = [ "argon2", "async-trait", "base64 0.22.1", + "chrono", + "chrono-tz", "clap", "futures", "moka", diff --git a/Cargo.toml b/Cargo.toml index 7b50c84..a64ef54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,8 @@ rain-math-float = { path = "lib/rain.orderbook/lib/rain.interpreter/lib/rain.int wasm-bindgen = "=0.2.100" moka 
= { version = "0.12", features = ["future"] } rusqlite = { version = "0.32" } +chrono = "0.4" +chrono-tz = "0.10" [dev-dependencies] tracing-test = "0.2" diff --git a/config/rest-api.toml b/config/rest-api.toml index 0c67a0d..b8d0387 100644 --- a/config/rest-api.toml +++ b/config/rest-api.toml @@ -5,3 +5,19 @@ rate_limit_global_rpm = 600 rate_limit_per_key_rpm = 60 docs_dir = "/var/lib/st0x-docs" local_db_path = "/mnt/data/st0x-rest-api/raindex.db" + +# Replace the registry's single-URL `rpcs:` list with a pool of public Base +# RPCs. alloy's FallbackLayer (active_transport_count = 1, see +# `mk_read_provider`) health-routes to the best-scored transport and demotes +# any that 429 or error. +[rpc_override] +base = [ + "https://mainnet.base.org", + "https://base.llamarpc.com", + "https://base.drpc.org", + "https://base-rpc.publicnode.com", + "https://base.meowrpc.com", + "https://base-mainnet.public.blastapi.io", + "https://base.gateway.tenderly.co", + "https://base.rpc.subquery.network/public", +] diff --git a/docs/ops.md b/docs/ops.md new file mode 100644 index 0000000..ee817eb --- /dev/null +++ b/docs/ops.md @@ -0,0 +1,92 @@ +# Operations cheat sheet + +Quick journalctl + curl recipes for the deployed `rest-api` service. SSH in with `nix develop -c remote` (or `ssh root@` if your key is in `roles.ssh`). 
+ +## Service health + +```bash +# Quick liveness probe (no auth) +curl -sS https://api.preview.st0x.io/health | jq + +# Full status — includes db connectivity, raindex sync, cache_warmer +curl -sS https://api.preview.st0x.io/health/detailed | jq +``` + +Key fields in `/health/detailed.cache_warmer`: +- `running` — `false` until the warmer completes its first cycle (~15-30s after restart while caches are cold) +- `last_cycle_ms` — should track the steady-state cycle duration; sustained > 10s suggests upstream RPC slowness +- `seconds_since_last_complete` — should bounce between `0` and `~20` (cycle duration + REFRESH_INTERVAL); much higher means the warmer has frozen +- `last_errors` — per-token failures during the last cycle; non-zero is worth investigating + +## Common journalctl queries + +All queries run via `ssh root@api.preview.st0x.io '...'` or after `nix develop -c remote`. + +### 429 rate + +```bash +# Count in the last hour +journalctl -u rest-api --since '1 hour ago' --no-pager | grep -c 'error code 429' + +# Per-RPC breakdown (when the backing RPC is identifiable from the error body) +journalctl -u rest-api --since '1 hour ago' --no-pager \ + | grep -oE 'error code -32016|error code 429|StalePrice' \ + | sort | uniq -c +``` + +### Cache warmer cycles + +```bash +# Last 10 cycle durations + completion timestamps +journalctl -u rest-api --since '10 minutes ago' --no-pager \ + | grep 'cache warmer: orders-by-token refresh complete' \ + | sed -E 's/.*timestamp":"([^"]+)".*duration_ms":"?([0-9]+)"?.*/\1 cycle_ms=\2/' \ + | tail -10 +``` + +### ERROR-level rate + +```bash +journalctl -u rest-api --since '5 minutes ago' --no-pager \ + | grep -c 'level":"ERROR' +``` + +Most ERROR lines are benign (`No matching routes for HEAD /health` from external uptime checkers, or `task NNNN was cancelled` during graceful restart). 
Real signal: +- `failed to query orders` outside a deploy window +- `applied RPC override` should appear once on startup with the expected `url_count` + +### Slow requests + +```bash +# Requests > 5s in the last hour (raw rocket access logs) +journalctl -u rest-api --since '1 hour ago' --no-pager \ + | grep 'request completed' \ + | grep -oE 'duration_ms":[0-9]+\.[0-9]+' \ + | awk -F: '$2 > 5000 { print }' \ + | wc -l +``` + +## Smoke tests + +```bash +# Run the smoke battery against the live preview +API_KEY= API_SECRET= ./scripts/smoke.sh + +# Override target +API_URL=https://api.st0x.io API_KEY=... API_SECRET=... ./scripts/smoke.sh +``` + +The script returns non-zero on any FAIL. Run post-deploy or wire into a cron + alert. SLOW (over `LATENCY_BUDGET_MS=3000`) is reported as a warning, not a failure. + +## Suggested cron / external monitoring + +A minimal external probe (run from any machine that can reach the public hostname): + +```bash +# Run every 5 minutes; alert on non-zero exit or 502/503 in the body +*/5 * * * * cd /path/to/st0x.rest.api && \ + API_KEY=... API_SECRET=... ./scripts/smoke.sh > /tmp/smoke.last 2>&1 || \ + alert-channel "smoke failed: $(tail -5 /tmp/smoke.last)" +``` + +Higher-fidelity options (Prometheus + Grafana, Datadog, etc.) are deferred — the smoke + journalctl recipes cover most regressions for a single-instance preview. diff --git a/flake.nix b/flake.nix index 417ed40..5ce492e 100644 --- a/flake.nix +++ b/flake.nix @@ -25,6 +25,14 @@ specialArgs = { docsRoot = self.packages.x86_64-linux.st0x-docs; + # Public hostname this box answers on. Drives the nginx vhost + # name and the ACME cert. Defaults to `api.st0x.io` for prod; + # override with `SITE_HOSTNAME` env var for preview / staging + # deploys (e.g. `SITE_HOSTNAME=api.preview.st0x.io`). Requires + # `--impure` (already passed by the deploy wrappers). 
+ siteHostname = + let env = builtins.getEnv "SITE_HOSTNAME"; + in if env == "" then "api.st0x.io" else env; }; modules = diff --git a/os.nix b/os.nix index 0f41a3e..09e3498 100644 --- a/os.nix +++ b/os.nix @@ -1,4 +1,4 @@ -{ pkgs, lib, modulesPath, docsRoot, ... }: +{ pkgs, lib, modulesPath, docsRoot, siteHostname, ... }: let inherit (import ./keys.nix) roles; @@ -105,9 +105,14 @@ in { # Rate-limit zone: 10 req/s per IP, burst 20 appendHttpConfig = '' limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; + + # UptimeRobot's keyword monitor operates on the raw response bytes + # without decompressing, so a gzip'd JSON response causes false + # "Keyword Not Found" alarms. Send uncompressed bodies to UR only. + gzip_disable "UptimeRobot"; ''; - virtualHosts."api.st0x.io" = { + virtualHosts."${siteHostname}" = { enableACME = true; forceSSL = true; diff --git a/scripts/smoke.sh b/scripts/smoke.sh new file mode 100755 index 0000000..43540ac --- /dev/null +++ b/scripts/smoke.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash +# smoke.sh — End-to-end correctness + latency smoke tests against a deployed +# st0x-rest-api instance. Designed to be run post-deploy or on a cron. +# +# Usage: +# API_URL=https://api.preview.st0x.io \ +# API_KEY= API_SECRET= \ +# ./scripts/smoke.sh +# +# Exits 0 if all checks pass, non-zero otherwise. Prints a summary with +# per-check status + latency. Uses only curl + jq. + +set -uo pipefail + +API_URL="${API_URL:-https://api.preview.st0x.io}" +API_KEY="${API_KEY:-}" +API_SECRET="${API_SECRET:-}" + +# Tokens to probe. Override via env if the registry changes. +USDC_BASE="${SMOKE_USDC:-0x833589fcd6edb6e08f4c7c32d4f71b54bda02913}" +SAMPLE_OWNER="${SMOKE_OWNER:-0x71b94911fd1ce621fc40970450004c544e5287a8}" + +# Latency budget per endpoint, in ms. Failures over budget are warnings, not +# hard failures, so a flaky network doesn't sink CI; tune if real regressions +# slip through. 
+LATENCY_BUDGET_MS="${LATENCY_BUDGET_MS:-3000}" + +PASS=0 +FAIL=0 +WARN=0 + +color() { + case "$1" in + green) printf '\033[32m%s\033[0m' "$2" ;; + red) printf '\033[31m%s\033[0m' "$2" ;; + yellow) printf '\033[33m%s\033[0m' "$2" ;; + *) printf '%s' "$2" ;; + esac +} + +# probe NAME METHOD PATH EXPECTED_STATUS [JQ_FILTER] +# The optional JQ_FILTER must produce a non-null, non-empty value for the +# check to pass — used to assert on response shape, not just status code. +probe() { + local name="$1" + local method="$2" + local path="$3" + local expected_status="$4" + local jq_filter="${5:-}" + local auth_header="" + if [[ -n "$API_KEY" && -n "$API_SECRET" ]]; then + auth_header="-u $API_KEY:$API_SECRET" + fi + + local tmp + tmp=$(mktemp) + # shellcheck disable=SC2086 + local result + result=$(curl -sS -X "$method" $auth_header \ + -o "$tmp" \ + -w '%{http_code} %{time_total}\n' \ + --max-time 30 \ + "$API_URL$path" 2>&1) || true + + local status time_s + status=$(echo "$result" | awk '{print $1}') + time_s=$(echo "$result" | awk '{print $2}') + local time_ms + time_ms=$(awk -v t="$time_s" 'BEGIN { printf "%d", t * 1000 }') + + local check_status="FAIL" + local detail="" + + if [[ "$status" == "$expected_status" ]]; then + if [[ -n "$jq_filter" ]]; then + if jq -e "$jq_filter" >/dev/null 2>&1 < "$tmp"; then + check_status="PASS" + else + check_status="FAIL" + detail="(shape mismatch)" + fi + else + check_status="PASS" + fi + else + body=$(head -c 200 "$tmp") + detail="(got $status, body: $body)" + fi + + rm -f "$tmp" + + local latency_marker="" + if [[ "$check_status" == "PASS" && "$time_ms" -gt "$LATENCY_BUDGET_MS" ]]; then + latency_marker=" $(color yellow SLOW)" + WARN=$((WARN + 1)) + fi + + case "$check_status" in + PASS) + printf ' [%s] %-50s %4dms%s\n' "$(color green PASS)" "$name" "$time_ms" "$latency_marker" + PASS=$((PASS + 1)) + ;; + *) + printf ' [%s] %-50s %4dms %s\n' "$(color red FAIL)" "$name" "$time_ms" "$detail" + FAIL=$((FAIL + 1)) + ;; + esac +} + 
+echo "smoke tests against $API_URL" +echo " budget per check: ${LATENCY_BUDGET_MS}ms" +echo + +# 1. Public endpoints (no auth) +probe "GET /health" GET "/health" 200 '.status == "ok"' +probe "GET /health/detailed" GET "/health/detailed" 200 '.status' +probe "GET /health/detailed has cache_warmer" GET "/health/detailed" 200 '.cache_warmer' + +# 2. Protected endpoints reject missing/invalid auth +SAVED_KEY="$API_KEY"; SAVED_SECRET="$API_SECRET" +API_KEY="" API_SECRET="" +probe "GET /v1/tokens (no auth)" GET "/v1/tokens" 401 +API_KEY="$SAVED_KEY"; API_SECRET="$SAVED_SECRET" + +# 3. Authenticated endpoints — only run if creds are set +if [[ -n "$API_KEY" && -n "$API_SECRET" ]]; then + probe "GET /v1/tokens" GET "/v1/tokens" 200 '.tokens | type == "array"' + probe "GET /v1/orders/token/{usdc}" GET "/v1/orders/token/$USDC_BASE" 200 '.orders | type == "array" and .pagination' + probe "GET /v1/orders/owner/{owner}" GET "/v1/orders/owner/$SAMPLE_OWNER" 200 '.orders | type == "array"' + probe "GET /v1/trades/token/{usdc}" GET "/v1/trades/token/$USDC_BASE?pageSize=10" 200 '.trades | type == "array"' + probe "GET /v1/trades/{owner}" GET "/v1/trades/$SAMPLE_OWNER?pageSize=10" 200 '.trades | type == "array"' + # Path validation only kicks in after auth succeeds — Rocket auth fairing + # runs first, so an invalid-address probe without auth would 401. + probe "GET /v1/orders/token/" GET "/v1/orders/token/not-an-address" 422 +else + echo " (skipping authenticated checks; set API_KEY + API_SECRET to enable)" +fi + +echo +echo "summary: $(color green "$PASS pass"), $(color red "$FAIL fail"), $(color yellow "$WARN slow")" + +if [[ "$FAIL" -gt 0 ]]; then + exit 1 +fi +exit 0 diff --git a/scripts/uptimerobot-setup.sh b/scripts/uptimerobot-setup.sh new file mode 100755 index 0000000..09dd0aa --- /dev/null +++ b/scripts/uptimerobot-setup.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +# uptimerobot-setup.sh — Creates the 3 baseline monitors against +# `${API_URL}` via UptimeRobot's REST API. 
Run once per environment +# (preview, prod). Re-running creates duplicates, so check the dashboard +# first if you're not sure whether they already exist. +# +# Usage: +# UPTIMEROBOT_API_KEY= ./scripts/uptimerobot-setup.sh +# +# # Override target (default: https://api.preview.st0x.io) +# API_URL=https://api.st0x.io \ +# UPTIMEROBOT_API_KEY=... ./scripts/uptimerobot-setup.sh +# +# # Attach an existing alert contact (Telegram, email, etc.) at creation: +# UPTIMEROBOT_API_KEY=... \ +# ALERT_CONTACT_ID=8253505 \ +# ./scripts/uptimerobot-setup.sh +# +# Get your API key from: https://uptimerobot.com/integrations/ → "Main API Key". + +set -uo pipefail + +: "${UPTIMEROBOT_API_KEY:?UPTIMEROBOT_API_KEY is required}" +API_URL="${API_URL:-https://api.preview.st0x.io}" +INTERVAL_SECONDS="${INTERVAL:-300}" # 5 minutes (free-tier minimum) +# Optional: attach an existing alert contact (e.g. a Telegram integration). +# Discover candidate IDs with: +# curl -sS -X POST -d api_key=$UPTIMEROBOT_API_KEY -d format=json \ +# https://api.uptimerobot.com/v2/getAlertContacts | jq '.alert_contacts' +ALERT_CONTACT_ID="${ALERT_CONTACT_ID:-}" + +# Use the full hostname as the friendly-name prefix so alerts (especially +# Telegram pushes that show only the friendly_name) immediately identify +# which environment fired. Override with FRIENDLY_LABEL if you'd rather +# something shorter. +DEFAULT_LABEL=$(echo "$API_URL" | sed -E 's|https?://||; s|/.*$||') +LABEL="${FRIENDLY_LABEL:-$DEFAULT_LABEL}" + +echo "Creating UptimeRobot monitors for $API_URL (label: $LABEL)" +echo + +# create_monitor NAME URL TYPE THRESHOLD_MIN [KEYWORD] +# TYPE: 1 = HTTP(s) status, 2 = HTTP(s) keyword +# THRESHOLD_MIN: minutes of detected down before alerting (0 = immediate). 
+create_monitor() { + local name="$1" + local url="$2" + local type="$3" + local threshold="$4" + local keyword="${5:-}" + + local args=( + --data-urlencode "api_key=$UPTIMEROBOT_API_KEY" + --data-urlencode "format=json" + --data-urlencode "friendly_name=$name" + --data-urlencode "url=$url" + --data-urlencode "type=$type" + --data-urlencode "interval=$INTERVAL_SECONDS" + ) + + if [[ -n "$ALERT_CONTACT_ID" ]]; then + args+=(--data-urlencode "alert_contacts=${ALERT_CONTACT_ID}_${threshold}_0") + fi + + if [[ "$type" == "2" && -n "$keyword" ]]; then + # 1 = "Exists" — alert when keyword is NOT found in body. + args+=( + --data-urlencode "keyword_type=1" + --data-urlencode "keyword_case_type=0" + --data-urlencode "keyword_value=$keyword" + ) + fi + + local resp + resp=$(curl -sS -X POST "${args[@]}" \ + https://api.uptimerobot.com/v2/newMonitor) + + local stat id + stat=$(echo "$resp" | jq -r '.stat // "unknown"') + if [[ "$stat" == "ok" ]]; then + id=$(echo "$resp" | jq -r '.monitor.id // "?"') + echo " [ok] $name (id=$id, threshold=${threshold}min)" + else + echo " [fail] $name" + echo " response: $resp" + fi +} + +# Threshold 0 → page immediately on first detected failure (hard down). +# Threshold 5 → wait one extra check interval before paging (avoids +# flapping during deploy restarts and the post-restart cache-warmer +# transient). +create_monitor "$LABEL — liveness — /health" \ + "$API_URL/health" 1 0 + +create_monitor "$LABEL — component health — /health/detailed status=ok" \ + "$API_URL/health/detailed" 2 5 '"status":"ok"' + +create_monitor "$LABEL — cache warmer — /health/detailed running=true" \ + "$API_URL/health/detailed" 2 5 '"running":true' + +echo +if [[ -n "$ALERT_CONTACT_ID" ]]; then + echo "Alert contact $ALERT_CONTACT_ID attached to all 3 monitors." +else + echo "No ALERT_CONTACT_ID set — monitors won't page until you wire up alert" + echo "contacts via the dashboard or re-run with ALERT_CONTACT_ID set." 
+fi diff --git a/src/cache.rs b/src/cache.rs index 5567c54..9855eee 100644 --- a/src/cache.rs +++ b/src/cache.rs @@ -3,6 +3,8 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; +/// Thread-safe cache backed by moka. Cloning is cheap (Arc-based). +#[derive(Clone)] pub(crate) struct AppCache(Cache) where K: std::hash::Hash + Eq + Send + Sync + 'static, diff --git a/src/cache_warmer.rs b/src/cache_warmer.rs new file mode 100644 index 0000000..7196675 --- /dev/null +++ b/src/cache_warmer.rs @@ -0,0 +1,132 @@ +use crate::raindex::{BlockNumberCache, SharedRaindexProvider}; +use crate::routes::orders::{ + process_get_orders_by_token, LimitOrderRatioCache, OrdersByTokenCache, + RaindexOrdersListDataSource, StalePriceSkipCache, MAX_PAGE_SIZE, +}; +use std::sync::Arc; +use std::time::{Instant, SystemTime, UNIX_EPOCH}; +use tokio::sync::RwLock; +use tokio::time::{sleep, Duration}; + +const REFRESH_INTERVAL: Duration = Duration::from_secs(10); + +/// Snapshot of the cache warmer's most recent activity, exposed via +/// `/v1/health/detailed`. Mutated under a short-lived write lock at the end +/// of each cycle. +#[derive(Debug, Default, Clone)] +pub(crate) struct CacheWarmerStats { + pub total_cycles: u64, + pub last_cycle_ms: Option, + pub last_tokens: Option, + pub last_errors: Option, + /// Unix timestamp (seconds) of the most recent cycle completion. + pub last_complete_at_unix: Option, +} + +pub(crate) type SharedCacheWarmerStats = Arc>; + +pub(crate) fn shared_cache_warmer_stats() -> SharedCacheWarmerStats { + Arc::new(RwLock::new(CacheWarmerStats::default())) +} + +/// Background loop that keeps the orders-by-token cache warm. +/// +/// For every token in the registry it calls `process_get_orders_by_token` +/// with `side=None, page=1, page_size=MAX_PAGE_SIZE` and inserts the result +/// into the shared cache. 
After each cycle the loop sleeps for +/// `REFRESH_INTERVAL` regardless of how long the cycle took, guaranteeing a +/// fixed idle gap between cycles so an over-running cycle never causes the +/// next one to start back-to-back (the pathology that produced the original +/// 429 storm). +pub(crate) async fn run_orders_by_token_warmer( + cache: OrdersByTokenCache, + shared_raindex: SharedRaindexProvider, + block_number_cache: BlockNumberCache, + limit_ratio_cache: LimitOrderRatioCache, + stale_price_skip_cache: StalePriceSkipCache, + stats: SharedCacheWarmerStats, +) { + loop { + let start = Instant::now(); + + // Collect token addresses from the registry under a short-lived read lock. + let token_addresses: Vec = { + let raindex = shared_raindex.read().await; + match raindex.client().get_all_tokens() { + Ok(tokens) => tokens.values().map(|t| t.address).collect(), + Err(e) => { + tracing::warn!(error = %e, "cache warmer: failed to get token list, skipping cycle"); + sleep(REFRESH_INTERVAL).await; + continue; + } + } + }; + + if token_addresses.is_empty() { + tracing::debug!("cache warmer: no tokens in registry, skipping cycle"); + sleep(REFRESH_INTERVAL).await; + continue; + } + + let page: u16 = 1; + let page_size: u16 = MAX_PAGE_SIZE; + let mut ok_count: usize = 0; + let mut err_count: usize = 0; + + for addr in &token_addresses { + // Acquire the read lock per-token so other writers (e.g. admin + // registry reload) are not blocked for the entire cycle. 
+ let result = { + let raindex = shared_raindex.read().await; + let ds = RaindexOrdersListDataSource { + client: raindex.client(), + block_number_cache: &block_number_cache, + limit_ratio_cache: &limit_ratio_cache, + stale_price_skip_cache: &stale_price_skip_cache, + }; + process_get_orders_by_token(&ds, *addr, None, Some(page), Some(page_size)).await + }; + + match result { + Ok(response) => { + let cache_key = (*addr, None, page, page_size); + cache.insert(cache_key, response).await; + ok_count += 1; + } + Err(e) => { + tracing::warn!( + token = ?addr, + error = ?e, + "cache warmer: failed to refresh orders for token" + ); + err_count += 1; + } + } + } + + let cycle_ms = start.elapsed().as_millis() as u64; + tracing::info!( + tokens = token_addresses.len(), + ok = ok_count, + errors = err_count, + duration_ms = cycle_ms, + "cache warmer: orders-by-token refresh complete" + ); + + { + let mut s = stats.write().await; + s.total_cycles += 1; + s.last_cycle_ms = Some(cycle_ms); + s.last_tokens = Some(token_addresses.len() as u32); + s.last_errors = Some(err_count as u32); + s.last_complete_at_unix = Some( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0), + ); + } + + sleep(REFRESH_INTERVAL).await; + } +} diff --git a/src/config.rs b/src/config.rs index fc3f127..ebf1bd9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,5 @@ use serde::Deserialize; +use std::collections::HashMap; use std::path::Path; #[derive(Deserialize)] @@ -10,6 +11,25 @@ pub struct Config { pub rate_limit_per_key_rpm: u64, pub docs_dir: String, pub local_db_path: String, + /// Optional per-network RPC URL override. + /// + /// Replaces the `rpcs:` list in the rain.strategies registry settings for + /// the named network. Use to point at private/paid RPC endpoints without + /// forking the registry. 
Example in `config.toml`: + /// + /// ```toml + /// [rpc_override] + /// base = [ + /// "https://base-mainnet.g.alchemy.com/v2/YOUR_KEY", + /// "https://base.drpc.org", + /// ] + /// ``` + /// + /// When multiple URLs are given the underlying provider treats them as + /// health-routed failover (alloy `FallbackLayer` with + /// `active_transport_count = 1`). + #[serde(default)] + pub rpc_override: HashMap>, } impl Config { diff --git a/src/db/pool.rs b/src/db/pool.rs index 44f74ae..bb0bc81 100644 --- a/src/db/pool.rs +++ b/src/db/pool.rs @@ -9,7 +9,7 @@ pub(super) async fn create(database_url: &str) -> Result { .foreign_keys(true); let pool = SqlitePoolOptions::new() - .max_connections(5) + .max_connections(20) .connect_with(options) .await?; diff --git a/src/direct_trades.rs b/src/direct_trades.rs index e980f42..1548d80 100644 --- a/src/direct_trades.rs +++ b/src/direct_trades.rs @@ -1,34 +1,66 @@ /// Direct SQLite trade fetcher /// /// Bypasses the rain.orderbook library's per-query connection model by -/// maintaining a single shared connection. Runs a batch SQL query for -/// multiple order hashes in one call instead of N individual queries -/// that each open their own connection. +/// running batch SQL queries for multiple order hashes in one call instead +/// of N individual queries that each open their own connection. +/// +/// Opens a fresh read-only connection per query so that concurrent API +/// requests can read in parallel under SQLite WAL mode without blocking +/// each other or the background sync writer. 
use crate::error::ApiError; use crate::types::order::OrderTradeEntry; use alloy::primitives::{Address, B256}; use rain_math_float::Float; -use rusqlite::Connection; +use rusqlite::{Connection, OpenFlags}; use std::collections::HashMap; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::str::FromStr; -use std::sync::{Arc, Mutex}; use std::time::Instant; use tokio::task::spawn_blocking; -/// Holds a shared SQLite connection to the raindex local database. +/// Holds configuration for opening read-only SQLite connections to the +/// raindex local database. Each query opens its own connection so that +/// concurrent readers never block each other (SQLite WAL allows this). pub(crate) struct DirectTradesFetcher { - conn: Arc>, + db_path: PathBuf, chain_id: i64, orderbook_address: String, } +/// Open a read-only connection with WAL mode and appropriate timeouts. +fn open_read_connection(db_path: &Path) -> Result { + let conn = Connection::open_with_flags( + db_path, + OpenFlags::SQLITE_OPEN_READ_ONLY + | OpenFlags::SQLITE_OPEN_NO_MUTEX + | OpenFlags::SQLITE_OPEN_URI, + ) + .map_err(|e| { + tracing::error!(error = %e, "failed to open raindex db for reading"); + ApiError::Internal("trade query failed".into()) + })?; + + conn.pragma_update(None, "journal_mode", "wal") + .map_err(|e| { + tracing::error!(error = %e, "failed to set WAL"); + ApiError::Internal("trade query failed".into()) + })?; + conn.busy_timeout(std::time::Duration::from_secs(10)) + .map_err(|e| { + tracing::error!(error = %e, "failed to set busy_timeout"); + ApiError::Internal("trade query failed".into()) + })?; + + Ok(conn) +} + impl DirectTradesFetcher { pub(crate) fn new( db_path: &Path, chain_id: u32, orderbook_address: Address, ) -> Result { + // Open a temporary connection to create indexes, then drop it. 
let conn = Connection::open(db_path).map_err(|e| format!("failed to open raindex db: {e}"))?; @@ -47,6 +79,8 @@ impl DirectTradesFetcher { ON vault_balance_changes (chain_id, orderbook_address, owner, token, vault_id, block_number, log_index)", "CREATE INDEX IF NOT EXISTS idx_take_orders_sender \ ON take_orders (chain_id, orderbook_address, sender)", + "CREATE INDEX IF NOT EXISTS idx_take_orders_sender_covering \ + ON take_orders (chain_id, orderbook_address, sender, transaction_hash, block_timestamp)", ]; for sql in &indexes { if let Err(e) = conn.execute_batch(sql) { @@ -54,8 +88,10 @@ impl DirectTradesFetcher { } } + drop(conn); + Ok(Self { - conn: Arc::new(Mutex::new(conn)), + db_path: db_path.to_path_buf(), chain_id: chain_id as i64, orderbook_address: format!("{:#x}", orderbook_address), }) @@ -70,17 +106,14 @@ impl DirectTradesFetcher { return Ok(HashMap::new()); } - let conn = Arc::clone(&self.conn); + let db_path = self.db_path.clone(); let chain_id = self.chain_id; let ob_addr = self.orderbook_address.clone(); let hash_strings: Vec = hashes.iter().map(|h| format!("{:#x}", h)).collect(); spawn_blocking(move || { let start = Instant::now(); - let conn = conn.lock().map_err(|e| { - tracing::error!(error = %e, "failed to lock direct trades connection"); - ApiError::Internal("trade query failed".into()) - })?; + let conn = open_read_connection(&db_path)?; let placeholders: Vec = (0..hash_strings.len()) .map(|i| format!("?{}", i + 3)) @@ -168,17 +201,14 @@ impl DirectTradesFetcher { &self, sender: &Address, ) -> Result, ApiError> { - let conn = Arc::clone(&self.conn); + let db_path = self.db_path.clone(); let chain_id = self.chain_id; let ob_addr = self.orderbook_address.clone(); let sender_hex = format!("{:#x}", sender); spawn_blocking(move || { let start = Instant::now(); - let conn = conn.lock().map_err(|e| { - tracing::error!(error = %e, "failed to lock direct trades connection"); - ApiError::Internal("taker trades query failed".into()) - })?; + let conn = 
open_read_connection(&db_path)?; let mut stmt = conn .prepare( @@ -239,17 +269,14 @@ impl DirectTradesFetcher { &self, tx_hash: &B256, ) -> Result>, ApiError> { - let conn = Arc::clone(&self.conn); + let db_path = self.db_path.clone(); let chain_id = self.chain_id; let ob_addr = self.orderbook_address.clone(); let tx_hex = format!("{:#x}", tx_hash); spawn_blocking(move || { let start = Instant::now(); - let conn = conn.lock().map_err(|e| { - tracing::error!(error = %e, "failed to lock direct trades connection"); - ApiError::Internal("trade query failed".into()) - })?; + let conn = open_read_connection(&db_path)?; let query = build_tx_hash_query(); let mut stmt = conn.prepare(&query).map_err(|e| { @@ -314,6 +341,116 @@ impl DirectTradesFetcher { ApiError::Internal("trade query failed".into()) })? } + + /// Fetch enriched trades for multiple transaction hashes in a single batch query. + /// Returns trades grouped by tx_hash with all fields needed for TradesByTxResponse + /// (order_owner, token addresses, block_number, etc.). + /// This avoids the slow library path entirely. 
+ pub(crate) async fn fetch_taker_tx_trades( + &self, + tx_hashes: &[B256], + ) -> Result>, ApiError> { + if tx_hashes.is_empty() { + return Ok(HashMap::new()); + } + + let db_path = self.db_path.clone(); + let chain_id = self.chain_id; + let ob_addr = self.orderbook_address.clone(); + let tx_hex_strings: Vec = tx_hashes.iter().map(|h| format!("{:#x}", h)).collect(); + + spawn_blocking(move || { + let start = Instant::now(); + let conn = open_read_connection(&db_path)?; + + let placeholders: Vec = (0..tx_hex_strings.len()) + .map(|i| format!("?{}", i + 3)) + .collect(); + let in_clause = placeholders.join(", "); + let query = build_taker_tx_batch_query(&in_clause); + + let mut stmt = conn.prepare(&query).map_err(|e| { + tracing::error!(error = %e, "failed to prepare taker tx batch query"); + ApiError::Internal("trade query failed".into()) + })?; + + let mut params: Vec> = + Vec::with_capacity(tx_hex_strings.len() + 2); + params.push(Box::new(chain_id)); + params.push(Box::new(ob_addr)); + for h in &tx_hex_strings { + params.push(Box::new(h.clone())); + } + let param_refs: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(|p| p.as_ref()).collect(); + + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(RawEnrichedTradeRow { + order_hash: row.get(0)?, + transaction_hash: row.get(1)?, + block_number: row.get(2)?, + block_timestamp: row.get(3)?, + sender: row.get(4)?, + order_owner: row.get(5)?, + input_delta: row.get(6)?, + output_delta_raw: row.get(7)?, + input_token: row.get(8)?, + output_token: row.get(9)?, + input_token_symbol: row.get(10)?, + output_token_symbol: row.get(11)?, + input_token_decimals: row.get(12)?, + output_token_decimals: row.get(13)?, + }) + }) + .map_err(|e| { + tracing::error!(error = %e, "taker tx batch query failed"); + ApiError::Internal("trade query failed".into()) + })?; + + let mut result: HashMap> = HashMap::new(); + let mut row_count = 0u32; + + for row_result in rows { + let raw = row_result.map_err(|e| { + 
tracing::error!(error = %e, "failed to read enriched trade row"); + ApiError::Internal("trade query failed".into()) + })?; + + row_count += 1; + + match convert_enriched_trade(&raw) { + Ok(enriched) => { + result + .entry(enriched.transaction_hash) + .or_default() + .push(enriched); + } + Err(e) => { + tracing::warn!( + error = %e, + order_hash = %raw.order_hash, + "skipping malformed enriched trade row" + ); + } + } + } + + tracing::info!( + tx_count = tx_hex_strings.len(), + trade_rows = row_count, + duration_ms = start.elapsed().as_millis() as u64, + "direct taker tx batch query completed" + ); + + Ok(result) + }) + .await + .map_err(|e| { + tracing::error!(error = %e, "taker tx batch blocking task failed"); + ApiError::Internal("trade query failed".into()) + })? + } } struct RawTradeRow { @@ -326,6 +463,25 @@ struct RawTradeRow { trade_id: String, } +/// Enriched trade row with token and owner info for building TradesByTxResponse directly. +#[allow(dead_code)] +pub(crate) struct EnrichedTradeRow { + pub order_hash: B256, + pub transaction_hash: B256, + pub block_number: u64, + pub block_timestamp: u64, + pub sender: Address, + pub order_owner: Address, + pub input_amount: String, + pub output_amount: String, + pub input_token: Address, + pub output_token: Address, + pub input_token_symbol: String, + pub output_token_symbol: String, + pub input_token_decimals: u8, + pub output_token_decimals: u8, +} + fn convert_raw_trade(raw: &RawTradeRow) -> Result<(B256, OrderTradeEntry), ApiError> { let order_hash = B256::from_str(&raw.order_hash) .map_err(|e| ApiError::Internal(format!("invalid order hash: {e}")))?; @@ -383,6 +539,141 @@ fn negate_and_format_float_hex(hex: &str) -> Result { }) } +struct RawEnrichedTradeRow { + order_hash: String, + transaction_hash: String, + block_number: i64, + block_timestamp: i64, + sender: String, + order_owner: String, + input_delta: String, + output_delta_raw: String, + input_token: Option, + output_token: Option, + 
input_token_symbol: Option, + output_token_symbol: Option, + input_token_decimals: Option, + output_token_decimals: Option, +} + +fn convert_enriched_trade(raw: &RawEnrichedTradeRow) -> Result { + let order_hash = B256::from_str(&raw.order_hash) + .map_err(|e| ApiError::Internal(format!("invalid order hash: {e}")))?; + let tx_hash = B256::from_str(&raw.transaction_hash) + .map_err(|e| ApiError::Internal(format!("invalid tx hash: {e}")))?; + let sender = Address::from_str(&raw.sender) + .map_err(|e| ApiError::Internal(format!("invalid sender address: {e}")))?; + let order_owner = Address::from_str(&raw.order_owner) + .map_err(|e| ApiError::Internal(format!("invalid order owner: {e}")))?; + let input_token = raw + .input_token + .as_deref() + .map(Address::from_str) + .transpose() + .map_err(|e| ApiError::Internal(format!("invalid input token: {e}")))? + .unwrap_or(Address::ZERO); + let output_token = raw + .output_token + .as_deref() + .map(Address::from_str) + .transpose() + .map_err(|e| ApiError::Internal(format!("invalid output token: {e}")))? + .unwrap_or(Address::ZERO); + + let input_amount = format_float_hex(&raw.input_delta)?; + let output_amount = negate_and_format_float_hex(&raw.output_delta_raw)?; + + Ok(EnrichedTradeRow { + order_hash, + transaction_hash: tx_hash, + block_number: raw.block_number as u64, + block_timestamp: raw.block_timestamp as u64, + sender, + order_owner, + input_amount, + output_amount, + input_token, + output_token, + input_token_symbol: raw.input_token_symbol.clone().unwrap_or_default(), + output_token_symbol: raw.output_token_symbol.clone().unwrap_or_default(), + input_token_decimals: raw.input_token_decimals.unwrap_or(0) as u8, + output_token_decimals: raw.output_token_decimals.unwrap_or(0) as u8, + }) +} + +/// Build a batch query for trades across multiple transaction hashes. +/// Joins order_ios + erc20_tokens to get token addresses and metadata. 
+/// ?1 = chain_id, ?2 = orderbook_address, ?3..N = transaction hashes +fn build_taker_tx_batch_query(in_clause: &str) -> String { + format!( + r#" +SELECT + oe.order_hash, + t.transaction_hash, + t.block_number, + t.block_timestamp, + t.sender, + oe.order_owner, + t.taker_output AS input_delta, + t.taker_input AS output_delta_raw, + io_in.token AS input_token, + io_out.token AS output_token, + tok_in.symbol AS input_token_symbol, + tok_out.symbol AS output_token_symbol, + tok_in.decimals AS input_token_decimals, + tok_out.decimals AS output_token_decimals +FROM take_orders t +JOIN order_events oe + ON oe.chain_id = t.chain_id + AND oe.orderbook_address = t.orderbook_address + AND oe.order_owner = t.order_owner + AND oe.order_nonce = t.order_nonce + AND oe.event_type = 'AddOrderV3' + AND (oe.block_number < t.block_number + OR (oe.block_number = t.block_number AND oe.log_index <= t.log_index)) + AND NOT EXISTS ( + SELECT 1 FROM order_events newer + WHERE newer.chain_id = oe.chain_id + AND newer.orderbook_address = oe.orderbook_address + AND newer.order_owner = oe.order_owner + AND newer.order_nonce = oe.order_nonce + AND newer.event_type = 'AddOrderV3' + AND (newer.block_number < t.block_number + OR (newer.block_number = t.block_number AND newer.log_index <= t.log_index)) + AND (newer.block_number > oe.block_number + OR (newer.block_number = oe.block_number AND newer.log_index > oe.log_index)) + ) +LEFT JOIN order_ios io_in + ON io_in.chain_id = oe.chain_id + AND io_in.orderbook_address = oe.orderbook_address + AND io_in.transaction_hash = oe.transaction_hash + AND io_in.log_index = oe.log_index + AND io_in.io_index = t.input_io_index + AND io_in.io_type = 'input' +LEFT JOIN order_ios io_out + ON io_out.chain_id = oe.chain_id + AND io_out.orderbook_address = oe.orderbook_address + AND io_out.transaction_hash = oe.transaction_hash + AND io_out.log_index = oe.log_index + AND io_out.io_index = t.output_io_index + AND io_out.io_type = 'output' +LEFT JOIN erc20_tokens 
tok_in + ON tok_in.chain_id = oe.chain_id + AND tok_in.orderbook_address = oe.orderbook_address + AND tok_in.token_address = io_in.token +LEFT JOIN erc20_tokens tok_out + ON tok_out.chain_id = oe.chain_id + AND tok_out.orderbook_address = oe.orderbook_address + AND tok_out.token_address = io_out.token +WHERE t.chain_id = ?1 + AND t.orderbook_address = ?2 + AND t.transaction_hash IN ({in_clause}) +ORDER BY t.transaction_hash, t.block_timestamp DESC, t.log_index DESC +"#, + in_clause = in_clause + ) +} + /// Build a batch trade query with a dynamic IN-clause. This is a simplified /// version of rain.orderbook's `fetch_order_trades/query.sql` that: /// - Accepts multiple order hashes at once (via IN-clause) diff --git a/src/main.rs b/src/main.rs index 4413d56..a43df61 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ extern crate rocket; mod auth; mod cache; +mod cache_warmer; mod catchers; mod cli; mod config; @@ -10,6 +11,7 @@ mod db; mod direct_trades; mod error; mod fairings; +mod market_calendar; mod raindex; mod routes; mod telemetry; @@ -119,12 +121,19 @@ fn configure_cors() -> Result { .to_cors()?) 
} +#[allow(clippy::too_many_arguments)] pub(crate) fn rocket( pool: db::DbPool, rate_limiter: fairings::RateLimiter, raindex_config: raindex::SharedRaindexProvider, docs_dir: String, direct_trades_fetcher: Option, + orders_by_token_cache: routes::orders::OrdersByTokenCache, + block_number_cache: raindex::BlockNumberCache, + limit_ratio_cache: routes::orders::LimitOrderRatioCache, + stale_price_skip_cache: routes::orders::StalePriceSkipCache, + swap_quote_cache: routes::swap::SwapQuoteCache, + cache_warmer_stats: cache_warmer::SharedCacheWarmerStats, ) -> Result, StartupError> { let cors = configure_cors()?; @@ -137,7 +146,6 @@ pub(crate) fn rocket( let trades_by_order_hash_cache = routes::trades::trades_by_order_hash_cache(); let taker_trades_tx_hash_cache = routes::trades::taker_trades_tx_hash_cache(); let trades_by_token_cache = routes::trades::trades_by_token_cache(); - let orders_by_token_cache = routes::orders::orders_by_token_cache(); let orders_by_owner_cache = routes::orders::orders_by_owner_cache(); Ok(rocket::custom(figment) @@ -151,6 +159,11 @@ pub(crate) fn rocket( .manage(trades_by_token_cache) .manage(orders_by_token_cache) .manage(orders_by_owner_cache) + .manage(block_number_cache) + .manage(limit_ratio_cache) + .manage(stale_price_skip_cache) + .manage(swap_quote_cache) + .manage(cache_warmer_stats) .manage(direct_trades_fetcher) .mount("/", routes::health::routes()) .mount("/v1/tokens", routes::tokens::routes()) @@ -262,6 +275,7 @@ async fn main() { let raindex_config = match raindex::RaindexProvider::load( ®istry_url, Some(local_db_path), + cfg.rpc_override.clone(), ) .await { @@ -326,7 +340,7 @@ async fn main() { } }; - let shared_raindex = tokio::sync::RwLock::new(raindex_config); + let shared_raindex = std::sync::Arc::new(tokio::sync::RwLock::new(raindex_config)); let rate_limiter = fairings::RateLimiter::new(cfg.rate_limit_global_rpm, cfg.rate_limit_per_key_rpm); @@ -337,12 +351,47 @@ async fn main() { } tracing::info!(docs_dir = 
%cfg.docs_dir, "serving documentation at /docs"); + let orders_by_token_cache = routes::orders::orders_by_token_cache(); + let block_number_cache = raindex::block_number_cache(); + let limit_ratio_cache = routes::orders::limit_order_ratio_cache(); + let stale_price_skip_cache = routes::orders::stale_price_skip_cache(); + let swap_quote_cache = routes::swap::swap_quote_cache(); + let cache_warmer_stats = cache_warmer::shared_cache_warmer_stats(); + + // Spawn background task to keep the orders-by-token cache warm. + // Refreshes every 10s so real requests almost always hit the cache. + { + let cache = orders_by_token_cache.clone(); + let raindex = std::sync::Arc::clone(&shared_raindex); + let block_cache = block_number_cache.clone(); + let limit_cache = limit_ratio_cache.clone(); + let stale_cache = stale_price_skip_cache.clone(); + let stats = std::sync::Arc::clone(&cache_warmer_stats); + tokio::spawn(async move { + cache_warmer::run_orders_by_token_warmer( + cache, + raindex, + block_cache, + limit_cache, + stale_cache, + stats, + ) + .await; + }); + } + let rocket = match rocket( pool, rate_limiter, shared_raindex, cfg.docs_dir, direct_trades_fetcher, + orders_by_token_cache, + block_number_cache, + limit_ratio_cache, + stale_price_skip_cache, + swap_quote_cache, + cache_warmer_stats, ) { Ok(r) => r, Err(e) => { diff --git a/src/market_calendar.rs b/src/market_calendar.rs new file mode 100644 index 0000000..9cf02a9 --- /dev/null +++ b/src/market_calendar.rs @@ -0,0 +1,93 @@ +use chrono::{DateTime, Datelike, Timelike, Utc, Weekday}; +use chrono_tz::America::New_York; + +/// True when the NYSE regular trading session is currently open in +/// `America/New_York`: 09:30 (inclusive) through 16:00 (exclusive), +/// Monday through Friday. +/// +/// Holidays (e.g. Independence Day, Thanksgiving) are NOT modeled. On a +/// holiday this returns true, which means stale-marked orders will be +/// re-quoted once and likely re-marked on the next cycle. 
That single +/// wasted quote per order per holiday is acceptable in exchange for not +/// needing to maintain a holiday calendar. +pub(crate) fn is_nyse_open(now_utc: DateTime) -> bool { + let now_et = now_utc.with_timezone(&New_York); + + if matches!(now_et.weekday(), Weekday::Sat | Weekday::Sun) { + return false; + } + + let minutes = now_et.hour() * 60 + now_et.minute(); + let market_open = 9 * 60 + 30; + let market_close = 16 * 60; + minutes >= market_open && minutes < market_close +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::TimeZone; + + fn et(year: i32, month: u32, day: u32, hour: u32, minute: u32) -> DateTime { + // Build a wall-clock time in New_York and convert to UTC. This + // automatically handles EDT vs EST. + let local = New_York + .with_ymd_and_hms(year, month, day, hour, minute, 0) + .single() + .expect("unique New_York timestamp"); + local.with_timezone(&Utc) + } + + #[test] + fn test_open_at_market_open() { + // Monday 2026-04-27 09:30 ET (DST in effect → 13:30 UTC). + assert!(is_nyse_open(et(2026, 4, 27, 9, 30))); + } + + #[test] + fn test_open_one_minute_before_close() { + assert!(is_nyse_open(et(2026, 4, 27, 15, 59))); + } + + #[test] + fn test_closed_at_market_close() { + // 16:00 is the bell. Closed exactly at close (exclusive). + assert!(!is_nyse_open(et(2026, 4, 27, 16, 0))); + } + + #[test] + fn test_closed_one_minute_before_open() { + assert!(!is_nyse_open(et(2026, 4, 27, 9, 29))); + } + + #[test] + fn test_closed_overnight() { + assert!(!is_nyse_open(et(2026, 4, 27, 3, 0))); + assert!(!is_nyse_open(et(2026, 4, 27, 22, 0))); + } + + #[test] + fn test_closed_on_saturday() { + // 2026-04-25 is a Saturday. + assert!(!is_nyse_open(et(2026, 4, 25, 12, 0))); + } + + #[test] + fn test_closed_on_sunday() { + // 2026-04-26 is a Sunday. + assert!(!is_nyse_open(et(2026, 4, 26, 12, 0))); + } + + #[test] + fn test_open_during_winter_est() { + // 2026-01-15 (Thursday) is well inside EST. 10:00 ET → 15:00 UTC. 
+ assert!(is_nyse_open(et(2026, 1, 15, 10, 0))); + assert!(!is_nyse_open(et(2026, 1, 15, 8, 0))); + } + + #[test] + fn test_dst_transition_safe() { + // 2026-03-09 (Monday after DST starts on Sunday 2026-03-08). + assert!(is_nyse_open(et(2026, 3, 9, 10, 0))); + } +} diff --git a/src/raindex/block_cache.rs b/src/raindex/block_cache.rs new file mode 100644 index 0000000..aecc2da --- /dev/null +++ b/src/raindex/block_cache.rs @@ -0,0 +1,199 @@ +use crate::cache::AppCache; +use std::time::Duration; + +const BLOCK_NUMBER_CACHE_TTL: Duration = Duration::from_secs(2); +const BLOCK_NUMBER_CACHE_CAPACITY: u64 = 16; + +/// Per-chain cache of the latest block number. +/// +/// Block time on Base is ~2s, so a 2s TTL gives effectively no staleness while +/// eliminating the per-quote-batch `eth_blockNumber` round-trip the upstream +/// quote library performs when called with `block_number = None`. +pub(crate) type BlockNumberCache = AppCache; + +pub(crate) fn block_number_cache() -> BlockNumberCache { + AppCache::new(BLOCK_NUMBER_CACHE_CAPACITY, BLOCK_NUMBER_CACHE_TTL) +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum BlockNumberError { + #[error("no rpc urls configured for chain")] + NoRpcUrls, + #[error("rpc transport failed: {0}")] + Transport(#[from] reqwest::Error), + #[error("rpc returned malformed response: {0}")] + InvalidResponse(String), +} + +/// Issue a JSON-RPC `eth_blockNumber` call against the first reachable URL. +/// +/// We deliberately use a plain reqwest call rather than threading another +/// alloy/ethers provider through the codebase — this keeps the block-number +/// cache standalone and avoids new transitive deps. 
+async fn fetch_block_number(rpc_urls: &[String]) -> Result { + if rpc_urls.is_empty() { + return Err(BlockNumberError::NoRpcUrls); + } + + let body = serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_blockNumber", + "params": [], + "id": 1, + }); + + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(5)) + .build()?; + + let mut last_err: Option = None; + for url in rpc_urls { + match client.post(url).json(&body).send().await { + Ok(resp) => { + if !resp.status().is_success() { + last_err = Some(BlockNumberError::InvalidResponse(format!( + "http status {}", + resp.status() + ))); + continue; + } + match resp.json::().await { + Ok(json) => match parse_block_number(&json) { + Ok(b) => return Ok(b), + Err(e) => last_err = Some(e), + }, + Err(e) => last_err = Some(BlockNumberError::Transport(e)), + } + } + Err(e) => last_err = Some(BlockNumberError::Transport(e)), + } + } + + Err(last_err.unwrap_or(BlockNumberError::NoRpcUrls)) +} + +fn parse_block_number(json: &serde_json::Value) -> Result { + let hex = json + .get("result") + .and_then(|v| v.as_str()) + .ok_or_else(|| BlockNumberError::InvalidResponse("missing result field".into()))?; + let trimmed = hex.trim_start_matches("0x"); + u64::from_str_radix(trimmed, 16) + .map_err(|e| BlockNumberError::InvalidResponse(format!("not a hex u64: {e}"))) +} + +/// Return the cached block number for the chain, fetching on miss. +/// +/// Returns `None` when the fetch fails — callers should fall back to passing +/// `None` to the upstream library so it can perform its own (uncached) lookup. 
+pub(crate) async fn get_or_fetch_block_number( + cache: &BlockNumberCache, + chain_id: u32, + rpc_urls: &[String], +) -> Option { + if let Some(b) = cache.get(&chain_id).await { + return Some(b); + } + match fetch_block_number(rpc_urls).await { + Ok(b) => { + cache.insert(chain_id, b).await; + Some(b) + } + Err(e) => { + tracing::warn!( + chain_id, + error = %e, + "block number fetch failed; quote batch will fetch directly" + ); + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + #[test] + fn test_parse_block_number_valid() { + let v = json!({"jsonrpc": "2.0", "id": 1, "result": "0x1a2b3c"}); + assert_eq!(parse_block_number(&v).unwrap(), 0x1a2b3c); + } + + #[test] + fn test_parse_block_number_no_prefix() { + let v = json!({"result": "ff"}); + assert_eq!(parse_block_number(&v).unwrap(), 255); + } + + #[test] + fn test_parse_block_number_missing_result() { + let v = json!({"jsonrpc": "2.0", "id": 1}); + let err = parse_block_number(&v).unwrap_err(); + assert!(matches!(err, BlockNumberError::InvalidResponse(_))); + } + + #[test] + fn test_parse_block_number_garbage() { + let v = json!({"result": "0xZZZ"}); + let err = parse_block_number(&v).unwrap_err(); + assert!(matches!(err, BlockNumberError::InvalidResponse(_))); + } + + #[rocket::async_test] + async fn test_fetch_block_number_no_urls() { + let result = fetch_block_number(&[]).await; + assert!(matches!(result, Err(BlockNumberError::NoRpcUrls))); + } + + /// Spawn a tiny HTTP server that responds with the given JSON body to one + /// POST and then exits. Returns the bound URL. 
+ async fn one_shot_rpc(response_body: String) -> String { + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + if let Ok((mut socket, _)) = listener.accept().await { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + let mut buf = [0u8; 4096]; + let _ = socket.read(&mut buf).await; + let response = format!( + "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}", + response_body.len(), + response_body + ); + let _ = socket.write_all(response.as_bytes()).await; + } + }); + format!("http://{addr}") + } + + #[rocket::async_test] + async fn test_fetch_block_number_success() { + let url = one_shot_rpc(r#"{"jsonrpc":"2.0","id":1,"result":"0x100"}"#.into()).await; + let block = fetch_block_number(&[url]).await.unwrap(); + assert_eq!(block, 256); + } + + #[rocket::async_test] + async fn test_get_or_fetch_caches_value() { + let cache: BlockNumberCache = block_number_cache(); + cache.insert(8453, 12345).await; + + let counter = Arc::new(AtomicUsize::new(0)); + // No URLs needed — value is cached. + let block = get_or_fetch_block_number(&cache, 8453, &[]).await; + assert_eq!(block, Some(12345)); + assert_eq!(counter.load(Ordering::SeqCst), 0); + } + + #[rocket::async_test] + async fn test_get_or_fetch_returns_none_on_failure() { + let cache: BlockNumberCache = block_number_cache(); + // 127.0.0.1:1 is reserved; the connection will fail. 
+ let block = get_or_fetch_block_number(&cache, 8453, &["http://127.0.0.1:1".into()]).await; + assert_eq!(block, None); + } +} diff --git a/src/raindex/config.rs b/src/raindex/config.rs index d78b677..cd8556a 100644 --- a/src/raindex/config.rs +++ b/src/raindex/config.rs @@ -1,6 +1,7 @@ use crate::error::ApiError; use rain_orderbook_common::raindex_client::RaindexClient; use rain_orderbook_js_api::registry::DotrainRegistry; +use std::collections::HashMap; use std::path::PathBuf; #[derive(Debug)] @@ -8,6 +9,86 @@ pub(crate) struct RaindexProvider { registry: DotrainRegistry, client: RaindexClient, db_path: Option, + rpc_overrides: HashMap>, +} + +/// Replaces the `rpcs:` list inside each `networks.` block whose name +/// has an entry in `overrides`. Lets operators point at private/paid RPC +/// endpoints without forking the rain.strategies registry. +/// +/// The YAML mutation is line-based and intentionally narrow: it only touches +/// the `rpcs:` list immediately under a matched `:` key inside the +/// `networks:` section. Any other keys (chain-id, currency, etc.) are +/// preserved untouched. +fn apply_rpc_override(yaml: &str, overrides: &HashMap>) -> String { + if overrides.is_empty() { + return yaml.to_string(); + } + + let mut out = String::with_capacity(yaml.len()); + let mut in_networks = false; + let mut current_network: Option = None; + let mut skipping_rpcs = false; + + for line in yaml.lines() { + let trimmed = line.trim_start(); + let indent = line.len() - trimmed.len(); + + // Detect leaving the `networks:` block (any new top-level key). + if in_networks && indent == 0 && !line.is_empty() { + in_networks = false; + current_network = None; + skipping_rpcs = false; + } + + // Enter the `networks:` block. + if !in_networks && line.starts_with("networks:") { + in_networks = true; + out.push_str(line); + out.push('\n'); + continue; + } + + if in_networks { + // A network name lives at indent 2 (one level under `networks:`). 
+ if indent == 2 && trimmed.ends_with(':') { + let name = trimmed.trim_end_matches(':').to_string(); + current_network = Some(name); + skipping_rpcs = false; + out.push_str(line); + out.push('\n'); + continue; + } + + // While skipping the original `rpcs:` list, drop list entries + // (lines starting with '-' at indent > 4). + if skipping_rpcs { + if indent > 4 && trimmed.starts_with('-') { + continue; + } + skipping_rpcs = false; + } + + // Detect `rpcs:` key inside the current network and rewrite it. + if let Some(name) = ¤t_network { + if indent == 4 && trimmed.starts_with("rpcs:") { + if let Some(replacement_urls) = overrides.get(name) { + out.push_str(" rpcs:\n"); + for url in replacement_urls { + out.push_str(&format!(" - {url}\n")); + } + skipping_rpcs = true; + continue; + } + } + } + } + + out.push_str(line); + out.push('\n'); + } + + out } /// Neutralizes the `metaboards` section in YAML settings so the library's @@ -45,9 +126,11 @@ impl RaindexProvider { pub(crate) async fn load( registry_url: &str, db_path: Option, + rpc_overrides: HashMap>, ) -> Result { let url = registry_url.to_string(); let db = db_path.clone(); + let overrides = rpc_overrides; let (tx, rx) = tokio::sync::oneshot::channel(); @@ -68,9 +151,20 @@ impl RaindexProvider { .await .map_err(|e| RaindexProviderError::RegistryLoad(e.to_string()))?; - // Build the client with metaboard lookups disabled to avoid ~5s - // of network calls in fetch_orders_dotrain_sources(). 
- let settings = neutralize_metaboards(®istry.settings()); + // Build the client with: + // - metaboard lookups disabled to avoid ~5s of subgraph calls + // - per-network RPC URLs overridden if `[rpc_override]` is set + let mut settings = neutralize_metaboards(®istry.settings()); + if !overrides.is_empty() { + settings = apply_rpc_override(&settings, &overrides); + for (name, urls) in &overrides { + tracing::info!( + network = %name, + url_count = urls.len(), + "applied RPC override for network" + ); + } + } let client = RaindexClient::new(vec![settings], None, db.clone()) .await .map_err(|e| RaindexProviderError::ClientInit(e.to_string()))?; @@ -79,6 +173,7 @@ impl RaindexProvider { registry, client, db_path: db, + rpc_overrides: overrides, }) }); @@ -99,6 +194,10 @@ impl RaindexProvider { pub(crate) fn db_path(&self) -> Option { self.db_path.clone() } + + pub(crate) fn rpc_overrides(&self) -> HashMap> { + self.rpc_overrides.clone() + } } #[derive(Debug, thiserror::Error)] @@ -134,7 +233,7 @@ mod tests { #[rocket::async_test] async fn test_load_fails_with_unreachable_url() { - let result = RaindexProvider::load("http://127.0.0.1:1/registry.txt", None).await; + let result = RaindexProvider::load("http://127.0.0.1:1/registry.txt", None, HashMap::new()).await; assert!(result.is_err()); assert!(matches!( result.unwrap_err(), @@ -159,7 +258,7 @@ mod tests { let _ = tokio::io::AsyncWriteExt::write_all(&mut socket, response.as_bytes()).await; }); - let result = RaindexProvider::load(&format!("http://{addr}/registry.txt"), None).await; + let result = RaindexProvider::load(&format!("http://{addr}/registry.txt"), None, HashMap::new()).await; assert!(result.is_err()); assert!(matches!( result.unwrap_err(), @@ -218,6 +317,106 @@ metaboards: assert!(!result.contains("api.goldsky.com")); } + #[test] + fn test_apply_rpc_override_replaces_single_url() { + let yaml = "\ +version: 4 +networks: + base: + rpcs: + - https://base-rpc.publicnode.com + chain-id: 8453 + currency: ETH 
+orderbooks: + base: + address: 0xabc +"; + let mut overrides = HashMap::new(); + overrides.insert( + "base".to_string(), + vec!["https://alchemy.example/v2/key".to_string()], + ); + let result = apply_rpc_override(yaml, &overrides); + + assert!(result.contains("- https://alchemy.example/v2/key")); + assert!(!result.contains("publicnode.com")); + // Other fields preserved. + assert!(result.contains("chain-id: 8453")); + assert!(result.contains("currency: ETH")); + assert!(result.contains("orderbooks:")); + } + + #[test] + fn test_apply_rpc_override_replaces_multi_url() { + let yaml = "\ +networks: + base: + rpcs: + - https://old-1 + - https://old-2 + - https://old-3 + chain-id: 8453 +"; + let mut overrides = HashMap::new(); + overrides.insert( + "base".to_string(), + vec!["https://new-a".to_string(), "https://new-b".to_string()], + ); + let result = apply_rpc_override(yaml, &overrides); + + assert!(result.contains("- https://new-a")); + assert!(result.contains("- https://new-b")); + assert!(!result.contains("old-1")); + assert!(!result.contains("old-2")); + assert!(!result.contains("old-3")); + assert!(result.contains("chain-id: 8453")); + } + + #[test] + fn test_apply_rpc_override_only_named_network() { + let yaml = "\ +networks: + base: + rpcs: + - https://base-rpc + chain-id: 8453 + ethereum: + rpcs: + - https://eth-rpc + chain-id: 1 +"; + let mut overrides = HashMap::new(); + overrides.insert("base".to_string(), vec!["https://new-base".to_string()]); + let result = apply_rpc_override(yaml, &overrides); + + assert!(result.contains("- https://new-base")); + assert!(!result.contains("base-rpc")); + // Ethereum block untouched. 
+ assert!(result.contains("- https://eth-rpc")); + } + + #[test] + fn test_apply_rpc_override_empty_passthrough() { + let yaml = "networks:\n base:\n rpcs:\n - https://x\n"; + let result = apply_rpc_override(yaml, &HashMap::new()); + assert_eq!(result, yaml); + } + + #[test] + fn test_apply_rpc_override_unknown_network_passthrough() { + let yaml = "\ +networks: + base: + rpcs: + - https://base-rpc +"; + let mut overrides = HashMap::new(); + overrides.insert("polygon".to_string(), vec!["https://poly".to_string()]); + let result = apply_rpc_override(yaml, &overrides); + assert!(result.contains("- https://base-rpc")); + assert!(!result.contains("poly")); + } + #[test] fn test_error_maps_to_api_error() { let err = RaindexProviderError::RegistryLoad("test".into()); diff --git a/src/raindex/mod.rs b/src/raindex/mod.rs index 14b908d..dd95039 100644 --- a/src/raindex/mod.rs +++ b/src/raindex/mod.rs @@ -1,4 +1,6 @@ +pub(crate) mod block_cache; pub(crate) mod config; +pub(crate) use block_cache::{block_number_cache, get_or_fetch_block_number, BlockNumberCache}; pub(crate) use config::RaindexProvider; -pub(crate) type SharedRaindexProvider = tokio::sync::RwLock; +pub(crate) type SharedRaindexProvider = std::sync::Arc>; diff --git a/src/routes/admin.rs b/src/routes/admin.rs index 80d94a7..0502d3e 100644 --- a/src/routes/admin.rs +++ b/src/routes/admin.rs @@ -68,7 +68,11 @@ pub async fn put_registry( })); } - let new_provider = RaindexProvider::load(&req.registry_url, db_path) + let rpc_overrides = { + let guard = shared_raindex.read().await; + guard.rpc_overrides() + }; + let new_provider = RaindexProvider::load(&req.registry_url, db_path, rpc_overrides) .await .map_err(|e| { tracing::warn!(error = %e, "failed to load new registry"); diff --git a/src/routes/health.rs b/src/routes/health.rs index ed89ea6..2a55714 100644 --- a/src/routes/health.rs +++ b/src/routes/health.rs @@ -1,9 +1,11 @@ +use crate::cache_warmer::SharedCacheWarmerStats; use crate::db::DbPool; use 
crate::error::ApiError; use crate::fairings::TracingSpan; use crate::raindex::SharedRaindexProvider; use crate::types::health::{ - DbStatus, DetailedHealthResponse, HealthResponse, OrderbookSyncInfo, RaindexDbStatus, + CacheWarmerStatus, DbStatus, DetailedHealthResponse, HealthResponse, OrderbookSyncInfo, + RaindexDbStatus, }; use rocket::serde::json::Json; use rocket::{Route, State}; @@ -43,6 +45,7 @@ pub async fn get_health_detailed( span: TracingSpan, pool: &State, shared_raindex: &State, + cache_warmer_stats: &State, ) -> Result, ApiError> { async move { tracing::info!("detailed health check request received"); @@ -53,26 +56,54 @@ pub async fn get_health_detailed( // 2. Check raindex database and sync status let raindex_db = check_raindex_db(shared_raindex).await; - // 3. Determine overall status - let status = if app_db.connected && raindex_db.connected && !raindex_db.orderbooks.is_empty() - { - "ok".to_string() - } else if app_db.connected || raindex_db.connected { - "degraded".to_string() - } else { - "error".to_string() - }; + // 3. Snapshot cache warmer stats + let cache_warmer = build_cache_warmer_status(cache_warmer_stats).await; + + // 4. 
Determine overall status + let status = + if app_db.connected && raindex_db.connected && !raindex_db.orderbooks.is_empty() { + "ok".to_string() + } else if app_db.connected || raindex_db.connected { + "degraded".to_string() + } else { + "error".to_string() + }; Ok(Json(DetailedHealthResponse { status, app_db, raindex_db, + cache_warmer, })) } .instrument(span.0) .await } +async fn build_cache_warmer_status(stats: &SharedCacheWarmerStats) -> CacheWarmerStatus { + let snapshot = stats.read().await.clone(); + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + let (seconds_since_last_complete, last_complete_age) = match snapshot.last_complete_at_unix { + Some(ts) => ( + Some(now.saturating_sub(ts)), + Some(format_age(now, ts)), + ), + None => (None, None), + }; + CacheWarmerStatus { + running: snapshot.total_cycles > 0, + total_cycles: snapshot.total_cycles, + last_cycle_ms: snapshot.last_cycle_ms, + last_tokens: snapshot.last_tokens, + last_errors: snapshot.last_errors, + seconds_since_last_complete, + last_complete_age, + } +} + async fn check_app_db(pool: &DbPool) -> DbStatus { match sqlx::query("SELECT 1").execute(pool).await { Ok(_) => DbStatus { @@ -136,10 +167,8 @@ async fn check_raindex_db(shared_raindex: &SharedRaindexProvider) -> RaindexDbSt .map(|ob| (ob.network.chain_id, format!("{:#x}", ob.address))) .collect(); - let query_result = spawn_blocking(move || { - query_raindex_sync_status(&db_path_clone, &orderbook_configs) - }) - .await; + let query_result = + spawn_blocking(move || query_raindex_sync_status(&db_path_clone, &orderbook_configs)).await; match query_result { Ok(Ok(orderbook_infos)) => RaindexDbStatus { @@ -333,4 +362,56 @@ mod tests { fn test_format_age_exact_day() { assert_eq!(format_age(86400, 0), "1d ago"); } + + #[rocket::async_test] + async fn test_build_cache_warmer_status_no_cycles() { + let stats = crate::cache_warmer::shared_cache_warmer_stats(); + let s = 
build_cache_warmer_status(&stats).await; + assert!(!s.running); + assert_eq!(s.total_cycles, 0); + assert!(s.last_cycle_ms.is_none()); + assert!(s.last_complete_age.is_none()); + } + + #[rocket::async_test] + async fn test_build_cache_warmer_status_after_cycle() { + let stats = crate::cache_warmer::shared_cache_warmer_stats(); + { + let mut w = stats.write().await; + w.total_cycles = 3; + w.last_cycle_ms = Some(7500); + w.last_tokens = Some(11); + w.last_errors = Some(0); + w.last_complete_at_unix = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + } + let s = build_cache_warmer_status(&stats).await; + assert!(s.running); + assert_eq!(s.total_cycles, 3); + assert_eq!(s.last_cycle_ms, Some(7500)); + assert_eq!(s.last_tokens, Some(11)); + assert_eq!(s.last_errors, Some(0)); + // age may be 0 or 1 second depending on test timing + assert!(s.seconds_since_last_complete.is_some()); + assert!(s.last_complete_age.is_some()); + } + + #[rocket::async_test] + async fn test_health_detailed_includes_cache_warmer_field() { + use crate::test_helpers::TestClientBuilder; + use rocket::http::Status; + let client = TestClientBuilder::new().build().await; + let resp = client.get("/health/detailed").dispatch().await; + assert_eq!(resp.status(), Status::Ok); + let body: serde_json::Value = + serde_json::from_str(&resp.into_string().await.unwrap()).unwrap(); + // cache_warmer object must always be present + assert!(body.get("cache_warmer").is_some()); + let cw = &body["cache_warmer"]; + // Before any cycle has run, totalCycles == 0 and running == false + assert_eq!(cw["running"], serde_json::json!(false)); + assert_eq!(cw["total_cycles"], serde_json::json!(0)); + } } diff --git a/src/routes/orders/get_by_owner.rs b/src/routes/orders/get_by_owner.rs index 8246023..8112ac8 100644 --- a/src/routes/orders/get_by_owner.rs +++ b/src/routes/orders/get_by_owner.rs @@ -1,6 +1,6 @@ use super::{ - build_order_summary, build_pagination, 
extract_quote_fields, OrdersListDataSource, - QuoteFields, RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, + build_order_summary, build_pagination, OrdersListDataSource, QuoteFields, + RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, }; use crate::auth::AuthenticatedKey; use crate::cache::AppCache; @@ -74,10 +74,11 @@ pub(crate) async fn process_get_orders_by_owner( skipped_zero_balance = orders.len() - quotable_orders.len(), "fetching batched quotes for orders by owner" ); - let quote_results = ds.get_order_quotes_batch("able_orders).await; + let quotable_fields = ds.fetch_quote_fields("able_orders).await; let quotes_stage_duration_ms = quotes_stage_start.elapsed().as_millis(); - // Map quote results back to original order positions + // Map quote fields back to original order positions; orders skipped above + // (zero output balance) keep the placeholder fields. let mut quote_fields: Vec = (0..orders.len()) .map(|_| QuoteFields { io_ratio: "-".into(), @@ -85,13 +86,9 @@ pub(crate) async fn process_get_orders_by_owner( }) .collect(); for (qi, &original_idx) in quotable_indices.iter().enumerate() { - quote_fields[original_idx] = extract_quote_fields( - &orders[original_idx], - quote_results - .get(qi) - .cloned() - .unwrap_or_else(|| Err(ApiError::Internal("missing quote".into()))), - ); + if let Some(field) = quotable_fields.get(qi) { + quote_fields[original_idx] = field.clone(); + } } let mut summaries = Vec::with_capacity(orders.len()); @@ -134,12 +131,16 @@ pub(crate) async fn process_get_orders_by_owner( (status = 500, description = "Internal server error", body = ApiErrorResponse), ) )] +#[allow(clippy::too_many_arguments)] #[get("/owner/
?")] pub async fn get_orders_by_address( _global: GlobalRateLimit, _key: AuthenticatedKey, shared_raindex: &State, orders_cache: &State, + block_number_cache: &State, + limit_ratio_cache: &State, + stale_price_skip_cache: &State, span: TracingSpan, address: ValidatedAddress, params: OrdersPaginationParams, @@ -159,6 +160,9 @@ pub async fn get_orders_by_address( let raindex = shared_raindex.read().await; let ds = RaindexOrdersListDataSource { client: raindex.client(), + block_number_cache: block_number_cache.inner(), + limit_ratio_cache: limit_ratio_cache.inner(), + stale_price_skip_cache: stale_price_skip_cache.inner(), }; process_get_orders_by_owner(&ds, addr, Some(page), Some(page_size)).await }) diff --git a/src/routes/orders/get_by_token.rs b/src/routes/orders/get_by_token.rs index 01add0b..4d3b0a8 100644 --- a/src/routes/orders/get_by_token.rs +++ b/src/routes/orders/get_by_token.rs @@ -1,6 +1,6 @@ use super::{ - build_order_summary, build_pagination, extract_quote_fields, OrdersListDataSource, - QuoteFields, RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, + build_order_summary, build_pagination, OrdersListDataSource, QuoteFields, + RaindexOrdersListDataSource, DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE, }; use crate::auth::AuthenticatedKey; use crate::cache::AppCache; @@ -92,10 +92,11 @@ pub(crate) async fn process_get_orders_by_token( skipped_zero_balance = orders.len() - quotable_orders.len(), "fetching batched quotes for orders by token" ); - let quote_results = ds.get_order_quotes_batch("able_orders).await; + let quotable_fields = ds.fetch_quote_fields("able_orders).await; let quotes_stage_duration_ms = quotes_stage_start.elapsed().as_millis(); - // Map quote results back to original order positions + // Map quote fields back to original order positions; orders skipped above + // (zero output balance) keep the placeholder fields. 
let mut quote_fields: Vec = (0..orders.len()) .map(|_| QuoteFields { io_ratio: "-".into(), @@ -103,13 +104,9 @@ pub(crate) async fn process_get_orders_by_token( }) .collect(); for (qi, &original_idx) in quotable_indices.iter().enumerate() { - quote_fields[original_idx] = extract_quote_fields( - &orders[original_idx], - quote_results - .get(qi) - .cloned() - .unwrap_or_else(|| Err(ApiError::Internal("missing quote".into()))), - ); + if let Some(field) = quotable_fields.get(qi) { + quote_fields[original_idx] = field.clone(); + } } let mut summaries = Vec::with_capacity(orders.len()); @@ -152,12 +149,16 @@ pub(crate) async fn process_get_orders_by_token( (status = 500, description = "Internal server error", body = ApiErrorResponse), ) )] +#[allow(clippy::too_many_arguments)] #[get("/token/
?")] pub async fn get_orders_by_token( _global: GlobalRateLimit, _key: AuthenticatedKey, shared_raindex: &State, orders_cache: &State, + block_number_cache: &State, + limit_ratio_cache: &State, + stale_price_skip_cache: &State, span: TracingSpan, address: ValidatedAddress, params: OrdersByTokenParams, @@ -178,6 +179,9 @@ pub async fn get_orders_by_token( let raindex = shared_raindex.read().await; let ds = RaindexOrdersListDataSource { client: raindex.client(), + block_number_cache: block_number_cache.inner(), + limit_ratio_cache: limit_ratio_cache.inner(), + stale_price_skip_cache: stale_price_skip_cache.inner(), }; process_get_orders_by_token(&ds, addr, side, Some(page), Some(page_size)).await }) diff --git a/src/routes/orders/limit_cache.rs b/src/routes/orders/limit_cache.rs new file mode 100644 index 0000000..625f067 --- /dev/null +++ b/src/routes/orders/limit_cache.rs @@ -0,0 +1,100 @@ +use crate::cache::AppCache; +use alloy::primitives::B256; +use rain_orderbook_common::parsed_meta::ParsedMeta; +use rain_orderbook_common::raindex_client::orders::RaindexOrder; +use std::time::Duration; + +const LIMIT_RATIO_CACHE_TTL: Duration = Duration::from_secs(86_400); // 24h +const LIMIT_RATIO_CACHE_CAPACITY: u64 = 10_000; + +/// Cache of `formatted_ratio` keyed by order_hash for limit orders. +/// +/// Limit orders by definition have a fixed io_ratio that doesn't depend on +/// block state, so once we've quoted one we can reuse the ratio without +/// burning RPC. The 24h TTL is a safety net so a poisoned cache entry +/// self-heals after a day. +pub(crate) type LimitOrderRatioCache = AppCache; + +pub(crate) fn limit_order_ratio_cache() -> LimitOrderRatioCache { + AppCache::new(LIMIT_RATIO_CACHE_CAPACITY, LIMIT_RATIO_CACHE_TTL) +} + +/// True when the order's `selected_deployment` metadata identifies it as a +/// limit order (e.g. `fixed-limit`). Returns false for orders with no +/// dotrain GUI metadata, or with a non-limit deployment. 
+pub(crate) fn is_limit_order(order: &RaindexOrder) -> bool { + for meta in order.parsed_meta() { + if let ParsedMeta::DotrainGuiStateV1(gui_state) = meta { + if gui_state + .selected_deployment + .to_lowercase() + .contains("limit") + { + return true; + } + } + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::routes::order::test_fixtures::order_json; + use serde_json::json; + + fn order_with_deployment(deployment: &str) -> RaindexOrder { + let mut value = order_json(); + value["parsedMeta"] = json!([{ + "DotrainGuiStateV1": { + "dotrain_hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "field_values": {}, + "deposits": {}, + "select_tokens": {}, + "vault_ids": {}, + "selected_deployment": deployment, + } + }]); + serde_json::from_value(value).expect("deserialize order with parsedMeta") + } + + #[test] + fn test_is_limit_order_true_for_fixed_limit() { + let order = order_with_deployment("fixed-limit-buy-base"); + assert!(is_limit_order(&order)); + } + + #[test] + fn test_is_limit_order_true_for_uppercase_limit() { + let order = order_with_deployment("Fixed-LIMIT-Sell"); + assert!(is_limit_order(&order)); + } + + #[test] + fn test_is_limit_order_false_for_dca() { + let order = order_with_deployment("auction-dca-base"); + assert!(!is_limit_order(&order)); + } + + #[test] + fn test_is_limit_order_false_for_grid() { + let order = order_with_deployment("grid-base"); + assert!(!is_limit_order(&order)); + } + + #[test] + fn test_is_limit_order_false_for_no_metadata() { + let order: RaindexOrder = + serde_json::from_value(order_json()).expect("deserialize plain order"); + assert!(!is_limit_order(&order)); + } + + #[rocket::async_test] + async fn test_cache_hit_and_miss() { + let cache = limit_order_ratio_cache(); + let key = alloy::primitives::B256::from([1u8; 32]); + assert!(cache.get(&key).await.is_none()); + cache.insert(key, "1.5".to_string()).await; + assert_eq!(cache.get(&key).await.as_deref(), Some("1.5")); + } +} 
diff --git a/src/routes/orders/mod.rs b/src/routes/orders/mod.rs index 1a5d442..dc00c3c 100644 --- a/src/routes/orders/mod.rs +++ b/src/routes/orders/mod.rs @@ -1,12 +1,15 @@ mod get_by_owner; mod get_by_token; mod get_by_tx; +mod limit_cache; +mod stale_price_skip; use crate::error::ApiError; use crate::types::common::TokenRef; use crate::types::orders::{OrderSummary, OrdersListResponse, OrdersPagination}; use async_trait::async_trait; use futures::{future::join_all, stream, StreamExt}; +use rain_orderbook_bindings::IOrderBookV6::SignedContextV1; use rain_orderbook_common::raindex_client::order_quotes::{ get_order_quotes_batch as fetch_order_quotes_batch, RaindexOrderQuote, }; @@ -19,6 +22,83 @@ pub(crate) const DEFAULT_PAGE_SIZE: u32 = 20; pub(crate) const MAX_PAGE_SIZE: u16 = 50; const MAX_CHAIN_BATCH_CONCURRENCY: usize = 4; +/// Fetch signed oracle context from an order's oracle URL. +/// Returns an empty vec if the order has no oracle URL or the fetch fails. +/// +/// The oracle server expects a POST with an ABI-encoded `bytes` body. +/// An empty bytes value is: offset (0x20) + length (0x00), each as a 32-byte word. 
+async fn fetch_oracle_context(oracle_url: &str) -> Vec { + // ABI-encode an empty `bytes` value: offset=0x20, length=0 + let mut abi_body = vec![0u8; 64]; + abi_body[31] = 0x20; // offset = 32 + + let client = reqwest::Client::new(); + let resp = match client + .post(oracle_url) + .header("Content-Type", "application/octet-stream") + .body(abi_body) + .send() + .await + { + Ok(resp) if resp.status().is_success() => resp, + Ok(resp) => { + tracing::warn!( + oracle_url, + status = %resp.status(), + "oracle endpoint returned non-success status" + ); + return vec![]; + } + Err(e) => { + tracing::warn!(oracle_url, error = %e, "failed to fetch oracle context"); + return vec![]; + } + }; + + let body = match resp.text().await { + Ok(body) => body, + Err(e) => { + tracing::warn!(oracle_url, error = %e, "failed to read oracle response body"); + return vec![]; + } + }; + + // Try parsing as array first, then as single object + if let Ok(contexts) = serde_json::from_str::>(&body) { + tracing::debug!( + oracle_url, + count = contexts.len(), + "fetched oracle signed context" + ); + return contexts; + } + if let Ok(context) = serde_json::from_str::(&body) { + tracing::debug!(oracle_url, "fetched single oracle signed context"); + return vec![context]; + } + + tracing::warn!( + oracle_url, + "failed to parse oracle response as SignedContextV1" + ); + vec![] +} + +/// For each order, fetch oracle signed context if the order has an oracle URL. +/// Returns a vec parallel to the input orders, with empty vecs for non-oracle orders. 
+async fn fetch_oracle_contexts_for_orders(orders: &[RaindexOrder]) -> Vec> { + let futures: Vec<_> = orders + .iter() + .map(|order| async move { + match order.oracle_url() { + Some(url) => fetch_oracle_context(&url).await, + None => vec![], + } + }) + .collect(); + join_all(futures).await +} + type OrderQuoteResult = Result, ApiError>; type OrderQuoteBatchResult = Result>, ApiError>; type IndexedOrder = (usize, RaindexOrder); @@ -50,10 +130,28 @@ pub(crate) trait OrdersListDataSource: Send + Sync { async fn get_order_quotes_batch(&self, orders: &[RaindexOrder]) -> Vec { fetch_order_quotes_grouped(self, orders).await } + + /// Fetch `QuoteFields` (display-level extracted quote data) for each order. + /// + /// Default implementation runs the multicall batch for every order and + /// returns the extracted fields with no caching. The real implementation + /// applies caches (e.g. limit-order ratio cache) before falling back to + /// the multicall. + async fn fetch_quote_fields(&self, orders: &[RaindexOrder]) -> Vec { + let results = self.get_order_quotes_batch(orders).await; + orders + .iter() + .zip(results) + .map(|(order, result)| extract_quote_fields(order, result)) + .collect() + } } pub(crate) struct RaindexOrdersListDataSource<'a> { pub client: &'a RaindexClient, + pub block_number_cache: &'a crate::raindex::BlockNumberCache, + pub limit_ratio_cache: &'a LimitOrderRatioCache, + pub stale_price_skip_cache: &'a StalePriceSkipCache, } fn group_orders_by_chain(orders: &[RaindexOrder]) -> GroupedOrders { @@ -207,23 +305,159 @@ impl<'a> OrdersListDataSource for RaindexOrdersListDataSource<'a> { .first() .map(RaindexOrder::chain_id) .unwrap_or_default(); - // Use small chunk size (4) to avoid exceeding public RPC eth_call gas - // limits, which would trigger expensive probe-and-split retries in the - // quote library. 
- fetch_order_quotes_batch(orders, None, Some(4)) - .await - .map_err(|error| { - tracing::error!( - chain_id, - error = %error, - "failed to batch query order quotes" - ); - ApiError::Internal("failed to query order quotes".into()) - }) + + // Fetch oracle signed context for orders that have an oracle URL. + // This enables accurate quoting for oracle-dependent orders (e.g. SPYM). + let signed_contexts = fetch_oracle_contexts_for_orders(orders).await; + let has_any_context = signed_contexts.iter().any(|ctx| !ctx.is_empty()); + + // Resolve the block number once via our short-TTL cache so multiple + // concurrent batches hit the RPC at most once per cache window. If the + // cache fetch fails we fall through to `None` and let the upstream + // library do its own (uncached) lookup. + let block_number = if let Some(first_order) = orders.first() { + let rpc_urls: Vec = first_order + .get_rpc_urls() + .map(|urls| urls.into_iter().map(|u| u.to_string()).collect()) + .unwrap_or_default(); + crate::raindex::get_or_fetch_block_number(self.block_number_cache, chain_id, &rpc_urls) + .await + } else { + None + }; + + // Chunk size 16 matches the upstream library default. A multicall is + // a single eth_call regardless of chunk size, so larger chunks reduce + // RPC volume without adding latency. The library has a probe-and-split + // safety net if a chunk exceeds the RPC's gas budget. 
+ fetch_order_quotes_batch( + orders, + block_number, + Some(16), + if has_any_context { + Some(&signed_contexts) + } else { + None + }, + ) + .await + .map_err(|error| { + tracing::error!( + chain_id, + error = %error, + "failed to batch query order quotes" + ); + ApiError::Internal("failed to query order quotes".into()) + }) + } + + async fn fetch_quote_fields(&self, orders: &[RaindexOrder]) -> Vec { + fetch_quote_fields_with_caches( + self, + self.limit_ratio_cache, + self.stale_price_skip_cache, + crate::market_calendar::is_nyse_open(chrono::Utc::now()), + orders, + ) + .await + } +} + +/// Apply per-order quote caches around a batched quote call: +/// - **Limit-order ratio cache**: orders identified as limit orders that +/// already have a cached io_ratio bypass the multicall entirely. +/// `max_output` is left `None`, which causes the downstream summary +/// builder to fall back to `vault_balance` (the right behavior for +/// limit orders, where max_output is bounded by the output vault). +/// - **Stale-price skip cache**: orders previously known to revert with +/// `StalePrice` are skipped while NYSE is closed (their oracle won't +/// refresh until the cash session reopens). When NYSE is open, every +/// order is quoted normally — fresh `StalePrice` failures re-mark the +/// order so it stays skipped during the next off-hours window. +/// - All other orders go through the standard batched quote path. +/// - After the batch, successful quotes for limit orders populate the +/// limit cache, and any quote whose error includes `StalePrice` is +/// added to the skip cache. 
+pub(crate) async fn fetch_quote_fields_with_caches( + ds: &T, + limit_cache: &LimitOrderRatioCache, + stale_skip_cache: &StalePriceSkipCache, + nyse_open: bool, + orders: &[RaindexOrder], +) -> Vec +where + T: OrdersListDataSource + ?Sized, +{ + let mut fields: Vec> = vec![None; orders.len()]; + let mut to_quote_indices: Vec = Vec::new(); + let mut to_quote_orders: Vec = Vec::new(); + + for (i, order) in orders.iter().enumerate() { + if is_limit_order(order) { + if let Some(cached_ratio) = limit_cache.get(&order.order_hash()).await { + fields[i] = Some(QuoteFields { + io_ratio: cached_ratio, + max_output: None, + }); + continue; + } + } + if !nyse_open && stale_skip_cache.get(&order.order_hash()).await.is_some() { + tracing::debug!( + order_hash = ?order.order_hash(), + "skipping quote for stale-marked order (NYSE closed)" + ); + fields[i] = Some(QuoteFields { + io_ratio: "-".into(), + max_output: None, + }); + continue; + } + to_quote_indices.push(i); + to_quote_orders.push(order.clone()); + } + + if !to_quote_orders.is_empty() { + let quote_results = ds.get_order_quotes_batch(&to_quote_orders).await; + for (qi, &original_idx) in to_quote_indices.iter().enumerate() { + let order = &orders[original_idx]; + let result = quote_results + .get(qi) + .cloned() + .unwrap_or_else(|| Err(ApiError::Internal("missing quote result".into()))); + + if let Ok(quotes) = &result { + if quotes + .iter() + .any(|q| q.error.as_deref().is_some_and(quote_indicates_stale_price)) + { + stale_skip_cache.insert(order.order_hash(), ()).await; + } + } + + let extracted = extract_quote_fields(order, result); + if is_limit_order(order) && extracted.io_ratio != "-" { + limit_cache + .insert(order.order_hash(), extracted.io_ratio.clone()) + .await; + } + fields[original_idx] = Some(extracted); + } } + + fields + .into_iter() + .map(|opt| { + opt.unwrap_or_else(|| QuoteFields { + io_ratio: "-".into(), + max_output: None, + }) + }) + .collect() } /// Extracted quote fields for building order 
summaries. +#[derive(Clone)] pub(crate) struct QuoteFields { pub io_ratio: String, /// Simulated max output from on-chain quote. None when quote failed or unavailable. @@ -273,7 +507,18 @@ pub(crate) fn extract_quote_fields( ) -> QuoteFields { match quotes_result { Ok(quotes) => { - let data = quotes.first().and_then(|quote| quote.data.as_ref()); + let first = quotes.first(); + let data = first.and_then(|quote| quote.data.as_ref()); + if data.is_none() { + if let Some(quote) = first { + tracing::warn!( + order_hash = ?order.order_hash(), + success = quote.success, + error = ?quote.error, + "quote returned no data; using fallback io_ratio" + ); + } + } QuoteFields { io_ratio: data .map(|d| d.formatted_ratio.clone()) @@ -343,8 +588,14 @@ pub use get_by_owner::*; pub use get_by_token::*; pub use get_by_tx::*; -pub(crate) use get_by_owner::{orders_by_owner_cache, OrdersByOwnerCache}; -pub(crate) use get_by_token::{orders_by_token_cache, OrdersByTokenCache}; +pub(crate) use get_by_owner::orders_by_owner_cache; +pub(crate) use get_by_token::{ + orders_by_token_cache, process_get_orders_by_token, OrdersByTokenCache, +}; +pub(crate) use limit_cache::{is_limit_order, limit_order_ratio_cache, LimitOrderRatioCache}; +pub(crate) use stale_price_skip::{ + quote_indicates_stale_price, stale_price_skip_cache, StalePriceSkipCache, +}; pub fn routes() -> Vec { rocket::routes![ @@ -603,4 +854,315 @@ mod tests { assert!(matches!(result, Err(ApiError::Internal(_)))); } + + fn limit_order_for_chain(chain_id: u32, order_hash: &str, deployment: &str) -> RaindexOrder { + let mut value = order_json(); + value["chainId"] = json!(chain_id); + value["orderHash"] = json!(order_hash); + value["parsedMeta"] = json!([{ + "DotrainGuiStateV1": { + "dotrain_hash": "0x0000000000000000000000000000000000000000000000000000000000000001", + "field_values": {}, + "deposits": {}, + "select_tokens": {}, + "vault_ids": {}, + "selected_deployment": deployment, + } + }]); + 
serde_json::from_value(value).expect("deserialize limit-order mock") + } + + #[rocket::async_test] + async fn test_limit_cache_hit_skips_multicall() { + let cache = limit_order_ratio_cache(); + let order = limit_order_for_chain( + 8453, + "0x00000000000000000000000000000000000000000000000000000000000000aa", + "fixed-limit-buy", + ); + cache.insert(order.order_hash(), "0.42".to_string()).await; + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::new(), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let stale_skip_cache = stale_price_skip_cache(); + let fields = fetch_quote_fields_with_caches( + &ds, + &cache, + &stale_skip_cache, + true, + std::slice::from_ref(&order), + ) + .await; + + assert_eq!(fields.len(), 1); + assert_eq!(fields[0].io_ratio, "0.42"); + assert!(fields[0].max_output.is_none()); + // No multicall and no per-order call should have happened. 
+ assert!(batch_calls.lock().expect("lock").is_empty()); + assert!(single_calls.lock().expect("lock").is_empty()); + } + + #[rocket::async_test] + async fn test_limit_cache_miss_populates_cache() { + let cache = limit_order_ratio_cache(); + let order = limit_order_for_chain( + 8453, + "0x00000000000000000000000000000000000000000000000000000000000000bb", + "fixed-limit-sell", + ); + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::from([(8453, Ok(vec![vec![mock_quote("1.234")]]))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let stale_skip_cache = stale_price_skip_cache(); + let fields = fetch_quote_fields_with_caches( + &ds, + &cache, + &stale_skip_cache, + true, + std::slice::from_ref(&order), + ) + .await; + + assert_eq!(fields[0].io_ratio, "1.234"); + // Batch happened exactly once for the uncached limit order. + assert_eq!(batch_calls.lock().expect("lock").len(), 1); + // Cache now contains the freshly-fetched ratio. 
+ assert_eq!( + cache.get(&order.order_hash()).await.as_deref(), + Some("1.234") + ); + } + + #[rocket::async_test] + async fn test_limit_cache_does_not_cache_non_limit_orders() { + let cache = limit_order_ratio_cache(); + let order = mock_order_for_chain( + 8453, + "0x00000000000000000000000000000000000000000000000000000000000000cc", + ); + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::from([(8453, Ok(vec![vec![mock_quote("9.99")]]))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let stale_skip_cache = stale_price_skip_cache(); + let _ = fetch_quote_fields_with_caches( + &ds, + &cache, + &stale_skip_cache, + true, + std::slice::from_ref(&order), + ) + .await; + + // Non-limit order: cache should not be populated. + assert!(cache.get(&order.order_hash()).await.is_none()); + } + + #[rocket::async_test] + async fn test_limit_cache_does_not_cache_failed_quote() { + let cache = limit_order_ratio_cache(); + let order = limit_order_for_chain( + 8453, + "0x00000000000000000000000000000000000000000000000000000000000000dd", + "fixed-limit", + ); + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::from([( + order_hash_key(&order), + Err(ApiError::Internal("nope".into())), + )]), + batched_quotes: HashMap::from([(8453, Err(ApiError::Internal("batch failed".into())))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let stale_skip_cache = stale_price_skip_cache(); + let fields = fetch_quote_fields_with_caches( + &ds, + &cache, + &stale_skip_cache, + true, + std::slice::from_ref(&order), + ) + .await; + + // io_ratio is the placeholder, no cache write. 
+ assert_eq!(fields[0].io_ratio, "-"); + assert!(cache.get(&order.order_hash()).await.is_none()); + } + + #[rocket::async_test] + async fn test_limit_cache_mixed_orders_only_quotes_uncached() { + let cache = limit_order_ratio_cache(); + let cached_limit = limit_order_for_chain( + 8453, + "0x0000000000000000000000000000000000000000000000000000000000000111", + "fixed-limit-buy", + ); + cache + .insert(cached_limit.order_hash(), "0.5".to_string()) + .await; + let regular = mock_order_for_chain( + 8453, + "0x0000000000000000000000000000000000000000000000000000000000000222", + ); + + let orders = vec![cached_limit.clone(), regular.clone()]; + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + // The batch only sees the regular order; one mock quote. + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::from([(8453, Ok(vec![vec![mock_quote("7.0")]]))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let stale_skip_cache = stale_price_skip_cache(); + let fields = + fetch_quote_fields_with_caches(&ds, &cache, &stale_skip_cache, true, &orders).await; + + // Position 0: cached limit value. + assert_eq!(fields[0].io_ratio, "0.5"); + assert!(fields[0].max_output.is_none()); + // Position 1: from the batch. + assert_eq!(fields[1].io_ratio, "7.0"); + // Batch was called with exactly the regular order. 
+ let batch_calls = batch_calls.lock().expect("lock"); + assert_eq!(batch_calls.as_slice(), &[(8453, 1)]); + } + + fn stale_price_quote() -> rain_orderbook_common::raindex_client::order_quotes::RaindexOrderQuote + { + serde_json::from_value(json!({ + "pair": { "pairName": "USDC/WETH", "inputIndex": 0, "outputIndex": 0 }, + "blockNumber": 1, + "data": null, + "success": false, + "error": "Execution reverted with error: StalePrice\n" + })) + .expect("deserialize stale-price quote") + } + + #[rocket::async_test] + async fn test_stale_price_marker_set_after_quote_failure() { + let limit_cache = limit_order_ratio_cache(); + let stale_skip_cache = stale_price_skip_cache(); + let order = mock_order_for_chain( + 8453, + "0x0000000000000000000000000000000000000000000000000000000000000abc", + ); + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::from([(8453, Ok(vec![vec![stale_price_quote()]]))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let _ = fetch_quote_fields_with_caches( + &ds, + &limit_cache, + &stale_skip_cache, + true, // NYSE open: still quote, but mark on failure + std::slice::from_ref(&order), + ) + .await; + + assert!(stale_skip_cache.get(&order.order_hash()).await.is_some()); + } + + #[rocket::async_test] + async fn test_stale_marked_order_skipped_when_nyse_closed() { + let limit_cache = limit_order_ratio_cache(); + let stale_skip_cache = stale_price_skip_cache(); + let order = mock_order_for_chain( + 8453, + "0x0000000000000000000000000000000000000000000000000000000000000def", + ); + // Pre-mark the order as stale. 
+ stale_skip_cache.insert(order.order_hash(), ()).await; + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::new(), // Empty: any batch attempt would panic. + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let fields = fetch_quote_fields_with_caches( + &ds, + &limit_cache, + &stale_skip_cache, + false, // NYSE closed + std::slice::from_ref(&order), + ) + .await; + + // Skipped: placeholder fields, no batch call. + assert_eq!(fields[0].io_ratio, "-"); + assert!(fields[0].max_output.is_none()); + assert!(batch_calls.lock().expect("lock").is_empty()); + } + + #[rocket::async_test] + async fn test_stale_marked_order_quoted_when_nyse_open() { + let limit_cache = limit_order_ratio_cache(); + let stale_skip_cache = stale_price_skip_cache(); + let order = mock_order_for_chain( + 8453, + "0x000000000000000000000000000000000000000000000000000000000000beef", + ); + // Pre-mark the order: NYSE-open should still quote it. 
+ stale_skip_cache.insert(order.order_hash(), ()).await; + + let batch_calls = Arc::new(Mutex::new(Vec::new())); + let single_calls = Arc::new(Mutex::new(Vec::new())); + let ds = BatchingTestDataSource { + per_order_quotes: HashMap::new(), + batched_quotes: HashMap::from([(8453, Ok(vec![vec![mock_quote("3.14")]]))]), + batch_calls: Arc::clone(&batch_calls), + single_calls: Arc::clone(&single_calls), + }; + + let fields = fetch_quote_fields_with_caches( + &ds, + &limit_cache, + &stale_skip_cache, + true, // NYSE open + std::slice::from_ref(&order), + ) + .await; + + assert_eq!(fields[0].io_ratio, "3.14"); + assert_eq!(batch_calls.lock().expect("lock").len(), 1); + } } diff --git a/src/routes/orders/stale_price_skip.rs b/src/routes/orders/stale_price_skip.rs new file mode 100644 index 0000000..23aed7e --- /dev/null +++ b/src/routes/orders/stale_price_skip.rs @@ -0,0 +1,59 @@ +use crate::cache::AppCache; +use alloy::primitives::B256; +use std::time::Duration; + +const STALE_SKIP_TTL: Duration = Duration::from_secs(7 * 86_400); +const STALE_SKIP_CAPACITY: u64 = 10_000; + +/// Set of order_hashes that have returned `StalePrice` from a quote. +/// +/// The TTL is long (7 days) because the marker only becomes a no-op when +/// NYSE re-opens (callers consult `market_calendar::is_nyse_open` before +/// honoring it). The TTL exists purely as a safety valve so a stale-marker +/// for a permanently-removed order eventually falls out of memory. +pub(crate) type StalePriceSkipCache = AppCache; + +pub(crate) fn stale_price_skip_cache() -> StalePriceSkipCache { + AppCache::new(STALE_SKIP_CAPACITY, STALE_SKIP_TTL) +} + +/// True if the multicall error message indicates the order's price feed is +/// stale (Chainlink-style `StalePrice` revert from the order's strategy). 
+pub(crate) fn quote_indicates_stale_price(error_msg: &str) -> bool { + error_msg.contains("StalePrice") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_quote_indicates_stale_price_matches() { + assert!(quote_indicates_stale_price( + "Execution reverted with error: StalePrice\n" + )); + assert!(quote_indicates_stale_price( + "Multicall failed: ... StalePrice ..." + )); + } + + #[test] + fn test_quote_indicates_stale_price_does_not_match_other_errors() { + assert!(!quote_indicates_stale_price( + "Execution reverted with error: NotEnoughBalance" + )); + assert!(!quote_indicates_stale_price("")); + assert!(!quote_indicates_stale_price( + "rate-limited until QuantaInstant(...)" + )); + } + + #[rocket::async_test] + async fn test_skip_cache_set_and_get() { + let cache = stale_price_skip_cache(); + let key = B256::from([7u8; 32]); + assert!(cache.get(&key).await.is_none()); + cache.insert(key, ()).await; + assert!(cache.get(&key).await.is_some()); + } +} diff --git a/src/routes/swap/mod.rs b/src/routes/swap/mod.rs index ad25933..706bf57 100644 --- a/src/routes/swap/mod.rs +++ b/src/routes/swap/mod.rs @@ -1,8 +1,9 @@ mod calldata; mod quote; +use crate::cache::AppCache; use crate::error::ApiError; -use crate::types::swap::SwapCalldataResponse; +use crate::types::swap::{SwapCalldataResponse, SwapQuoteResponse}; use alloy::primitives::Address; use async_trait::async_trait; use rain_orderbook_common::raindex_client::orders::{ @@ -15,6 +16,20 @@ use rain_orderbook_common::take_orders::{ build_take_order_candidates_for_pair, TakeOrderCandidate, }; use rocket::Route; +use std::time::Duration; + +const SWAP_QUOTE_CACHE_TTL: Duration = Duration::from_secs(5); +const SWAP_QUOTE_CACHE_CAPACITY: u64 = 1_000; + +/// Cache of completed swap quotes keyed by `(input_token, output_token, output_amount)`. +/// +/// 5s TTL keeps quotes within ~2 Base block times of staleness while +/// coalescing concurrent or repeated requests for the same pair+amount. 
+pub(crate) type SwapQuoteCache = AppCache<(Address, Address, String), SwapQuoteResponse>; + +pub(crate) fn swap_quote_cache() -> SwapQuoteCache { + AppCache::new(SWAP_QUOTE_CACHE_CAPACITY, SWAP_QUOTE_CACHE_TTL) +} #[async_trait] pub(crate) trait SwapDataSource: Send + Sync { diff --git a/src/routes/swap/quote.rs b/src/routes/swap/quote.rs index b37c3c9..f5f0d18 100644 --- a/src/routes/swap/quote.rs +++ b/src/routes/swap/quote.rs @@ -1,4 +1,4 @@ -use super::{RaindexSwapDataSource, SwapDataSource}; +use super::{RaindexSwapDataSource, SwapDataSource, SwapQuoteCache}; use crate::auth::AuthenticatedKey; use crate::error::{ApiError, ApiErrorResponse}; use crate::fairings::{GlobalRateLimit, TracingSpan}; @@ -30,17 +30,25 @@ pub async fn post_swap_quote( _global: GlobalRateLimit, _key: AuthenticatedKey, shared_raindex: &State, + swap_cache: &State, span: TracingSpan, request: Json, ) -> Result, ApiError> { let req = request.into_inner(); async move { tracing::info!(body = ?req, "request received"); - let raindex = shared_raindex.read().await; - let ds = RaindexSwapDataSource { - client: raindex.client(), - }; - let response = process_swap_quote(&ds, req).await?; + let cache_key = (req.input_token, req.output_token, req.output_amount.clone()); + let req_for_fetch = req.clone(); + let response = swap_cache + .get_or_try_insert(cache_key, || async move { + let raindex = shared_raindex.read().await; + let ds = RaindexSwapDataSource { + client: raindex.client(), + }; + process_swap_quote(&ds, req_for_fetch).await + }) + .await + .map_err(ApiError::from)?; Ok(Json(response)) } .instrument(span.0) @@ -255,4 +263,69 @@ mod tests { .await; assert_eq!(response.status(), Status::Unauthorized); } + + #[rocket::async_test] + async fn test_swap_cache_returns_cached_response_without_fetch() { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + let cache = crate::routes::swap::swap_quote_cache(); + let key = (USDC, WETH, "100".to_string()); + let cached = 
SwapQuoteResponse { + input_token: USDC, + output_token: WETH, + output_amount: "100".to_string(), + estimated_output: "100".to_string(), + estimated_input: "150".to_string(), + estimated_io_ratio: "1.5".to_string(), + }; + cache.insert(key.clone(), cached.clone()).await; + + // Fetch closure should not run on a cache hit; if it does the test + // notices via the counter. + let calls = Arc::new(AtomicUsize::new(0)); + let calls_inner = Arc::clone(&calls); + let result: Result> = cache + .get_or_try_insert(key, || async move { + calls_inner.fetch_add(1, Ordering::SeqCst); + Err::(ApiError::Internal("should not be called".into())) + }) + .await; + + let response = result.expect("cache hit should bypass fetch"); + assert_eq!(response.estimated_io_ratio, "1.5"); + assert_eq!(calls.load(Ordering::SeqCst), 0); + } + + #[rocket::async_test] + async fn test_swap_cache_runs_fetch_on_miss_and_populates() { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + + let cache = crate::routes::swap::swap_quote_cache(); + let key = (USDC, WETH, "200".to_string()); + let calls = Arc::new(AtomicUsize::new(0)); + let calls_inner = Arc::clone(&calls); + + let response: Result> = cache + .get_or_try_insert(key.clone(), || async move { + calls_inner.fetch_add(1, Ordering::SeqCst); + Ok(SwapQuoteResponse { + input_token: USDC, + output_token: WETH, + output_amount: "200".to_string(), + estimated_output: "200".to_string(), + estimated_input: "300".to_string(), + estimated_io_ratio: "1.5".to_string(), + }) + }) + .await; + + let r = response.expect("fetch result populated cache"); + assert_eq!(r.estimated_input, "300"); + assert_eq!(calls.load(Ordering::SeqCst), 1); + // Subsequent get sees cached value without re-fetching. 
+        let cached = cache.get(&key).await.expect("cached value present");
+        assert_eq!(cached.estimated_input, "300");
+    }
 }
diff --git a/src/routes/tokens.rs b/src/routes/tokens.rs
index 6243d0d..bc36361 100644
--- a/src/routes/tokens.rs
+++ b/src/routes/tokens.rs
@@ -157,9 +157,13 @@ tokens:
 "#;
         let registry_url =
             crate::test_helpers::mock_raindex_registry_url_with_settings(settings).await;
-        let config = crate::raindex::RaindexProvider::load(&registry_url, None)
-            .await
-            .expect("load raindex config");
+        let config = crate::raindex::RaindexProvider::load(
+            &registry_url,
+            None,
+            std::collections::HashMap::new(),
+        )
+        .await
+        .expect("load raindex config");
         let client = TestClientBuilder::new()
             .raindex_config(config)
             .build()
@@ -227,9 +231,13 @@ using-tokens-from:
 }"#;
         let registry_url =
             mock_raindex_registry_url_with_settings_and_tokens(settings, remote_tokens).await;
-        let config = crate::raindex::RaindexProvider::load(&registry_url, None)
-            .await
-            .expect("load raindex config");
+        let config = crate::raindex::RaindexProvider::load(
+            &registry_url,
+            None,
+            std::collections::HashMap::new(),
+        )
+        .await
+        .expect("load raindex config");
         let client = TestClientBuilder::new()
             .raindex_config(config)
             .build()
diff --git a/src/routes/trades.rs b/src/routes/trades.rs
index 2c5d777..e61a567 100644
--- a/src/routes/trades.rs
+++ b/src/routes/trades.rs
@@ -909,14 +909,8 @@ async fn process_get_taker_trades(
     sender: Address,
     params: TradesPaginationParams,
 ) -> Result<TakerTradesResponse, ApiError> {
-    // Step 1: Get tx hashes (cached)
-    let tx_hashes = match direct_trades {
-        Some(fetcher) => taker_tx_cache
-            .get_or_try_insert(sender, || async {
-                fetcher.fetch_taker_tx_hashes(&sender).await
-            })
-            .await
-            .map_err(ApiError::from)?,
+    let fetcher = match direct_trades {
+        Some(f) => f,
         None => {
             tracing::warn!("direct trades fetcher unavailable; returning empty taker trades");
             return Ok(TakerTradesResponse {
@@ -932,6 +926,14 @@
         }
     };
 
+    // Step 1: Get tx hashes
(cached) + let tx_hashes = taker_tx_cache + .get_or_try_insert(sender, || async { + fetcher.fetch_taker_tx_hashes(&sender).await + }) + .await + .map_err(ApiError::from)?; + // Step 2: Paginate let page = params.page.unwrap_or(1); let page_size = params.page_size.unwrap_or(20); @@ -949,17 +951,33 @@ async fn process_get_taker_trades( tx_hashes[offset..end].iter().map(|(h, _)| *h).collect() }; - // Step 3: Resolve each tx via existing cached trade-by-tx lookup + // Step 3: Batch fetch all trades for the page via DirectTradesFetcher (fast SQLite path) + let trades_by_tx = fetcher.fetch_taker_tx_trades(&page_hashes).await?; + + // Step 4: Build TradesByTxResponse for each tx hash, preserving order let mut market_orders = Vec::with_capacity(page_hashes.len()); - for tx_hash in page_hashes { - match get_cached_trades_by_tx(trades_by_tx_cache, ds, tx_hash, None).await { - Ok(tx_trades) => market_orders.push(tx_trades), + for tx_hash in &page_hashes { + let enriched_trades = match trades_by_tx.get(tx_hash) { + Some(trades) if !trades.is_empty() => trades, + _ => { + tracing::warn!(tx_hash = %tx_hash, "no trades found for taker tx; skipping"); + continue; + } + }; + + match build_trades_by_tx_from_enriched(*tx_hash, enriched_trades) { + Ok(response) => market_orders.push(response), Err(e) => { - tracing::warn!(tx_hash = %tx_hash, error = %e, "failed to resolve taker tx; skipping"); + tracing::warn!(tx_hash = %tx_hash, error = %e, "failed to build taker tx response; skipping"); } } } + // Suppress unused variable warnings — these params are kept for backward + // compatibility (the test passes None for direct_trades and exercises the + // early-return path above, which never reaches here). + let _ = (ds, trades_by_tx_cache); + Ok(TakerTradesResponse { market_orders, pagination: TradesPagination { @@ -972,6 +990,112 @@ async fn process_get_taker_trades( }) } +/// Build a `TradesByTxResponse` from enriched trade rows returned by DirectTradesFetcher. 
+fn build_trades_by_tx_from_enriched(
+    tx_hash: B256,
+    trades: &[crate::direct_trades::EnrichedTradeRow],
+) -> Result<TradesByTxResponse, ApiError> {
+    let first = &trades[0];
+
+    let mut total_input = Float::zero().map_err(|e| {
+        tracing::error!(error = %e, "float zero construction failed");
+        ApiError::Internal("trade totals calculation failed".into())
+    })?;
+    let mut total_output = Float::zero().map_err(|e| {
+        tracing::error!(error = %e, "float zero construction failed");
+        ApiError::Internal("trade totals calculation failed".into())
+    })?;
+
+    let mut entries = Vec::with_capacity(trades.len());
+    for trade in trades {
+        let input_float = Float::parse(trade.input_amount.clone()).map_err(|e| {
+            tracing::error!(error = %e, "failed to parse input amount");
+            ApiError::Internal("trade totals calculation failed".into())
+        })?;
+        let output_float = Float::parse(trade.output_amount.clone()).map_err(|e| {
+            tracing::error!(error = %e, "failed to parse output amount");
+            ApiError::Internal("trade totals calculation failed".into())
+        })?;
+
+        let io_ratio = {
+            let zero = Float::zero().map_err(|e| {
+                tracing::error!(error = %e, "float zero construction failed");
+                ApiError::Internal("io ratio calculation failed".into())
+            })?;
+            if output_float.eq(zero).unwrap_or(true) {
+                "0".to_string()
+            } else {
+                let ratio = input_float.div(output_float).map_err(|e| {
+                    tracing::error!(error = %e, "failed to compute io ratio");
+                    ApiError::Internal("io ratio calculation failed".into())
+                })?;
+                format_float(ratio, "io ratio")?
+ } + }; + + // Re-parse for totals accumulation (parse is cheap) + let input_for_total = Float::parse(trade.input_amount.clone()).map_err(|e| { + tracing::error!(error = %e, "failed to parse input amount for total"); + ApiError::Internal("trade totals calculation failed".into()) + })?; + let output_for_total = Float::parse(trade.output_amount.clone()).map_err(|e| { + tracing::error!(error = %e, "failed to parse output amount for total"); + ApiError::Internal("trade totals calculation failed".into()) + })?; + + total_input = total_input.add(input_for_total).map_err(|e| { + tracing::error!(error = %e, "failed to sum total input"); + ApiError::Internal("trade totals calculation failed".into()) + })?; + total_output = total_output.add(output_for_total).map_err(|e| { + tracing::error!(error = %e, "failed to sum total output"); + ApiError::Internal("trade totals calculation failed".into()) + })?; + + entries.push(TradeByTxEntry { + order_hash: trade.order_hash, + order_owner: trade.order_owner, + request: TradeRequest { + input_token: trade.input_token, + output_token: trade.output_token, + maximum_input: trade.input_amount.clone(), + maximum_io_ratio: io_ratio.clone(), + }, + result: TradeResult { + input_amount: trade.input_amount.clone(), + output_amount: trade.output_amount.clone(), + actual_io_ratio: io_ratio, + }, + }); + } + + let zero = Float::zero().map_err(|e| { + tracing::error!(error = %e, "float zero construction failed"); + ApiError::Internal("trade totals calculation failed".into()) + })?; + let average_io_ratio = if total_output.eq(zero).unwrap_or(true) { + zero + } else { + total_input.div(total_output).map_err(|e| { + tracing::error!(error = %e, "failed to compute average io ratio"); + ApiError::Internal("trade totals calculation failed".into()) + })? 
+    };
+
+    Ok(TradesByTxResponse {
+        tx_hash,
+        block_number: first.block_number,
+        timestamp: first.block_timestamp,
+        sender: first.sender,
+        trades: entries,
+        totals: TradesTotals {
+            total_input_amount: format_float(total_input, "trade totals")?,
+            total_output_amount: format_float(total_output, "trade totals")?,
+            average_io_ratio: format_float(average_io_ratio, "trade totals")?,
+        },
+    })
+}
+
 #[utoipa::path(
     get,
     path = "/v1/trades/tx/{tx_hash}",
diff --git a/src/test_helpers.rs b/src/test_helpers.rs
index 0c80d77..b09e894 100644
--- a/src/test_helpers.rs
+++ b/src/test_helpers.rs
@@ -49,16 +49,34 @@ impl TestClientBuilder {
                 Some(url) => url,
                 None => mock_raindex_registry_url().await,
             };
-            crate::raindex::RaindexProvider::load(&registry_url, None)
+            crate::raindex::RaindexProvider::load(&registry_url, None, std::collections::HashMap::new())
                 .await
                 .expect("mock raindex config from registry url")
            }
         };
-        let shared_raindex = tokio::sync::RwLock::new(raindex_config);
+        let shared_raindex = std::sync::Arc::new(tokio::sync::RwLock::new(raindex_config));
         let docs_dir = std::env::temp_dir().to_string_lossy().into_owned();
-        let rocket = crate::rocket(pool, self.rate_limiter, shared_raindex, docs_dir, None)
-            .expect("valid rocket instance");
+        let orders_by_token_cache = crate::routes::orders::orders_by_token_cache();
+        let block_number_cache = crate::raindex::block_number_cache();
+        let limit_ratio_cache = crate::routes::orders::limit_order_ratio_cache();
+        let stale_price_skip_cache = crate::routes::orders::stale_price_skip_cache();
+        let swap_quote_cache = crate::routes::swap::swap_quote_cache();
+        let cache_warmer_stats = crate::cache_warmer::shared_cache_warmer_stats();
+        let rocket = crate::rocket(
+            pool,
+            self.rate_limiter,
+            shared_raindex,
+            docs_dir,
+            None,
+            orders_by_token_cache,
+            block_number_cache,
+            limit_ratio_cache,
+            stale_price_skip_cache,
+            swap_quote_cache,
+            cache_warmer_stats,
+        )
+        .expect("valid rocket instance");
        Client::tracked(rocket).await.expect("valid client")
    }
 
@@ -66,7 +84,7 @@
 
 pub(crate) async fn mock_raindex_config() -> crate::raindex::RaindexProvider {
     let registry_url = mock_raindex_registry_url().await;
-    crate::raindex::RaindexProvider::load(&registry_url, None)
+    crate::raindex::RaindexProvider::load(&registry_url, None, std::collections::HashMap::new())
         .await
         .expect("mock raindex config")
 }
diff --git a/src/types/health.rs b/src/types/health.rs
index 3e2c50f..f933117 100644
--- a/src/types/health.rs
+++ b/src/types/health.rs
@@ -18,6 +18,42 @@ pub struct DetailedHealthResponse {
 
     /// raindex local database connectivity and sync status
     pub raindex_db: RaindexDbStatus,
+
+    /// Background cache warmer health
+    pub cache_warmer: CacheWarmerStatus,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+pub struct CacheWarmerStatus {
+    /// True if the warmer has completed at least one cycle
+    pub running: bool,
+
+    /// Total number of cycles completed since process start
+    #[schema(example = 42)]
+    pub total_cycles: u64,
+
+    /// Duration of the most recently completed cycle, in milliseconds
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schema(example = 9500)]
+    pub last_cycle_ms: Option<u64>,
+
+    /// Number of orders refreshed in the last cycle (`tokens` × per-token orders implicit)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schema(example = 11)]
+    pub last_tokens: Option<u64>,
+
+    /// Errors observed in the last cycle (per-token failures)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_errors: Option<u64>,
+
+    /// Seconds elapsed since the warmer last completed a cycle
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schema(example = 7)]
+    pub seconds_since_last_complete: Option<u64>,
+
+    /// Human-readable age of the last completion (e.g. `12s ago`)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_complete_age: Option<String>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]