diff --git a/crates/core/common/src/amp_catalog_provider.rs b/crates/core/common/src/amp_catalog_provider.rs index f209c55d2..c657aea0d 100644 --- a/crates/core/common/src/amp_catalog_provider.rs +++ b/crates/core/common/src/amp_catalog_provider.rs @@ -31,7 +31,6 @@ use crate::{ AsyncSchemaProvider as FuncAsyncSchemaProvider, SchemaProvider as FuncSchemaProvider, }, }, - udfs::eth_call::EthCallUdfsCache, }; /// Combined async schema provider for both tables and functions. @@ -51,7 +50,6 @@ pub const AMP_CATALOG_NAME: &str = "amp"; #[derive(Clone)] pub struct AmpCatalogProvider { datasets_cache: DatasetsCache, - ethcall_udfs_cache: EthCallUdfsCache, /// Optional dependency alias overrides. When set, bare names matching /// a key are resolved directly to the corresponding [`HashReference`] /// instead of going through `PartialReference` → `Reference` → `resolve_revision`. @@ -63,10 +61,9 @@ pub struct AmpCatalogProvider { impl AmpCatalogProvider { /// Creates a new catalog provider. - pub fn new(datasets_cache: DatasetsCache, ethcall_udfs_cache: EthCallUdfsCache) -> Self { + pub fn new(datasets_cache: DatasetsCache) -> Self { Self { datasets_cache, - ethcall_udfs_cache, dep_aliases: Default::default(), self_schema: None, } @@ -116,11 +113,8 @@ impl AmpCatalogProvider { .await .map_err(|err| DataFusionError::External(Box::new(err)))?; - let provider: Arc = Arc::new(DatasetSchemaProvider::new( - name.to_string(), - dataset, - self.ethcall_udfs_cache.clone(), - )); + let provider: Arc = + Arc::new(DatasetSchemaProvider::new(name.to_string(), dataset)); return Ok(Some(provider)); } @@ -146,11 +140,8 @@ impl AmpCatalogProvider { .await .map_err(|err| DataFusionError::External(Box::new(err)))?; - let provider: Arc = Arc::new(DatasetSchemaProvider::new( - name.to_string(), - dataset, - self.ethcall_udfs_cache.clone(), - )); + let provider: Arc = + Arc::new(DatasetSchemaProvider::new(name.to_string(), dataset)); Ok(Some(provider)) } } diff --git 
a/crates/core/common/src/dataset_schema_provider.rs b/crates/core/common/src/dataset_schema_provider.rs index 353093f75..dc6e90e80 100644 --- a/crates/core/common/src/dataset_schema_provider.rs +++ b/crates/core/common/src/dataset_schema_provider.rs @@ -16,7 +16,7 @@ use datafusion::{ logical_expr::ScalarUDF, }; use datasets_common::{dataset::Dataset, table_name::TableName}; -use datasets_derived::{dataset::Dataset as DerivedDataset, func_name::ETH_CALL_FUNCTION_NAME}; +use datasets_derived::dataset::Dataset as DerivedDataset; use parking_lot::RwLock; use crate::{ @@ -27,7 +27,7 @@ use crate::{ }, }, plan_table::PlanTable, - udfs::{eth_call::EthCallUdfsCache, plan::PlanJsUdf}, + udfs::plan::PlanJsUdf, }; /// Schema provider for a dataset. @@ -38,22 +38,16 @@ use crate::{ pub struct DatasetSchemaProvider { schema_name: String, dataset: Arc, - ethcall_udfs_cache: EthCallUdfsCache, tables: RwLock>>, functions: RwLock>>, } impl DatasetSchemaProvider { /// Creates a new provider for the given dataset and schema name. - pub(crate) fn new( - schema_name: String, - dataset: Arc, - ethcall_udfs_cache: EthCallUdfsCache, - ) -> Self { + pub(crate) fn new(schema_name: String, dataset: Arc) -> Self { Self { schema_name, dataset, - ethcall_udfs_cache, tables: RwLock::new(Default::default()), functions: RwLock::new(Default::default()), } @@ -147,21 +141,6 @@ impl FuncSchemaProvider for DatasetSchemaProvider { } } - // Check for eth_call function - if name == ETH_CALL_FUNCTION_NAME { - let udf = self - .ethcall_udfs_cache - .eth_call_for_dataset(&self.schema_name, self.dataset.as_ref()) - .await - .map_err(|err| DataFusionError::External(Box::new(err)))?; - - if let Some(udf) = udf { - let udf = Arc::new(udf); - self.functions.write().insert(name.to_string(), udf.clone()); - return Ok(Some(Arc::new(ScalarFunctionProvider::from(udf)))); - } - } - // Try to get UDF from derived dataset and build a planning-only UDF. 
let udf: Option = self .dataset @@ -187,8 +166,6 @@ impl FuncSchemaProvider for DatasetSchemaProvider { /// Returns whether the function is known **from the cache only**. /// /// This deliberately does not probe the dataset or the store because: - /// - `eth_call` resolution requires async I/O (`dataset_store.eth_call_for_dataset`), - /// which cannot be performed in this synchronous trait method without blocking. /// - Derived-dataset UDF lookup (`function_by_name`) is sync but allocates a /// full `ScalarUDF` as a side effect, which is inappropriate for an existence check. /// diff --git a/crates/core/common/src/lib.rs b/crates/core/common/src/lib.rs index ce5c7ea30..ebf093a29 100644 --- a/crates/core/common/src/lib.rs +++ b/crates/core/common/src/lib.rs @@ -19,6 +19,7 @@ pub mod physical_table; pub mod plan_table; pub mod plan_visitors; pub mod retryable; +pub mod rpc_catalog_provider; pub mod self_schema_provider; pub mod sql; pub mod streaming_query; diff --git a/crates/core/common/src/rpc_catalog_provider.rs b/crates/core/common/src/rpc_catalog_provider.rs new file mode 100644 index 000000000..0806f65d0 --- /dev/null +++ b/crates/core/common/src/rpc_catalog_provider.rs @@ -0,0 +1,243 @@ +//! Catalog provider for RPC functions. +//! +//! Resolves `rpc..eth_call(...)` function references by looking up +//! providers directly from the [`ProvidersRegistry`] without requiring a dataset. +//! +//! SQL uses `snake_case` network names (e.g., `base_sepolia`) while provider +//! configs use `kebab-case` (e.g., `base-sepolia`). The [`SqlNetworkId`] type +//! handles this conversion. 
+ +use std::sync::Arc; + +use async_trait::async_trait; +use datafusion::error::DataFusionError; +use datasets_common::network_id::NetworkId; +use datasets_derived::func_name::ETH_CALL_FUNCTION_NAME; + +use crate::{ + func_catalog::{ + function_provider::{FunctionProvider, ScalarFunctionProvider}, + schema_provider::AsyncSchemaProvider as FuncAsyncSchemaProvider, + }, + udfs::eth_call::EthCallUdfsCache, +}; + +/// The catalog name used to register the RPC function provider. +pub const RPC_CATALOG_NAME: &str = "rpc"; + +/// Catalog provider for RPC functions. +/// +/// Resolves network names as schemas (e.g., `rpc.mainnet`) and provides +/// `eth_call` as the only function within each network schema. +#[derive(Clone)] +pub struct RpcCatalogProvider { + cache: EthCallUdfsCache, +} + +impl RpcCatalogProvider { + /// Creates a new RPC catalog provider. + pub fn new(cache: EthCallUdfsCache) -> Self { + Self { cache } + } +} + +impl std::fmt::Debug for RpcCatalogProvider { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RpcCatalogProvider").finish_non_exhaustive() + } +} + +#[async_trait] +impl crate::func_catalog::catalog_provider::AsyncCatalogProvider for RpcCatalogProvider { + async fn schema( + &self, + name: &str, + ) -> Result>, DataFusionError> { + let sql_network: SqlNetworkId = name + .parse() + .map_err(|err| DataFusionError::Plan(format!("invalid network '{name}': {err}")))?; + Ok(Some(Arc::new(RpcSchemaProvider { + sql_network, + cache: self.cache.clone(), + }))) + } +} + +/// Schema provider for a single network within the `rpc` catalog. +/// +/// Resolves the `eth_call` function by creating an EVM RPC client from +/// the providers registry for the given network. 
+#[derive(Debug)] +struct RpcSchemaProvider { + sql_network: SqlNetworkId, + cache: EthCallUdfsCache, +} + +#[async_trait] +impl FuncAsyncSchemaProvider for RpcSchemaProvider { + async fn function( + &self, + name: &str, + ) -> Result>, DataFusionError> { + if name != ETH_CALL_FUNCTION_NAME { + return Ok(None); + } + + let udf_name = self.eth_call_udf_name(); + let network: NetworkId = self.sql_network.clone().into(); + let udf = self + .cache + .eth_call_for_network(&udf_name, &network) + .await + .map_err(|err| DataFusionError::External(Box::new(err)))?; + + Ok(Some(Arc::new(ScalarFunctionProvider::from(Arc::new(udf))))) + } +} + +impl RpcSchemaProvider { + /// Returns the UDF name for DataFusion's flat function registry lookup. + /// + /// Uses the SQL-facing `snake_case` network name so the UDF name matches + /// what DataFusion's planner constructs (e.g., `rpc.base_sepolia.eth_call`). + fn eth_call_udf_name(&self) -> String { + format!( + "{}.{}.{}", + RPC_CATALOG_NAME, self.sql_network, ETH_CALL_FUNCTION_NAME + ) + } +} + +/// A SQL-compatible network identifier using `snake_case` format. +/// +/// SQL identifiers cannot contain hyphens, so network names in SQL use +/// `snake_case` (e.g., `base_sepolia`). This type validates the `snake_case` +/// format and converts to [`NetworkId`] (`kebab-case`) for provider lookups. 
+#[derive(Debug, Clone)] +pub struct SqlNetworkId(String); + +impl std::fmt::Display for SqlNetworkId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl std::str::FromStr for SqlNetworkId { + type Err = InvalidSqlNetworkIdError; + + fn from_str(s: &str) -> Result { + if s.is_empty() { + return Err(InvalidSqlNetworkIdError::Empty); + } + // SQL network names must be snake_case: lowercase alphanumeric + underscores + if !s + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_') + { + return Err(InvalidSqlNetworkIdError::InvalidFormat(s.to_string())); + } + Ok(Self(s.to_string())) + } +} + +impl From for NetworkId { + fn from(sql: SqlNetworkId) -> Self { + let kebab = sql.0.replace('_', "-"); + // Safety: we validated non-empty in FromStr, and replacing underscores + // with hyphens preserves non-emptiness. + NetworkId::new_unchecked(kebab) + } +} + +/// Error for invalid SQL network identifiers. +#[derive(Debug, thiserror::Error)] +pub enum InvalidSqlNetworkIdError { + /// Network identifier is empty. + #[error("SQL network identifier cannot be empty")] + Empty, + /// Network identifier contains invalid characters (must be snake_case). 
+ #[error( + "SQL network identifier must be snake_case (lowercase alphanumeric and underscores): '{0}'" + )] + InvalidFormat(String), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sql_network_id_simple_name_converts_to_network_id() { + //* Given + let sql_id: SqlNetworkId = "mainnet".parse().unwrap(); + + //* When + let network: NetworkId = sql_id.into(); + + //* Then + assert_eq!(network.as_str(), "mainnet"); + } + + #[test] + fn sql_network_id_snake_case_converts_underscores_to_hyphens() { + //* Given + let sql_id: SqlNetworkId = "base_mainnet".parse().unwrap(); + + //* When + let network: NetworkId = sql_id.into(); + + //* Then + assert_eq!(network.as_str(), "base-mainnet"); + } + + #[test] + fn sql_network_id_multiple_underscores_all_convert() { + //* Given + let sql_id: SqlNetworkId = "arbitrum_one_nova".parse().unwrap(); + + //* When + let network: NetworkId = sql_id.into(); + + //* Then + assert_eq!(network.as_str(), "arbitrum-one-nova"); + } + + #[test] + fn sql_network_id_empty_fails() { + //* Given/When + let result = "".parse::(); + + //* Then + assert!(result.is_err()); + } + + #[test] + fn sql_network_id_with_hyphens_fails() { + //* Given/When + let result = "base-mainnet".parse::(); + + //* Then + assert!(result.is_err()); + } + + #[test] + fn sql_network_id_with_uppercase_fails() { + //* Given/When + let result = "Base_Mainnet".parse::(); + + //* Then + assert!(result.is_err()); + } + + #[test] + fn sql_network_id_display_shows_snake_case() { + //* Given + let sql_id: SqlNetworkId = "base_mainnet".parse().unwrap(); + + //* When + let display = sql_id.to_string(); + + //* Then + assert_eq!(display, "base_mainnet"); + } +} diff --git a/crates/core/common/src/streaming_query.rs b/crates/core/common/src/streaming_query.rs index 3949c9a54..bbfef40df 100644 --- a/crates/core/common/src/streaming_query.rs +++ b/crates/core/common/src/streaming_query.rs @@ -54,6 +54,7 @@ use crate::{ find_cross_network_join, order_by_block_num, 
unproject_special_block_num_column, }, retryable::RetryableErrorExt, + rpc_catalog_provider::{RPC_CATALOG_NAME, RpcCatalogProvider}, self_schema_provider::SelfSchemaProvider, sql::TableReference, sql_str::SqlStr, @@ -390,16 +391,16 @@ impl StreamingQuery { catalog.udfs().to_vec(), )); let amp_catalog = Arc::new( - AmpCatalogProvider::new( - exec_env.datasets_cache.clone(), - exec_env.ethcall_udfs_cache.clone(), - ) - .with_dep_aliases(dep_alias_map) - .with_self_schema(self_schema), + AmpCatalogProvider::new(exec_env.datasets_cache.clone()) + .with_dep_aliases(dep_alias_map) + .with_self_schema(self_schema), ); + let rpc_catalog = + Arc::new(RpcCatalogProvider::new(exec_env.ethcall_udfs_cache.clone())); let ctx = PlanContextBuilder::new(exec_env.session_config.clone()) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build(); ctx.optimize(&plan).map_err(SpawnError::OptimizePlan)? }; diff --git a/crates/core/common/src/udfs/eth_call.rs b/crates/core/common/src/udfs/eth_call.rs index 64a21fda8..9c10ed7ad 100644 --- a/crates/core/common/src/udfs/eth_call.rs +++ b/crates/core/common/src/udfs/eth_call.rs @@ -1,5 +1,5 @@ mod cache; mod udf; -pub use cache::{EthCallForDatasetError, EthCallUdfsCache}; +pub use cache::{EthCallForNetworkError, EthCallUdfsCache}; pub use udf::EthCall; diff --git a/crates/core/common/src/udfs/eth_call/cache.rs b/crates/core/common/src/udfs/eth_call/cache.rs index 35c9ab426..d6a624173 100644 --- a/crates/core/common/src/udfs/eth_call/cache.rs +++ b/crates/core/common/src/udfs/eth_call/cache.rs @@ -1,7 +1,7 @@ -//! EthCall UDF cache for EVM RPC datasets. +//! EthCall UDF cache for EVM RPC providers. //! //! This module provides the `EthCallUdfsCache` struct which manages creation and caching -//! of `eth_call` scalar UDFs for EVM RPC datasets through the providers registry. +//! 
of `eth_call` scalar UDFs keyed by network through the providers registry. use std::sync::Arc; @@ -10,23 +10,18 @@ use datafusion::{ common::HashMap, logical_expr::{ScalarUDF, async_udf::AsyncScalarUDF}, }; -use datasets_common::{ - dataset::Dataset as _, dataset_kind_str::DatasetKindStr, hash_reference::HashReference, - network_id::NetworkId, -}; -use datasets_raw::dataset::Dataset as RawDataset; -use evm_rpc_datasets::EvmRpcDatasetKind; +use datasets_common::network_id::NetworkId; use parking_lot::RwLock; use super::udf::EthCall; -/// Manages creation and caching of `eth_call` scalar UDFs for EVM RPC datasets. +/// Manages creation and caching of `eth_call` scalar UDFs keyed by network. /// /// Orchestrates UDF creation through the providers registry with in-memory caching. #[derive(Clone)] pub struct EthCallUdfsCache { registry: ProvidersRegistry, - cache: Arc>>, + cache: Arc>>, } impl std::fmt::Debug for EthCallUdfsCache { @@ -49,74 +44,51 @@ impl EthCallUdfsCache { &self.registry } - /// Returns cached eth_call scalar UDF, otherwise loads the UDF and caches it. - /// - /// The function will be named `.eth_call`. + /// Returns cached eth_call scalar UDF for a network, creating one if not cached. /// - /// # Panics - /// - /// Panics if an EVM RPC dataset has no tables. This is a structural invariant - /// guaranteed by the dataset construction process. - pub async fn eth_call_for_dataset( + /// The `udf_name` is the name DataFusion's planner uses to look up the function + /// (e.g., `rpc.mainnet.eth_call`). The caller controls the naming convention. + pub async fn eth_call_for_network( &self, - sql_schema_name: &str, - dataset: &dyn datasets_common::dataset::Dataset, - ) -> Result, EthCallForDatasetError> { - let Some(raw) = dataset.downcast_ref::() else { - return Ok(None); - }; - - if raw.kind() != EvmRpcDatasetKind { - return Ok(None); - } - - // Check if we already have the provider cached. 
- if let Some(udf) = self.cache.read().get(dataset.reference()) { - return Ok(Some(udf.clone())); + udf_name: &str, + network: &NetworkId, + ) -> Result { + // Check cache first. + if let Some(udf) = self.cache.read().get(network) { + return Ok(udf.clone()); } - // Load the provider from the dataset definition. - let network = raw.network(); - let provider = match self.registry.create_evm_rpc_client(network).await { Ok(Some(provider)) => provider, Ok(None) => { tracing::warn!( - provider_kind = %EvmRpcDatasetKind, provider_network = %network, - "no provider found for requested kind-network configuration" + "no EVM RPC provider found for network" ); - return Err(EthCallForDatasetError::ProviderNotFound { - dataset_kind: EvmRpcDatasetKind.into(), + return Err(EthCallForNetworkError::ProviderNotFound { network: network.clone(), }); } Err(err) => { - return Err(EthCallForDatasetError::ProviderCreation(err)); + return Err(EthCallForNetworkError::ProviderCreation(err)); } }; - let udf = AsyncScalarUDF::new(Arc::new(EthCall::new(sql_schema_name, provider))) + let udf = AsyncScalarUDF::new(Arc::new(EthCall::new(udf_name.to_string(), provider))) .into_scalar_udf(); - // Cache the EthCall UDF - self.cache - .write() - .insert(dataset.reference().clone(), udf.clone()); + self.cache.write().insert(network.clone(), udf.clone()); - Ok(Some(udf)) + Ok(udf) } } -/// Errors that occur when creating eth_call user-defined functions for EVM RPC datasets. +/// Errors that occur when creating eth_call UDFs for a network. #[derive(Debug, thiserror::Error)] -pub enum EthCallForDatasetError { - /// No provider configuration found for the dataset kind and network combination. - #[error("No provider found for dataset kind '{dataset_kind}' and network '{network}'")] - ProviderNotFound { - dataset_kind: DatasetKindStr, - network: NetworkId, - }, +pub enum EthCallForNetworkError { + /// No provider configuration found for the network. 
+ #[error("No EVM RPC provider found for network '{network}'")] + ProviderNotFound { network: NetworkId }, /// Failed to create the EVM RPC provider. #[error("Failed to create EVM RPC provider")] diff --git a/crates/core/common/src/udfs/eth_call/udf.rs b/crates/core/common/src/udfs/eth_call/udf.rs index 5a01d037e..92edfe9d4 100644 --- a/crates/core/common/src/udfs/eth_call/udf.rs +++ b/crates/core/common/src/udfs/eth_call/udf.rs @@ -18,7 +18,7 @@ use datafusion::{ }, datatypes::{DataType, Field, Fields}, }, - common::{internal_err, plan_err, utils::quote_identifier}, + common::{internal_err, plan_err}, error::DataFusionError, logical_expr::{ ColumnarValue, ScalarFunctionArgs, ScalarUDFImpl, Signature, TypeSignature, Volatility, @@ -95,11 +95,11 @@ impl std::hash::Hash for EthCall { } impl EthCall { - pub fn new(sql_schema_name: &str, client: alloy::providers::RootProvider) -> Self { - // Create UDF name with quoted schema to match how DataFusion's query planner - // resolves qualified function references (e.g., "_/anvil_rpc@0.0.0".eth_call) - let name = format!("{}.eth_call", quote_identifier(sql_schema_name)); - + /// Creates an `EthCall` UDF with the given name and RPC client. + /// + /// The name must match the flat lookup key that DataFusion's planner constructs + /// for the function reference, e.g., `rpc.mainnet.eth_call`. 
+ pub fn new(name: String, client: alloy::providers::RootProvider) -> Self { EthCall { name, client, diff --git a/crates/core/worker-datasets-derived/src/job_impl/table.rs b/crates/core/worker-datasets-derived/src/job_impl/table.rs index 0de900ebc..351f7f645 100644 --- a/crates/core/worker-datasets-derived/src/job_impl/table.rs +++ b/crates/core/worker-datasets-derived/src/job_impl/table.rs @@ -22,6 +22,7 @@ use common::{ exec_env::ExecEnv, physical_table::{CanonicalChainError, PhysicalTable}, retryable::RetryableErrorExt as _, + rpc_catalog_provider::{RPC_CATALOG_NAME, RpcCatalogProvider}, self_schema_provider::SelfSchemaProvider, sql::{ParseSqlError, ResolveTableReferencesError, TableReference, resolve_table_references}, }; @@ -150,13 +151,15 @@ pub async fn materialize_table( let self_schema: Arc = Arc::new(self_schema_provider); let amp_catalog = Arc::new( - AmpCatalogProvider::new(ctx.datasets_cache.clone(), ctx.ethcall_udfs_cache.clone()) + AmpCatalogProvider::new(ctx.datasets_cache.clone()) .with_dep_aliases(dep_alias_map) .with_self_schema(self_schema), ); + let rpc_catalog = Arc::new(RpcCatalogProvider::new(ctx.ethcall_udfs_cache.clone())); let planning_ctx = PlanContextBuilder::new(env.session_config.clone()) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build(); join_set.spawn( diff --git a/crates/services/admin-api/src/handlers/common.rs b/crates/services/admin-api/src/handlers/common.rs index 09ed74bef..80093188b 100644 --- a/crates/services/admin-api/src/handlers/common.rs +++ b/crates/services/admin-api/src/handlers/common.rs @@ -16,6 +16,7 @@ use common::{ context::plan::PlanContextBuilder, datasets_cache::{DatasetsCache, GetDatasetError}, exec_env::default_session_config, + rpc_catalog_provider::{RPC_CATALOG_NAME, RpcCatalogProvider}, self_schema_provider::SelfSchemaProvider, sql::{ FunctionReference, ResolveFunctionReferencesError, 
ResolveTableReferencesError, @@ -462,13 +463,15 @@ pub async fn validate_derived_manifest( let self_schema_provider = Arc::new(SelfSchemaProvider::from_manifest_udfs(&manifest.functions)); let amp_catalog = Arc::new( - AmpCatalogProvider::new(datasets_cache.clone(), ethcall_udfs_cache.clone()) + AmpCatalogProvider::new(datasets_cache.clone()) .with_dep_aliases(dep_aliases) .with_self_schema(self_schema_provider.clone() as Arc), ); + let rpc_catalog = Arc::new(RpcCatalogProvider::new(ethcall_udfs_cache.clone())); let planning_ctx = PlanContextBuilder::new(session_config) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build(); // Step 4: Validate that all table SQL queries are incremental, in topological order. diff --git a/crates/services/admin-api/src/handlers/schema.rs b/crates/services/admin-api/src/handlers/schema.rs index 1f03f7fc1..46e621b6d 100644 --- a/crates/services/admin-api/src/handlers/schema.rs +++ b/crates/services/admin-api/src/handlers/schema.rs @@ -14,6 +14,7 @@ use common::{ exec_env::default_session_config, incrementalizer::NonIncrementalQueryError, plan_visitors::prepend_special_block_num_field, + rpc_catalog_provider::{RPC_CATALOG_NAME, RpcCatalogProvider}, self_schema_provider::SelfSchemaProvider, sql::{self, ResolveTableReferencesError}, sql_str::SqlStr, @@ -267,13 +268,15 @@ pub async fn handler( let session_config = default_session_config().map_err(Error::SessionConfig)?; let self_schema_provider = Arc::new(SelfSchemaProvider::from_manifest_udfs(&functions)); let amp_catalog = Arc::new( - AmpCatalogProvider::new(ctx.datasets_cache.clone(), ctx.ethcall_udfs_cache.clone()) + AmpCatalogProvider::new(ctx.datasets_cache.clone()) .with_dep_aliases(dep_aliases) .with_self_schema(self_schema_provider.clone() as Arc), ); + let rpc_catalog = Arc::new(RpcCatalogProvider::new(ctx.ethcall_udfs_cache.clone())); let planning_ctx = 
PlanContextBuilder::new(session_config) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build(); // Infer schema for each table in topological order. diff --git a/crates/services/server/src/flight.rs b/crates/services/server/src/flight.rs index 813fd94b1..e43c171c8 100644 --- a/crates/services/server/src/flight.rs +++ b/crates/services/server/src/flight.rs @@ -49,6 +49,7 @@ use common::{ exec_env::ExecEnv, memory_pool::TieredMemoryPool, plan_visitors::{plan_has_block_num_udf, unproject_special_block_num_column}, + rpc_catalog_provider::{RPC_CATALOG_NAME, RpcCatalogProvider}, sql::{ResolveFunctionReferencesError, ResolveTableReferencesError, resolve_table_references}, sql_str::SqlStr, streaming_query::{QueryMessage, StreamingQuery}, @@ -142,13 +143,12 @@ impl Service { .await .map_err(Error::PhysicalCatalogError)?; - let amp_catalog = Arc::new(AmpCatalogProvider::new( - self.env.datasets_cache.clone(), - self.env.ethcall_udfs_cache.clone(), - )); + let amp_catalog = Arc::new(AmpCatalogProvider::new(self.env.datasets_cache.clone())); + let rpc_catalog = Arc::new(RpcCatalogProvider::new(self.env.ethcall_udfs_cache.clone())); let ctx = PlanContextBuilder::new(self.env.session_config.clone()) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build(); let plan = ctx .statement_to_plan(query.clone()) @@ -374,13 +374,14 @@ impl Service { let query = common::sql::parse(&sql_str).map_err(Error::SqlParse)?; let plan_ctx = { - let amp_catalog = Arc::new(AmpCatalogProvider::new( - self.env.datasets_cache.clone(), - self.env.ethcall_udfs_cache.clone(), - )); + let amp_catalog = + Arc::new(AmpCatalogProvider::new(self.env.datasets_cache.clone())); + let rpc_catalog = + Arc::new(RpcCatalogProvider::new(self.env.ethcall_udfs_cache.clone())); 
PlanContextBuilder::new(self.env.session_config.clone()) .with_table_catalog(AMP_CATALOG_NAME, amp_catalog.clone()) .with_func_catalog(AMP_CATALOG_NAME, amp_catalog) + .with_func_catalog(RPC_CATALOG_NAME, rpc_catalog) .build() }; diff --git a/docs/feat/udf-builtin-evm-eth-call.md b/docs/feat/udf-builtin-evm-eth-call.md index c88f6bdd7..02dee63a9 100644 --- a/docs/feat/udf-builtin-evm-eth-call.md +++ b/docs/feat/udf-builtin-evm-eth-call.md @@ -50,7 +50,7 @@ Without a configured RPC provider, `eth_call` queries will fail. Executes read-only contract calls via JSON-RPC. -**Important:** eth_call requires dataset qualification. Always use the format `"namespace/name@revision".eth_call(...)` where the dataset identifier specifies which blockchain network and provider configuration to use. For example: `"edgeandnode/mainnet@0.0.1".eth_call(...)`. +**Important:** eth_call requires RPC catalog qualification. Always use the format `rpc..eth_call(...)` where `` matches a configured provider network. For example: `rpc.mainnet.eth_call(...)`. 
**Arguments:** - `from` (FixedSizeBinary(20) or NULL): Sender address (optional, can pass NULL directly) @@ -71,7 +71,7 @@ Fetch token metadata like `name()` and `decimals()` to enrich query results with SELECT evm_decode_hex(token_address) as token, evm_decode_type( - ("edgeandnode/mainnet@0.0.1".eth_call(NULL, token_address, evm_encode_params('name()'), 'latest')).data, + (rpc.mainnet.eth_call(NULL, token_address, evm_encode_params('name()'), 'latest')).data, 'string' ) as name FROM tokens @@ -79,7 +79,7 @@ FROM tokens -- Query token decimals SELECT evm_decode_type( - ("edgeandnode/mainnet@0.0.1".eth_call(NULL, token_address, evm_encode_params('decimals()'), 'latest')).data, + (rpc.mainnet.eth_call(NULL, token_address, evm_encode_params('decimals()'), 'latest')).data, 'uint8' ) as decimals FROM tokens @@ -101,7 +101,7 @@ FROM ( SELECT token, holder, - "edgeandnode/mainnet@0.0.1".eth_call( + rpc.mainnet.eth_call( NULL, token, evm_encode_params(holder, 'balanceOf(address account)'), @@ -120,7 +120,7 @@ Query past state at a specific block to analyze historical balances or reconstru SELECT evm_decode_type(result.data, 'uint256') as balance_at_block FROM ( - SELECT "edgeandnode/mainnet@0.0.1".eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), -- WETH evm_encode_params( @@ -143,7 +143,7 @@ SELECT FROM ( SELECT pair_address, - "edgeandnode/mainnet@0.0.1".eth_call( + rpc.mainnet.eth_call( NULL, pair_address, evm_encode_params('getReserves()'), @@ -160,7 +160,7 @@ Call a contract without any input data. ```sql SELECT - "edgeandnode/mainnet@0.0.1".eth_call( + rpc.mainnet.eth_call( NULL, evm_encode_hex('0x0000000000000000000000000000000000000000'), NULL, @@ -177,7 +177,7 @@ Handle reverted calls gracefully since `eth_call` returns errors in the result s Exclude rows where the contract call reverted. 
```sql -WHERE ("namespace/name@revision".eth_call(...)).message IS NULL +WHERE (rpc.<network>.eth_call(...)).message IS NULL ``` #### Decode the Result Struct @@ -185,7 +185,7 @@ Access the `data` field and decode it to the expected Solidity type. ```sql -evm_decode_type(("namespace/name@revision".eth_call(...)).data, 'uint256') +evm_decode_type((rpc.<network>.eth_call(...)).data, 'uint256') ``` #### Handle Reverts Gracefully @@ -200,7 +200,7 @@ SELECT ELSE NULL END as balance FROM ( - SELECT token, "edgeandnode/mainnet@0.0.1".eth_call(NULL, token, evm_encode_params(holder, 'balanceOf(address)'), 'latest') as result + SELECT token, rpc.mainnet.eth_call(NULL, token, evm_encode_params(holder, 'balanceOf(address)'), 'latest') as result FROM token_holders ) ``` @@ -214,7 +214,7 @@ SELECT evm_decode_hex(contract) as contract, result.message as revert_reason FROM ( - SELECT contract, "edgeandnode/mainnet@0.0.1".eth_call(NULL, contract, calldata, 'latest') as result + SELECT contract, rpc.mainnet.eth_call(NULL, contract, calldata, 'latest') as result FROM contracts ) WHERE result.message IS NOT NULL @@ -229,7 +229,7 @@ WHERE result.message IS NOT NULL ## Limitations - Requires Ethereum JSON-RPC endpoint configured -- Requires dataset qualification (`"namespace/name@revision".eth_call`) +- Requires RPC catalog qualification (`rpc.<network>.eth_call`) - `to` address is required (cannot be NULL, must be FixedSizeBinary(20)) - `block` is required (cannot be NULL) - `from` must be NULL or FixedSizeBinary(20) - other types will produce an error diff --git a/tests/config/manifests/base_rpc.json b/tests/config/manifests/base_rpc.json index 998d33dc5..5eb77a5cd 100644 --- a/tests/config/manifests/base_rpc.json +++ b/tests/config/manifests/base_rpc.json @@ -1,6 +1,6 @@ { "kind": "evm-rpc", - "network": "base", + "network": "base-mainnet", "start_block": 33411770, "finalized_blocks_only": false, "tables": { @@ -115,7 +115,7 @@ "nullable": false 
}, { - "name": "base_fee_per_gas", + "name": "base_fee_per_gas", "type": { "Decimal128": [ 38, @@ -151,7 +151,7 @@ ] } }, - "network": "base" + "network": "base-mainnet" }, "logs": { "schema": { @@ -239,7 +239,7 @@ ] } }, - "network": "base" + "network": "base-mainnet" }, "transactions": { "schema": { @@ -391,7 +391,7 @@ ] } }, - "network": "base" + "network": "base-mainnet" } } -} +} diff --git a/tests/config/providers/per_tx_receipt/rpc_eth_base.toml b/tests/config/providers/per_tx_receipt/rpc_eth_base.toml index 1163ffc05..a92ce05d5 100644 --- a/tests/config/providers/per_tx_receipt/rpc_eth_base.toml +++ b/tests/config/providers/per_tx_receipt/rpc_eth_base.toml @@ -2,5 +2,5 @@ kind = "evm-rpc" url = "${ETH_BASE_RPC_URL}" auth_header = "${ETH_BASE_RPC_AUTH_HEADER}" auth_token = "${ETH_BASE_RPC_AUTH_TOKEN}" -network = "base" +network = "base-mainnet" fetch_receipts_per_tx = true diff --git a/tests/config/providers/rpc_eth_base.toml b/tests/config/providers/rpc_eth_base.toml index 2db7728aa..00e2e82cc 100644 --- a/tests/config/providers/rpc_eth_base.toml +++ b/tests/config/providers/rpc_eth_base.toml @@ -2,4 +2,4 @@ kind = "evm-rpc" url = "${ETH_BASE_RPC_URL}" auth_header = "${ETH_BASE_RPC_AUTH_HEADER}" auth_token = "${ETH_BASE_RPC_AUTH_TOKEN}" -network = "base" +network = "base-mainnet" diff --git a/tests/specs/eth-call-tests.yaml b/tests/specs/eth-call-tests.yaml index 555591f8b..d4dbe60a3 100644 --- a/tests/specs/eth-call-tests.yaml +++ b/tests/specs/eth-call-tests.yaml @@ -3,7 +3,7 @@ - name: eth_call query: | - SELECT eth_rpc.eth_call(from, to, input, block_num) AS call + SELECT rpc.mainnet.eth_call(from, to, input, block_num) AS call FROM eth_rpc.transactions WHERE tx_index = 2 results: | @@ -19,7 +19,7 @@ - name: eth_call_all_params query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( evm_encode_hex('0x742d35Cc6634C0532925a3b844Bc454e4438f44e'), 
evm_encode_hex('0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48'), evm_encode_params( @@ -39,7 +39,7 @@ - name: eth_call_null_from query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -58,7 +58,7 @@ - name: eth_call_null_data query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0x0000000000000000000000000000000000000000'), NULL, @@ -74,7 +74,7 @@ - name: eth_call_invalid_from_type query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( evm_encode_hex('0x12a137b28608117c2d1f562195afb72651dc2a52c17e04e4cee8df08ccf4e96b'), evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -93,7 +93,7 @@ - name: eth_call_invalid_to_type query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0x12a137b28608117c2d1f562195afb72651dc2a52c17e04e4cee8df08ccf4e96b'), evm_encode_params( @@ -112,7 +112,7 @@ - name: eth_call_invalid_input_data_type query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), arrow_cast(1234, 'Int64'), @@ -128,7 +128,7 @@ - name: eth_call_invalid_block_type query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -147,7 +147,7 @@ - name: eth_call_integer_block_number query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -166,7 +166,7 @@ - name: eth_call_negative_block_number query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -185,7 +185,7 @@ - name: 
eth_call_null_block_number query: | with test as( - SELECT eth_rpc.eth_call( + SELECT rpc.mainnet.eth_call( NULL, evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), evm_encode_params( @@ -200,3 +200,35 @@ FROM test failure: | 'block' is not a valid block number or tag: Null + +- name: eth_call_unknown_provider + query: | + SELECT rpc.unknown.eth_call( + NULL, + evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), + evm_encode_params( + evm_encode_hex('0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2'), + 'balanceOf(address account)' + ), + NULL + ) + failure: | + No EVM RPC provider found for network 'unknown' + +- name: eth_call_base_provider + query: | + with test as( + SELECT rpc.base_mainnet.eth_call( + NULL, + evm_encode_hex('0x4200000000000000000000000000000000000006'), + evm_encode_params( + 'symbol()' + ), + '43792142' + ) as result + ) + SELECT + result.data as result + FROM test + results: | + [{"result": "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000045745544800000000000000000000000000000000000000000000000000000000"}] diff --git a/tests/src/tests/it_admin_api_schema.rs b/tests/src/tests/it_admin_api_schema.rs index 3ce4639c4..424703089 100644 --- a/tests/src/tests/it_admin_api_schema.rs +++ b/tests/src/tests/it_admin_api_schema.rs @@ -1504,7 +1504,7 @@ async fn bare_builtin_function_succeeds() { } #[tokio::test] -async fn function_with_catalog_qualification_fails() { +async fn function_with_unregistered_catalog_qualification_fails() { //* Given let ctx = TestCtx::setup( "function_with_catalog_qualification", @@ -1512,8 +1512,8 @@ async fn function_with_catalog_qualification_fails() { ) .await; - // Catalog-qualified function (3 parts) - not supported - let sql_query = r#"SELECT catalog.schema.function(data) FROM eth.blocks"#; + // Catalog-qualified function referencing an unregistered catalog + let sql_query = r#"SELECT unknown_catalog.schema.function(data) 
FROM eth.blocks"#; //* When let resp = ctx @@ -1527,7 +1527,7 @@ async fn function_with_catalog_qualification_fails() { assert_eq!( resp.status(), StatusCode::INTERNAL_SERVER_ERROR, - "schema resolution should fail with catalog-qualified function" + "schema resolution should fail with unregistered catalog function" ); let response: ErrorResponse = resp diff --git a/tests/src/tests/it_functions_eth_call.rs b/tests/src/tests/it_functions_eth_call.rs index 098eb3f8f..8693a0574 100644 --- a/tests/src/tests/it_functions_eth_call.rs +++ b/tests/src/tests/it_functions_eth_call.rs @@ -103,7 +103,7 @@ async fn eth_call_reads_counter_value_after_increments() { let contract_addr_hex = hex::encode(deployment.address.as_slice()); let eth_call_query = indoc::formatdoc! {r#" WITH call_result AS ( - SELECT "_/anvil_rpc@0.0.0".eth_call( + SELECT rpc.anvil.eth_call( arrow_cast(decode('{null_addr_hex}', 'hex'), 'FixedSizeBinary(20)'), arrow_cast(decode('{contract_addr_hex}', 'hex'), 'FixedSizeBinary(20)'), decode('{count_selector}', 'hex'), diff --git a/tests/src/tests/it_sql.rs b/tests/src/tests/it_sql.rs index 581e7a82a..0a9f48dc8 100644 --- a/tests/src/tests/it_sql.rs +++ b/tests/src/tests/it_sql.rs @@ -33,7 +33,7 @@ async fn eth_call_tests() { let test_ctx = TestCtxBuilder::new("eth_call_tests") .with_dataset_manifests(["eth_rpc"]) .with_dataset_snapshots(["eth_rpc"]) - .with_provider_configs(["rpc_eth_mainnet"]) + .with_provider_configs(["rpc_eth_mainnet", "rpc_eth_base"]) .build() .await .expect("Failed to create test environment"); diff --git a/typescript/studio/src/constants.ts b/typescript/studio/src/constants.ts index 9714c0279..046d6da33 100644 --- a/typescript/studio/src/constants.ts +++ b/typescript/studio/src/constants.ts @@ -12,7 +12,7 @@ export const RESERVED_FIELDS = new Set(["from", "select", "limit", "order"]) export const UserDefinedFunctionName = Schema.Literal( "evm_decode_log", "evm_topic", - "${dataset}.eth_call", + "rpc..eth_call", "attestation_hash", 
"evm_decode_params", "evm_encode_params", @@ -47,10 +47,10 @@ export const USER_DEFINED_FUNCTIONS: ReadonlyArray = [ sql: `FixedSizeBinary(32) evm_topic(Utf8 signature)`, }, { - name: "${dataset}.eth_call", + name: "rpc..eth_call", description: - "This function executes an `eth_call` JSON-RPC against the provider of the specified EVM-RPC dataset. Returns a tuple of the return value of the call and the error message (if any, or empty string if no error).", - sql: `(Binary, Utf8) {dataset}.eth_call( + "This function executes an `eth_call` JSON-RPC against the provider of the specified network. Returns a tuple of the return value of the call and the error message (if any, or empty string if no error).", + sql: `(Binary, Utf8) rpc..eth_call( FixedSizeBinary(20) from, # optional FixedSizeBinary(20) to, Binary input_data, # optional diff --git a/typescript/studio/src/services/sql/AmpCompletionProvider.ts b/typescript/studio/src/services/sql/AmpCompletionProvider.ts index 8da301cdc..96b77e2e8 100644 --- a/typescript/studio/src/services/sql/AmpCompletionProvider.ts +++ b/typescript/studio/src/services/sql/AmpCompletionProvider.ts @@ -363,7 +363,7 @@ export class AmpCompletionProvider implements languages.CompletionItemProvider { private readonly udfSnippets: Record = { evm_decode_log: "evm_decode_log(${1:topic1}, ${2:topic2}, ${3:topic3}, ${4:data}, '${5:signature}')$0", evm_topic: "evm_topic('${1:signature}')$0", - "${dataset}.eth_call": "${1:dataset}.eth_call(${2:from_address}, ${3:to_address}, ${4:input_data}, '${5:block}')$0", + "rpc.eth_call": "rpc.${1:network}.eth_call(${2:from_address}, ${3:to_address}, ${4:input_data}, '${5:block}')$0", evm_decode_params: "evm_decode_params(${1:input_data}, '${2:signature}')$0", evm_encode_params: "evm_encode_params(${1:arg1}, ${2:arg2}, '${3:signature}')$0", evm_encode_type: "evm_encode_type(${1:value}, '${2:type}')$0", diff --git a/typescript/studio/src/services/sql/UDFSnippetGenerator.ts 
b/typescript/studio/src/services/sql/UDFSnippetGenerator.ts index a766a9700..870e578db 100644 --- a/typescript/studio/src/services/sql/UDFSnippetGenerator.ts +++ b/typescript/studio/src/services/sql/UDFSnippetGenerator.ts @@ -61,7 +61,7 @@ export class UdfSnippetGenerator { private readonly snippetGenerators = new Map string>([ ["evm_decode_log", () => this.createEvmDecodeLogSnippet()], ["evm_topic", () => this.createEvmTopicSnippet()], - ["${dataset}.eth_call", () => this.createEthCallSnippet()], + ["rpc..eth_call", () => this.createEthCallSnippet()], ["evm_decode_params", () => this.createEvmDecodeParamsSnippet()], ["evm_encode_params", () => this.createEvmEncodeParamsSnippet()], ["evm_encode_type", () => this.createEvmEncodeTypeSnippet()], @@ -122,10 +122,10 @@ export class UdfSnippetGenerator { */ private createEthCallSnippet(): string { if (this.config.includeExampleValues) { - const dataset = `\${1:${this.config.defaultDataset}}` - return `${dataset}.eth_call(\${2:0x0000000000000000000000000000000000000000}, \${3:0x1234567890123456789012345678901234567890}, \${4:0x70a08231}, '\${5:latest}')$0` + const network = `\${1:${this.config.defaultDataset}}` + return `rpc.${network}.eth_call(\${2:0x0000000000000000000000000000000000000000}, \${3:0x1234567890123456789012345678901234567890}, \${4:0x70a08231}, '\${5:latest}')$0` } - return "${1:dataset}.eth_call(${2:from_address}, ${3:to_address}, ${4:input_data}, '${5:block}')$0" + return "rpc.${1:network}.eth_call(${2:from_address}, ${3:to_address}, ${4:input_data}, '${5:block}')$0" } /** diff --git a/typescript/studio/src/services/sql/types.ts b/typescript/studio/src/services/sql/types.ts index b6b715575..a9656233c 100644 --- a/typescript/studio/src/services/sql/types.ts +++ b/typescript/studio/src/services/sql/types.ts @@ -50,7 +50,7 @@ export interface MonacoITextModel { * @interface UserDefinedFunction */ export interface UserDefinedFunction { - /** The function name (e.g., 'evm_decode_log', '${dataset}.eth_call') 
*/ + /** The function name (e.g., 'evm_decode_log', 'rpc..eth_call') */ name: UserDefinedFunctionName /** Human-readable description of the function's purpose */ diff --git a/typescript/studio/tests/services/sql/core.test.ts b/typescript/studio/tests/services/sql/core.test.ts index 106cb0a4d..7cf0bf910 100644 --- a/typescript/studio/tests/services/sql/core.test.ts +++ b/typescript/studio/tests/services/sql/core.test.ts @@ -66,7 +66,7 @@ describe("SQL Intellisense Core Functionality", () => { test("should generate snippet for dataset-prefixed UDF", () => { generator = new UdfSnippetGenerator() - const udf = mockUDFs.find((u) => u.name === "${dataset}.eth_call")! + const udf = mockUDFs.find((u) => u.name === "rpc..eth_call")! const snippet = generator.createUdfSnippet(udf) @@ -149,7 +149,7 @@ describe("SQL Intellisense Core Functionality", () => { const expectedNames = [ "evm_decode_log", "evm_topic", - "${dataset}.eth_call", + "rpc..eth_call", "attestation_hash", "evm_decode_params", "evm_encode_params", diff --git a/typescript/studio/tests/services/sql/fixtures/mockQueries.ts b/typescript/studio/tests/services/sql/fixtures/mockQueries.ts index 277cd998c..cebf27485 100644 --- a/typescript/studio/tests/services/sql/fixtures/mockQueries.ts +++ b/typescript/studio/tests/services/sql/fixtures/mockQueries.ts @@ -116,8 +116,8 @@ export const udfQueries: Array = [ description: "Parameter completion inside UDF call", }, { - query: "SELECT anvil.eth_call(", - position: new monaco.Position(1, 23), + query: "SELECT rpc.anvil.eth_call(", + position: new monaco.Position(1, 27), expectedContext: { expectsColumn: true, currentClause: "SELECT", diff --git a/typescript/studio/tests/services/sql/fixtures/mockUDFs.ts b/typescript/studio/tests/services/sql/fixtures/mockUDFs.ts index c9e4e7236..216a7fbec 100644 --- a/typescript/studio/tests/services/sql/fixtures/mockUDFs.ts +++ b/typescript/studio/tests/services/sql/fixtures/mockUDFs.ts @@ -25,13 +25,13 @@ export const mockUDFs: 
ReadonlyArray = [ "SELECT * FROM anvil.logs WHERE topics[0] = evm_topic('Transfer(address indexed from, address indexed to, uint256 value)')", }, { - name: "${dataset}.eth_call", + name: "rpc..eth_call", description: - "This function executes an `eth_call` JSON-RPC against the provider of the specified EVM-RPC dataset. Returns a tuple of the return value of the call and the error message (if any, or empty string if no error).", - sql: "${dataset}.eth_call(from, to, input_data, block_specification)", + "This function executes an `eth_call` JSON-RPC against the provider of the specified network. Returns a tuple of the return value of the call and the error message (if any, or empty string if no error).", + sql: "rpc..eth_call(from, to, input_data, block_specification)", parameters: ["from", "to", "input_data", "block_specification"], example: - "SELECT anvil.eth_call('0x0000000000000000000000000000000000000000', '0x1234567890123456789012345678901234567890', '0x70a08231', 'latest')", + "SELECT rpc.anvil.eth_call('0x0000000000000000000000000000000000000000', '0x1234567890123456789012345678901234567890', '0x70a08231', 'latest')", }, { name: "attestation_hash",