From a68ff3a6c03a77d9e56ab03d85046a386fe58649 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Mon, 22 Dec 2025 17:24:04 -0600 Subject: [PATCH 01/14] Add core metering types for priority fee estimation Add MeteringCache and PriorityFeeEstimator to crates/rpc for resource-aware priority fee estimation in flashblocks. - cache.rs: In-memory cache storing metered transactions by block/flashblock with resource totals (gas, execution time, state root time, data availability bytes) - estimator.rs: Top-down fee estimation algorithm that determines minimum priority fee needed for bundle inclusion based on resource competition --- Cargo.lock | 3 + Cargo.toml | 5 + crates/rpc/Cargo.toml | 5 + crates/rpc/src/base/cache.rs | 383 +++++++++++++++ crates/rpc/src/base/estimator.rs | 801 +++++++++++++++++++++++++++++++ crates/rpc/src/base/mod.rs | 2 + crates/rpc/src/lib.rs | 6 + 7 files changed, 1205 insertions(+) create mode 100644 crates/rpc/src/base/cache.rs create mode 100644 crates/rpc/src/base/estimator.rs diff --git a/Cargo.lock b/Cargo.lock index aebb5828..8420ea55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1600,6 +1600,7 @@ dependencies = [ "eyre", "futures-util", "httpmock", + "indexmap 2.12.1", "jsonrpsee", "jsonrpsee-types", "metrics", @@ -1607,6 +1608,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", + "parking_lot", "rand 0.9.2", "reth", "reth-db", @@ -1616,6 +1618,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-evm", "reth-optimism-node", + "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-primitives-traits", "reth-provider", diff --git a/Cargo.toml b/Cargo.toml index 6a860dca..53fe7deb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,7 @@ reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9 reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" } reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" 
} reth-optimism-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" } +reth-optimism-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" } reth-optimism-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3" } reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.3", features = [ "op", @@ -119,6 +120,7 @@ op-alloy-rpc-types = "0.22.0" op-alloy-consensus = "0.22.0" op-alloy-rpc-jsonrpsee = "0.22.0" op-alloy-rpc-types-engine = "0.22.0" +op-alloy-flz = "0.13.1" # tokio tokio = "1.48.0" @@ -158,3 +160,6 @@ derive_more = "2.1.0" serde_json = "1.0.145" metrics-derive = "0.1.0" tracing-subscriber = "0.3.22" +parking_lot = "0.12.3" +indexmap = "2.7.0" +rdkafka = { version = "0.37.0", default-features = false, features = ["tokio", "ssl-vendored", "libz-static"] } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 390fdf1a..2646e72c 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -53,6 +53,11 @@ eyre.workspace = true serde.workspace = true metrics.workspace = true metrics-derive.workspace = true +parking_lot.workspace = true +indexmap.workspace = true + +# priority fee estimation +reth-optimism-payload-builder.workspace = true [dev-dependencies] base-flashtypes.workspace = true diff --git a/crates/rpc/src/base/cache.rs b/crates/rpc/src/base/cache.rs new file mode 100644 index 00000000..3e5b4f6a --- /dev/null +++ b/crates/rpc/src/base/cache.rs @@ -0,0 +1,383 @@ +//! In-memory cache for metering data used by the priority fee estimator. + +use std::collections::{BTreeMap, HashMap, VecDeque}; + +use alloy_primitives::{B256, U256}; +use indexmap::IndexMap; + +/// A metered transaction with resource consumption data. +#[derive(Debug, Clone)] +pub struct MeteredTransaction { + /// Transaction hash. + pub tx_hash: B256, + /// Priority fee per gas for ordering. + pub priority_fee_per_gas: U256, + /// Gas consumed. 
+ pub gas_used: u64, + /// Execution time in microseconds. + pub execution_time_us: u128, + /// State root computation time in microseconds. + pub state_root_time_us: u128, + /// Data availability bytes. + pub data_availability_bytes: u64, +} + +impl MeteredTransaction { + /// Creates a zeroed transaction (placeholder with no resource usage). + pub fn zeroed(tx_hash: B256) -> Self { + Self { + tx_hash, + priority_fee_per_gas: U256::ZERO, + gas_used: 0, + execution_time_us: 0, + state_root_time_us: 0, + data_availability_bytes: 0, + } + } +} + +/// Aggregated resource totals. +#[derive(Debug, Clone, Copy, Default)] +pub struct ResourceTotals { + /// Total gas used. + pub gas_used: u64, + /// Total execution time in microseconds. + pub execution_time_us: u128, + /// Total state root time in microseconds. + pub state_root_time_us: u128, + /// Total data availability bytes. + pub data_availability_bytes: u64, +} + +impl ResourceTotals { + fn accumulate(&mut self, tx: &MeteredTransaction) { + self.gas_used = self.gas_used.saturating_add(tx.gas_used); + self.execution_time_us = self.execution_time_us.saturating_add(tx.execution_time_us); + self.state_root_time_us = self.state_root_time_us.saturating_add(tx.state_root_time_us); + self.data_availability_bytes = + self.data_availability_bytes.saturating_add(tx.data_availability_bytes); + } + + fn subtract(&mut self, tx: &MeteredTransaction) { + self.gas_used = self.gas_used.saturating_sub(tx.gas_used); + self.execution_time_us = self.execution_time_us.saturating_sub(tx.execution_time_us); + self.state_root_time_us = self.state_root_time_us.saturating_sub(tx.state_root_time_us); + self.data_availability_bytes = + self.data_availability_bytes.saturating_sub(tx.data_availability_bytes); + } +} + +/// Metrics for a single flashblock within a block. +#[derive(Debug)] +pub struct FlashblockMetrics { + /// Block number. + pub block_number: u64, + /// Flashblock index within the block. 
+ pub flashblock_index: u64, + /// Transactions keyed by hash in insertion order. + transactions: IndexMap, + totals: ResourceTotals, +} + +impl FlashblockMetrics { + /// Creates a new flashblock metrics container. + pub fn new(block_number: u64, flashblock_index: u64) -> Self { + Self { + block_number, + flashblock_index, + transactions: IndexMap::new(), + totals: ResourceTotals::default(), + } + } + + /// Inserts or updates a transaction. + pub fn upsert_transaction(&mut self, tx: MeteredTransaction) { + let tx_hash = tx.tx_hash; + if let Some(existing) = self.transactions.get(&tx_hash) { + self.totals.subtract(existing); + } + self.totals.accumulate(&tx); + self.transactions.insert(tx_hash, tx); + } + + /// Removes a transaction by hash. + pub fn remove_transaction(&mut self, tx_hash: &B256) -> Option { + let removed = self.transactions.shift_remove(tx_hash); + if let Some(ref tx) = removed { + self.totals.subtract(tx); + } + removed + } + + /// Returns the resource totals for this flashblock. + pub fn totals(&self) -> ResourceTotals { + self.totals + } + + /// Iterates over all transactions. + pub fn transactions(&self) -> impl Iterator { + self.transactions.values() + } + + /// Returns transactions sorted by priority fee (ascending). + pub fn transactions_sorted_by_priority_fee(&self) -> Vec<&MeteredTransaction> { + let mut txs: Vec<&MeteredTransaction> = self.transactions.values().collect(); + txs.sort_by(|a, b| a.priority_fee_per_gas.cmp(&b.priority_fee_per_gas)); + txs + } + + /// Returns the number of transactions. + pub fn len(&self) -> usize { + self.transactions.len() + } + + /// Returns true if empty. + pub fn is_empty(&self) -> bool { + self.transactions.is_empty() + } +} + +/// Aggregated metrics for a block, including per-flashblock breakdown. +#[derive(Debug)] +pub struct BlockMetrics { + /// Block number. 
+ pub block_number: u64, + flashblocks: BTreeMap, + totals: ResourceTotals, +} + +impl BlockMetrics { + /// Creates a new block metrics container. + pub fn new(block_number: u64) -> Self { + Self { block_number, flashblocks: BTreeMap::new(), totals: ResourceTotals::default() } + } + + /// Returns the number of flashblocks. + pub fn flashblock_count(&self) -> usize { + self.flashblocks.len() + } + + /// Iterates over all flashblocks. + pub fn flashblocks(&self) -> impl Iterator { + self.flashblocks.values() + } + + /// Returns the flashblock at the given index. + pub fn flashblock(&self, flashblock_index: u64) -> Option<&FlashblockMetrics> { + self.flashblocks.get(&flashblock_index) + } + + /// Returns a mutable reference to the flashblock, creating it if necessary. + /// Returns `(flashblock, is_new)`. + pub fn flashblock_mut(&mut self, flashblock_index: u64) -> (&mut FlashblockMetrics, bool) { + let is_new = !self.flashblocks.contains_key(&flashblock_index); + let entry = self + .flashblocks + .entry(flashblock_index) + .or_insert_with(|| FlashblockMetrics::new(self.block_number, flashblock_index)); + (entry, is_new) + } + + /// Returns the resource totals for this block. + pub fn totals(&self) -> ResourceTotals { + self.totals + } + + fn recompute_totals(&mut self) { + self.totals = ResourceTotals::default(); + for flashblock in self.flashblocks.values() { + let totals = flashblock.totals(); + self.totals.gas_used = self.totals.gas_used.saturating_add(totals.gas_used); + self.totals.execution_time_us = + self.totals.execution_time_us.saturating_add(totals.execution_time_us); + self.totals.state_root_time_us = + self.totals.state_root_time_us.saturating_add(totals.state_root_time_us); + self.totals.data_availability_bytes = + self.totals.data_availability_bytes.saturating_add(totals.data_availability_bytes); + } + } +} + +/// In-memory cache maintaining metering data for the most recent blocks. 
+#[derive(Debug)] +pub struct MeteringCache { + max_blocks: usize, + blocks: VecDeque, + block_index: HashMap, +} + +impl MeteringCache { + /// Creates a new cache retaining at most `max_blocks` recent blocks. + pub fn new(max_blocks: usize) -> Self { + Self { max_blocks, blocks: VecDeque::new(), block_index: HashMap::new() } + } + + /// Returns the maximum number of blocks retained. + pub fn max_blocks(&self) -> usize { + self.max_blocks + } + + /// Returns the block metrics for the given block number. + pub fn block(&self, block_number: u64) -> Option<&BlockMetrics> { + self.block_index.get(&block_number).and_then(|&idx| self.blocks.get(idx)) + } + + /// Returns a mutable reference to the block, creating it if necessary. + pub fn block_mut(&mut self, block_number: u64) -> &mut BlockMetrics { + if let Some(&idx) = self.block_index.get(&block_number) { + return self.blocks.get_mut(idx).expect("block index out of bounds"); + } + + let block = BlockMetrics::new(block_number); + self.blocks.push_back(block); + let idx = self.blocks.len() - 1; + self.block_index.insert(block_number, idx); + + self.evict_if_needed(); + self.blocks.get_mut(*self.block_index.get(&block_number).unwrap()).unwrap() + } + + /// Returns the flashblock metrics for the given block and flashblock index. + pub fn flashblock( + &self, + block_number: u64, + flashblock_index: u64, + ) -> Option<&FlashblockMetrics> { + self.block(block_number).and_then(|block| block.flashblock(flashblock_index)) + } + + /// Inserts or updates a transaction in the cache. + pub fn upsert_transaction( + &mut self, + block_number: u64, + flashblock_index: u64, + tx: MeteredTransaction, + ) { + let block = self.block_mut(block_number); + let (flashblock, _) = block.flashblock_mut(flashblock_index); + flashblock.upsert_transaction(tx); + block.recompute_totals(); + } + + /// Removes a transaction from the cache. 
+ pub fn remove_transaction( + &mut self, + block_number: u64, + flashblock_index: u64, + tx_hash: &B256, + ) -> Option { + let block = self.block_mut(block_number); + let (flashblock, _) = block.flashblock_mut(flashblock_index); + let removed = flashblock.remove_transaction(tx_hash); + block.recompute_totals(); + removed + } + + /// Returns the number of cached blocks. + pub fn len(&self) -> usize { + self.blocks.len() + } + + /// Returns true if the cache is empty. + pub fn is_empty(&self) -> bool { + self.blocks.is_empty() + } + + /// Iterates over blocks in descending order (most recent first). + pub fn blocks_desc(&self) -> impl Iterator { + self.blocks.iter().rev() + } + + fn evict_if_needed(&mut self) { + let mut evicted = false; + while self.blocks.len() > self.max_blocks { + if let Some(oldest) = self.blocks.pop_front() { + self.block_index.remove(&oldest.block_number); + evicted = true; + } + } + // Rebuild index once after all evictions to maintain correctness. + if evicted { + self.rebuild_index(); + } + } + + fn rebuild_index(&mut self) { + self.block_index.clear(); + for (idx, block) in self.blocks.iter().enumerate() { + self.block_index.insert(block.block_number, idx); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn test_tx(hash: u64, priority: u64) -> MeteredTransaction { + let mut hash_bytes = [0u8; 32]; + hash_bytes[24..].copy_from_slice(&hash.to_be_bytes()); + MeteredTransaction { + tx_hash: B256::new(hash_bytes), + priority_fee_per_gas: U256::from(priority), + gas_used: 10, + execution_time_us: 5, + state_root_time_us: 7, + data_availability_bytes: 20, + } + } + + #[test] + fn insert_and_retrieve_transactions() { + let mut cache = MeteringCache::new(12); + let tx1 = test_tx(1, 2); + cache.upsert_transaction(100, 0, tx1.clone()); + + let block = cache.block(100).unwrap(); + let flashblock = block.flashblocks().next().unwrap(); + assert_eq!(flashblock.len(), 1); + assert_eq!(flashblock.transactions().next().unwrap().tx_hash, 
tx1.tx_hash); + } + + #[test] + fn replaces_existing_transaction() { + let mut cache = MeteringCache::new(12); + let mut tx1 = test_tx(1, 2); + cache.upsert_transaction(100, 0, tx1.clone()); + tx1.gas_used = 42; + cache.upsert_transaction(100, 0, tx1.clone()); + + let block = cache.block(100).unwrap(); + let flashblock = block.flashblocks().next().unwrap(); + assert_eq!(flashblock.len(), 1); + assert_eq!(flashblock.transactions().next().unwrap().gas_used, tx1.gas_used); + } + + #[test] + fn evicts_old_blocks() { + let mut cache = MeteringCache::new(2); + for block_number in 0..3u64 { + cache.upsert_transaction(block_number, 0, test_tx(block_number, block_number)); + } + assert!(cache.block(0).is_none()); + assert!(cache.block(1).is_some()); + assert!(cache.block(2).is_some()); + } + + #[test] + fn transactions_sorted_by_priority_fee() { + let mut cache = MeteringCache::new(12); + cache.upsert_transaction(100, 0, test_tx(1, 30)); + cache.upsert_transaction(100, 0, test_tx(2, 10)); + cache.upsert_transaction(100, 0, test_tx(3, 20)); + + let block = cache.block(100).unwrap(); + let flashblock = block.flashblocks().next().unwrap(); + let sorted: Vec<_> = flashblock + .transactions_sorted_by_priority_fee() + .iter() + .map(|tx| tx.priority_fee_per_gas) + .collect(); + assert_eq!(sorted, vec![U256::from(10u64), U256::from(20u64), U256::from(30u64)]); + } +} diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs new file mode 100644 index 00000000..b14c1741 --- /dev/null +++ b/crates/rpc/src/base/estimator.rs @@ -0,0 +1,801 @@ +//! Priority fee estimation based on resource consumption in flashblocks. + +use std::sync::Arc; + +use alloy_primitives::U256; +use parking_lot::RwLock; +use reth_optimism_payload_builder::config::OpDAConfig; + +use crate::base::cache::{MeteredTransaction, MeteringCache}; + +/// Errors that can occur during priority fee estimation. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EstimateError { + /// The bundle's resource demand exceeds the configured capacity limit. + DemandExceedsCapacity { + /// The resource that exceeded capacity. + resource: ResourceKind, + /// The requested demand. + demand: u128, + /// The configured limit. + limit: u128, + }, +} + +impl std::fmt::Display for EstimateError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::DemandExceedsCapacity { resource, demand, limit } => { + write!( + f, + "bundle {} demand ({}) exceeds capacity limit ({})", + resource.as_name(), + demand, + limit + ) + } + } + } +} + +impl std::error::Error for EstimateError {} + +/// Configured capacity limits for each resource type. +/// +/// These values define the maximum capacity available per flashblock (or per block +/// for "use-it-or-lose-it" resources). The estimator uses these limits to determine +/// when resources are congested. +#[derive(Debug, Clone, Copy, Default)] +pub struct ResourceLimits { + /// Gas limit per flashblock. + pub gas_used: Option, + /// Execution time budget in microseconds. + pub execution_time_us: Option, + /// State root computation time budget in microseconds. + pub state_root_time_us: Option, + /// Data availability bytes limit per flashblock. + pub data_availability_bytes: Option, +} + +impl ResourceLimits { + /// Returns the limit for the given resource kind. + fn limit_for(&self, resource: ResourceKind) -> Option { + match resource { + ResourceKind::GasUsed => self.gas_used.map(|v| v as u128), + ResourceKind::ExecutionTime => self.execution_time_us, + ResourceKind::StateRootTime => self.state_root_time_us, + ResourceKind::DataAvailability => self.data_availability_bytes.map(|v| v as u128), + } + } +} + +/// Resources that influence flashblock inclusion ordering. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum ResourceKind { + /// Gas consumption. + GasUsed, + /// Execution time. 
+ ExecutionTime, + /// State root computation time. + StateRootTime, + /// Data availability bytes. + DataAvailability, +} + +impl ResourceKind { + /// Returns all resource kinds in a fixed order. + pub fn all() -> [Self; 4] { + [Self::GasUsed, Self::ExecutionTime, Self::StateRootTime, Self::DataAvailability] + } + + /// Returns `true` if this resource is "use-it-or-lose-it", meaning capacity + /// that isn't consumed in one flashblock cannot be reclaimed in later ones. + /// + /// Execution time is the canonical example: the block builder has a fixed + /// time budget per block, and unused time in flashblock 0 doesn't roll over + /// to flashblock 1. For these resources, the estimator aggregates usage + /// across all flashblocks rather than evaluating each flashblock in isolation. + /// + /// Other resources like gas and DA bytes are bounded per-block but are + /// evaluated per-flashblock since their limits apply independently. + fn use_it_or_lose_it(self) -> bool { + matches!(self, Self::ExecutionTime) + } + + /// Returns a human-readable name for the resource kind. + pub const fn as_name(&self) -> &'static str { + match self { + Self::GasUsed => "gas", + Self::ExecutionTime => "execution time", + Self::StateRootTime => "state root time", + Self::DataAvailability => "data availability", + } + } + + /// Returns a camelCase name for JSON serialization. + pub const fn as_camel_case(&self) -> &'static str { + match self { + Self::GasUsed => "gasUsed", + Self::ExecutionTime => "executionTime", + Self::StateRootTime => "stateRootTime", + Self::DataAvailability => "dataAvailability", + } + } +} + +/// Amount of resources required by the bundle being priced. +#[derive(Debug, Clone, Copy, Default)] +pub struct ResourceDemand { + /// Gas demand. + pub gas_used: Option, + /// Execution time demand in microseconds. + pub execution_time_us: Option, + /// State root time demand in microseconds. + pub state_root_time_us: Option, + /// Data availability bytes demand. 
+ pub data_availability_bytes: Option, +} + +impl ResourceDemand { + fn demand_for(&self, resource: ResourceKind) -> Option { + match resource { + ResourceKind::GasUsed => self.gas_used.map(|v| v as u128), + ResourceKind::ExecutionTime => self.execution_time_us, + ResourceKind::StateRootTime => self.state_root_time_us, + ResourceKind::DataAvailability => self.data_availability_bytes.map(|v| v as u128), + } + } +} + +/// Fee estimate for a single resource type. +/// +/// The estimation algorithm answers: "What priority fee would my bundle need to pay +/// to displace enough lower-paying transactions to free up the resources I need?" +#[derive(Debug, Clone)] +pub struct ResourceEstimate { + /// Minimum fee to displace enough capacity for the bundle's resource demand. + pub threshold_priority_fee: U256, + /// Recommended fee based on a percentile of transactions above the threshold. + /// Provides a safety margin over the bare minimum. + pub recommended_priority_fee: U256, + /// Total resource usage of transactions at or above the threshold fee. + pub cumulative_usage: u128, + /// Number of transactions at or above `threshold_priority_fee`. These higher-paying + /// transactions remain included alongside the bundle; lower-paying ones are displaced. + pub threshold_tx_count: usize, + /// Total transactions considered in the estimate. + pub total_transactions: usize, +} + +/// Per-resource fee estimates. +/// +/// Each field corresponds to a resource type. `None` indicates the resource +/// was not requested or could not be estimated (e.g., demand exceeds capacity). +#[derive(Debug, Clone, Default)] +pub struct ResourceEstimates { + /// Gas usage estimate. + pub gas_used: Option, + /// Execution time estimate. + pub execution_time: Option, + /// State root time estimate. + pub state_root_time: Option, + /// Data availability estimate. + pub data_availability: Option, +} + +impl ResourceEstimates { + /// Returns the estimate for the given resource kind. 
+ pub fn get(&self, kind: ResourceKind) -> Option<&ResourceEstimate> { + match kind { + ResourceKind::GasUsed => self.gas_used.as_ref(), + ResourceKind::ExecutionTime => self.execution_time.as_ref(), + ResourceKind::StateRootTime => self.state_root_time.as_ref(), + ResourceKind::DataAvailability => self.data_availability.as_ref(), + } + } + + /// Sets the estimate for the given resource kind. + pub fn set(&mut self, kind: ResourceKind, estimate: ResourceEstimate) { + match kind { + ResourceKind::GasUsed => self.gas_used = Some(estimate), + ResourceKind::ExecutionTime => self.execution_time = Some(estimate), + ResourceKind::StateRootTime => self.state_root_time = Some(estimate), + ResourceKind::DataAvailability => self.data_availability = Some(estimate), + } + } + + /// Iterates over all present estimates with their resource kind. + pub fn iter(&self) -> impl Iterator { + [ + (ResourceKind::GasUsed, &self.gas_used), + (ResourceKind::ExecutionTime, &self.execution_time), + (ResourceKind::StateRootTime, &self.state_root_time), + (ResourceKind::DataAvailability, &self.data_availability), + ] + .into_iter() + .filter_map(|(kind, opt)| opt.as_ref().map(|est| (kind, est))) + } + + /// Returns true if no estimates are present. + pub fn is_empty(&self) -> bool { + self.iter().next().is_none() + } +} + +/// Estimates for a specific flashblock index. +#[derive(Debug, Clone)] +pub struct FlashblockResourceEstimates { + /// Flashblock index. + pub flashblock_index: u64, + /// Per-resource estimates. + pub estimates: ResourceEstimates, +} + +/// Aggregated estimates for a block. +#[derive(Debug, Clone)] +pub struct BlockPriorityEstimates { + /// Block number. + pub block_number: u64, + /// Per-flashblock estimates. + pub flashblocks: Vec, + /// Minimum recommended fee across all flashblocks (easiest inclusion). + pub min_across_flashblocks: ResourceEstimates, + /// Maximum recommended fee across all flashblocks (most competitive). 
+ pub max_across_flashblocks: ResourceEstimates, +} + +/// Rolling estimates aggregated across multiple recent blocks. +#[derive(Debug, Clone)] +pub struct RollingPriorityEstimates { + /// Number of blocks that contributed to this estimate. + pub blocks_sampled: usize, + /// Per-resource estimates (median across sampled blocks). + pub estimates: ResourceEstimates, + /// Single recommended fee: maximum across all resources. + pub recommended_priority_fee: U256, +} + +/// Computes resource fee estimates based on cached flashblock metering data. +#[derive(Debug)] +pub struct PriorityFeeEstimator { + cache: Arc>, + percentile: f64, + limits: ResourceLimits, + default_priority_fee: U256, + /// Optional shared DA config from the miner RPC. When set, the estimator uses + /// `max_da_block_size` from this config instead of `limits.data_availability_bytes`. + /// This allows dynamic updates via `miner_setMaxDASize`. + da_config: Option, +} + +impl PriorityFeeEstimator { + /// Creates a new estimator referencing the shared metering cache. + /// + /// # Parameters + /// - `cache`: Shared cache containing recent flashblock metering data. + /// - `percentile`: Point in the fee distribution (among transactions above threshold) + /// to use for the recommended fee. + /// - `limits`: Configured resource capacity limits. + /// - `default_priority_fee`: Fee to return when a resource is not congested. + /// - `da_config`: Optional shared DA config for dynamic DA limit updates. + pub fn new( + cache: Arc>, + percentile: f64, + limits: ResourceLimits, + default_priority_fee: U256, + da_config: Option, + ) -> Self { + Self { cache, percentile, limits, default_priority_fee, da_config } + } + + /// Returns the current DA block size limit, preferring the dynamic `OpDAConfig` value + /// if available, otherwise falling back to the static limit. 
+ pub fn max_da_block_size(&self) -> Option { + self.da_config + .as_ref() + .and_then(|c| c.max_da_block_size()) + .or(self.limits.data_availability_bytes) + } + + /// Returns the limit for the given resource kind, using dynamic config where available. + fn limit_for(&self, resource: ResourceKind) -> Option { + match resource { + ResourceKind::DataAvailability => self.max_da_block_size().map(|v| v as u128), + _ => self.limits.limit_for(resource), + } + } + + /// Returns fee estimates for the provided block. If `block_number` is `None` + /// the most recent block in the cache is used. + /// + /// Returns `Ok(None)` if the cache is empty, the requested block is not cached, + /// or no transactions exist in the cached flashblocks. + /// + /// Returns `Err` if the bundle's demand exceeds any resource's capacity limit. + pub fn estimate_for_block( + &self, + block_number: Option, + demand: ResourceDemand, + ) -> Result, EstimateError> { + let cache_guard = self.cache.read(); + let block_metrics = match block_number { + Some(target) => cache_guard.block(target), + None => cache_guard.blocks_desc().next(), + }; + let Some(block_metrics) = block_metrics else { + return Ok(None); + }; + + let block_number = block_metrics.block_number; + + // Materialise sorted transactions per flashblock so we can drop the lock before + // running the estimation logic. + let mut flashblock_transactions = Vec::new(); + let mut total_tx_count = 0usize; + for flashblock in block_metrics.flashblocks() { + let sorted: Vec = + flashblock.transactions_sorted_by_priority_fee().into_iter().cloned().collect(); + if sorted.is_empty() { + continue; + } + total_tx_count += sorted.len(); + flashblock_transactions.push((flashblock.flashblock_index, sorted)); + } + drop(cache_guard); + + if flashblock_transactions.is_empty() { + return Ok(None); + } + + // Build the aggregate list for use-it-or-lose-it resources by collecting references + // to avoid cloning transactions twice. 
+ let mut aggregate_refs: Vec<&MeteredTransaction> = Vec::with_capacity(total_tx_count); + for (_, txs) in &flashblock_transactions { + aggregate_refs.extend(txs.iter()); + } + aggregate_refs.sort_by(|a, b| a.priority_fee_per_gas.cmp(&b.priority_fee_per_gas)); + + let mut flashblock_estimates = Vec::new(); + + for (flashblock_index, txs) in &flashblock_transactions { + // Build a reference slice for this flashblock's transactions. + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + + let mut estimates = ResourceEstimates::default(); + for resource in ResourceKind::all() { + let Some(demand_value) = demand.demand_for(resource) else { + continue; + }; + let Some(limit_value) = self.limit_for(resource) else { + continue; + }; + + let transactions: &[&MeteredTransaction] = + if resource.use_it_or_lose_it() { &aggregate_refs } else { &txs_refs }; + let estimate = compute_estimate( + resource, + transactions, + demand_value, + limit_value, + usage_extractor(resource), + self.percentile, + self.default_priority_fee, + )?; + + estimates.set(resource, estimate); + } + + flashblock_estimates.push(FlashblockResourceEstimates { + flashblock_index: *flashblock_index, + estimates, + }); + } + + let (min_across_flashblocks, max_across_flashblocks) = + compute_min_max_estimates(&flashblock_estimates); + + Ok(Some(BlockPriorityEstimates { + block_number, + flashblocks: flashblock_estimates, + min_across_flashblocks, + max_across_flashblocks, + })) + } + + /// Returns rolling fee estimates aggregated across the most recent blocks in the cache. + /// + /// For each resource, computes estimates per-block and takes the median recommended fee. + /// The final `recommended_priority_fee` is the maximum across all resources. + /// + /// Returns `Ok(None)` if the cache is empty or no blocks contain transaction data. + /// + /// Returns `Err` if the bundle's demand exceeds any resource's capacity limit. 
+ pub fn estimate_rolling( + &self, + demand: ResourceDemand, + ) -> Result, EstimateError> { + let cache_guard = self.cache.read(); + let block_numbers: Vec = cache_guard.blocks_desc().map(|b| b.block_number).collect(); + drop(cache_guard); + + if block_numbers.is_empty() { + return Ok(None); + } + + // Collect per-block max estimates. Propagate any errors. + let mut block_estimates = Vec::new(); + for &n in &block_numbers { + if let Some(est) = self.estimate_for_block(Some(n), demand)? { + block_estimates.push(est.max_across_flashblocks); + } + } + + if block_estimates.is_empty() { + return Ok(None); + } + + // Compute median fee for each resource across blocks. + let mut estimates = ResourceEstimates::default(); + let mut max_fee = U256::ZERO; + + for resource in ResourceKind::all() { + let mut fees: Vec = block_estimates + .iter() + .filter_map(|e| e.get(resource)) + .map(|e| e.recommended_priority_fee) + .collect(); + + if fees.is_empty() { + continue; + } + + fees.sort(); + let median_fee = fees[fees.len() / 2]; + max_fee = max_fee.max(median_fee); + + estimates.set( + resource, + ResourceEstimate { + threshold_priority_fee: median_fee, + recommended_priority_fee: median_fee, + cumulative_usage: 0, + threshold_tx_count: 0, + total_transactions: 0, + }, + ); + } + + if estimates.is_empty() { + return Ok(None); + } + + Ok(Some(RollingPriorityEstimates { + blocks_sampled: block_numbers.len(), + estimates, + recommended_priority_fee: max_fee, + })) + } +} + +/// Core estimation algorithm (top-down approach). +/// +/// Given a list of transactions and a resource limit, determines the minimum priority +/// fee needed to be included alongside enough high-paying transactions while still +/// leaving room for the bundle's demand. +/// +/// # Algorithm +/// +/// 1. Sort transactions from highest to lowest priority fee. +/// 2. Walk from the top, subtracting each transaction's usage from remaining capacity. +/// 3. 
Stop when including another transaction would leave less capacity than the bundle needs. +/// 4. The threshold fee is the fee of the last included transaction (the minimum fee +/// among transactions that would be included alongside the bundle). +/// 5. If we include all transactions and still have capacity >= demand, the resource is +/// not congested, so return the configured default fee. +/// +/// Returns `Err` if the bundle's demand exceeds the resource limit. +fn compute_estimate( + resource: ResourceKind, + transactions: &[&MeteredTransaction], + demand: u128, + limit: u128, + usage_fn: fn(&MeteredTransaction) -> u128, + percentile: f64, + default_fee: U256, +) -> Result { + // Bundle demand exceeds the resource limit entirely. + if demand > limit { + return Err(EstimateError::DemandExceedsCapacity { resource, demand, limit }); + } + + // No transactions or zero demand means no competition for this resource. + if transactions.is_empty() || demand == 0 { + return Ok(ResourceEstimate { + threshold_priority_fee: default_fee, + recommended_priority_fee: default_fee, + cumulative_usage: 0, + threshold_tx_count: 0, + total_transactions: 0, + }); + } + + // Sort transactions by priority fee descending (highest first). + let mut sorted: Vec<_> = transactions.to_vec(); + sorted.sort_by(|a, b| b.priority_fee_per_gas.cmp(&a.priority_fee_per_gas)); + + // Walk from highest-paying transactions, subtracting usage from remaining capacity. + // Stop when we can no longer fit another transaction while leaving room for demand. + let mut remaining = limit; + let mut included_usage = 0u128; + let mut last_included_idx: Option = None; + + for (idx, tx) in sorted.iter().enumerate() { + let usage = usage_fn(tx); + + // Check if we can include this transaction and still have room for the bundle. 
+ if remaining >= usage && remaining.saturating_sub(usage) >= demand { + remaining = remaining.saturating_sub(usage); + included_usage = included_usage.saturating_add(usage); + last_included_idx = Some(idx); + } else { + // Can't include this transaction without crowding out the bundle. + break; + } + } + + // If we included all transactions and still have room, resource is not congested. + let is_uncongested = last_included_idx == Some(sorted.len() - 1) && remaining >= demand; + + if is_uncongested { + return Ok(ResourceEstimate { + threshold_priority_fee: default_fee, + recommended_priority_fee: default_fee, + cumulative_usage: included_usage, + threshold_tx_count: sorted.len(), + total_transactions: sorted.len(), + }); + } + + let (supporting_count, threshold_fee, recommended_fee) = match last_included_idx { + Some(idx) => { + // At least one transaction fits alongside the bundle. + // The threshold is the fee of the last included transaction. + let threshold_fee = sorted[idx].priority_fee_per_gas; + + // For recommended fee, look at included transactions (those above threshold) + // and pick one at the specified percentile for a safety margin. + let included = &sorted[..=idx]; + let percentile = percentile.clamp(0.0, 1.0); + let recommended_fee = if included.len() <= 1 { + threshold_fee + } else { + // Pick from the higher end of included transactions for safety. + let pos = ((included.len() - 1) as f64 * (1.0 - percentile)).round() as usize; + included[pos.min(included.len() - 1)].priority_fee_per_gas + }; + + (idx + 1, threshold_fee, recommended_fee) + } + None => { + // No transactions fit - even the first transaction would crowd out + // the bundle. The bundle must beat the highest fee to be included. + // Report 0 supporting transactions since none were actually included. 
+ let threshold_fee = sorted[0].priority_fee_per_gas; + (0, threshold_fee, threshold_fee) + } + }; + + Ok(ResourceEstimate { + threshold_priority_fee: threshold_fee, + recommended_priority_fee: recommended_fee, + cumulative_usage: included_usage, + threshold_tx_count: supporting_count, + total_transactions: sorted.len(), + }) +} + +/// Returns a function that extracts the relevant resource usage from a transaction. +fn usage_extractor(resource: ResourceKind) -> fn(&MeteredTransaction) -> u128 { + match resource { + ResourceKind::GasUsed => |tx: &MeteredTransaction| tx.gas_used as u128, + ResourceKind::ExecutionTime => |tx: &MeteredTransaction| tx.execution_time_us, + ResourceKind::StateRootTime => |tx: &MeteredTransaction| tx.state_root_time_us, + ResourceKind::DataAvailability => { + |tx: &MeteredTransaction| tx.data_availability_bytes as u128 + } + } +} + +/// Computes the minimum and maximum recommended fees across all flashblocks. +/// +/// Returns two `ResourceEstimates`: +/// - First: For each resource, the estimate with the lowest recommended fee (easiest inclusion). +/// - Second: For each resource, the estimate with the highest recommended fee (most competitive). +fn compute_min_max_estimates( + flashblocks: &[FlashblockResourceEstimates], +) -> (ResourceEstimates, ResourceEstimates) { + let mut min_estimates = ResourceEstimates::default(); + let mut max_estimates = ResourceEstimates::default(); + + for flashblock in flashblocks { + for (resource, estimate) in flashblock.estimates.iter() { + // Update min. + let current_min = min_estimates.get(resource); + if current_min.is_none() + || estimate.recommended_priority_fee < current_min.unwrap().recommended_priority_fee + { + min_estimates.set(resource, estimate.clone()); + } + + // Update max. 
+ let current_max = max_estimates.get(resource); + if current_max.is_none() + || estimate.recommended_priority_fee > current_max.unwrap().recommended_priority_fee + { + max_estimates.set(resource, estimate.clone()); + } + } + } + + (min_estimates, max_estimates) +} + +#[cfg(test)] +mod tests { + use alloy_primitives::B256; + + use super::*; + + const DEFAULT_FEE: U256 = U256::from_limbs([1, 0, 0, 0]); // 1 wei + + fn tx(priority: u64, usage: u64) -> MeteredTransaction { + let mut hash_bytes = [0u8; 32]; + hash_bytes[24..].copy_from_slice(&priority.to_be_bytes()); + MeteredTransaction { + tx_hash: B256::new(hash_bytes), + priority_fee_per_gas: U256::from(priority), + gas_used: usage, + execution_time_us: usage as u128, + state_root_time_us: usage as u128, + data_availability_bytes: usage, + } + } + + #[test] + fn compute_estimate_congested_resource() { + // Limit: 30, Demand: 15 + // Transactions: priority=10 (10 gas), priority=5 (10 gas), priority=2 (10 gas) + // Walking from top (highest fee): + // - Include tx priority=10: remaining = 30-10 = 20 >= 15 ok + // - Include tx priority=5: remaining = 20-10 = 10 < 15 stop + // Threshold = 10 (the last included tx's fee) + let txs = vec![tx(10, 10), tx(5, 10), tx(2, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let quote = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 15, + 30, // limit + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + assert_eq!(quote.threshold_priority_fee, U256::from(10)); + assert_eq!(quote.cumulative_usage, 10); // Only the first tx was included + assert_eq!(quote.threshold_tx_count, 1); + assert_eq!(quote.total_transactions, 3); + } + + #[test] + fn compute_estimate_uncongested_resource() { + // Limit: 100, Demand: 15 + // All transactions fit with room to spare -> return default fee + let txs = vec![tx(10, 10), tx(5, 10), tx(2, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let quote = 
compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 15, + 100, // limit is much larger than total usage + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + assert_eq!(quote.threshold_priority_fee, DEFAULT_FEE); + assert_eq!(quote.recommended_priority_fee, DEFAULT_FEE); + assert_eq!(quote.cumulative_usage, 30); // All txs included + assert_eq!(quote.threshold_tx_count, 3); + } + + #[test] + fn compute_estimate_demand_exceeds_limit() { + // Demand > Limit -> Error + let txs = vec![tx(10, 10), tx(5, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let result = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 50, // demand + 30, // limit + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ); + assert!(matches!( + result, + Err(EstimateError::DemandExceedsCapacity { + resource: ResourceKind::GasUsed, + demand: 50, + limit: 30, + }) + )); + } + + #[test] + fn compute_estimate_exact_fit() { + // Limit: 30, Demand: 20 + // Transactions: priority=10 (10 gas), priority=5 (10 gas) + // After including tx priority=10: remaining = 20 >= 20 ok + // After including tx priority=5: remaining = 10 < 20 stop + let txs = vec![tx(10, 10), tx(5, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let quote = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 20, + 30, + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + assert_eq!(quote.threshold_priority_fee, U256::from(10)); + assert_eq!(quote.cumulative_usage, 10); + assert_eq!(quote.threshold_tx_count, 1); + } + + #[test] + fn compute_estimate_single_transaction() { + // Single tx that fits + let txs = vec![tx(10, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let quote = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 15, + 30, + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + // After including 
the tx: remaining = 20 >= 15 ok + // But we only have 1 tx, so it's uncongested + assert_eq!(quote.threshold_priority_fee, DEFAULT_FEE); + assert_eq!(quote.recommended_priority_fee, DEFAULT_FEE); + } + + #[test] + fn compute_estimate_no_room_for_any_tx() { + // Limit: 25, Demand: 20 + // First tx uses 10, remaining = 15 < 20 -> can't even include first tx + let txs = vec![tx(10, 10), tx(5, 10)]; + let txs_refs: Vec<&MeteredTransaction> = txs.iter().collect(); + let quote = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 20, + 25, + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + // No transactions can be included, threshold is the highest fee + assert_eq!(quote.threshold_priority_fee, U256::from(10)); + assert_eq!(quote.threshold_tx_count, 0); + assert_eq!(quote.cumulative_usage, 0); + } +} diff --git a/crates/rpc/src/base/mod.rs b/crates/rpc/src/base/mod.rs index b4097c40..99ced898 100644 --- a/crates/rpc/src/base/mod.rs +++ b/crates/rpc/src/base/mod.rs @@ -1,3 +1,5 @@ +pub mod cache; +pub mod estimator; pub(crate) mod meter; pub(crate) mod meter_rpc; pub(crate) mod pubsub; diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 5a5db0a0..8e4c321a 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -8,6 +8,12 @@ pub use tips_core::types::{Bundle, MeterBundleResponse, TransactionResult}; mod base; pub use base::{ + cache::{BlockMetrics, FlashblockMetrics, MeteredTransaction, MeteringCache, ResourceTotals}, + estimator::{ + BlockPriorityEstimates, EstimateError, FlashblockResourceEstimates, PriorityFeeEstimator, + ResourceDemand, ResourceEstimate, ResourceEstimates, ResourceKind, ResourceLimits, + RollingPriorityEstimates, + }, meter::meter_bundle, meter_rpc::MeteringApiImpl, pubsub::{EthPubSub, EthPubSubApiServer}, From 9a06ab0eb8afdf84f4eb859031ef17e24a8387a2 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Mon, 22 Dec 2025 17:29:31 -0600 Subject: [PATCH 02/14] Add Kafka consumer and 
resource annotator for metering pipeline - kafka.rs: KafkaBundleConsumer reads accepted bundle events from Kafka and extracts per-transaction metering data including gas, execution time, and data availability bytes - annotator.rs: ResourceAnnotator correlates Kafka metering data with flashblock inclusion events to populate the metering cache with properly indexed transaction data --- Cargo.lock | 44 ++++++++ crates/rpc/Cargo.toml | 9 ++ crates/rpc/src/base/annotator.rs | 143 ++++++++++++++++++++++++ crates/rpc/src/base/kafka.rs | 182 +++++++++++++++++++++++++++++++ crates/rpc/src/base/mod.rs | 2 + crates/rpc/src/lib.rs | 2 + 6 files changed, 382 insertions(+) create mode 100644 crates/rpc/src/base/annotator.rs create mode 100644 crates/rpc/src/base/kafka.rs diff --git a/Cargo.lock b/Cargo.lock index 8420ea55..56d5a385 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1597,6 +1597,7 @@ dependencies = [ "base-flashtypes", "base-reth-flashblocks", "base-reth-test-utils", + "chrono", "eyre", "futures-util", "httpmock", @@ -1606,10 +1607,12 @@ dependencies = [ "metrics", "metrics-derive", "op-alloy-consensus", + "op-alloy-flz", "op-alloy-network", "op-alloy-rpc-types", "parking_lot", "rand 0.9.2", + "rdkafka", "reth", "reth-db", "reth-db-common", @@ -5914,6 +5917,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.4+3.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.111" @@ -5922,6 +5934,7 @@ checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -6761,6 +6774,37 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rdkafka" 
+version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b52c81ac3cac39c9639b95c20452076e74b8d9a71bc6fc4d83407af2ea6fff" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.9.0+2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" +dependencies = [ + "libc", + "libz-sys", + "num_enum", + "openssl-sys", + "pkg-config", +] + [[package]] name = "recvmsg" version = "1.0.0" diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 2646e72c..21172426 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -51,6 +51,7 @@ jsonrpsee-types.workspace = true tracing.workspace = true eyre.workspace = true serde.workspace = true +serde_json.workspace = true metrics.workspace = true metrics-derive.workspace = true parking_lot.workspace = true @@ -59,6 +60,14 @@ indexmap.workspace = true # priority fee estimation reth-optimism-payload-builder.workspace = true +# Kafka consumer +rdkafka.workspace = true +chrono.workspace = true + +# DA calculation +op-alloy-flz.workspace = true +op-alloy-consensus.workspace = true + [dev-dependencies] base-flashtypes.workspace = true reth-optimism-primitives.workspace = true diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs new file mode 100644 index 00000000..5d9c8a5a --- /dev/null +++ b/crates/rpc/src/base/annotator.rs @@ -0,0 +1,143 @@ +//! Resource annotator that correlates Kafka metering data with flashblock inclusions. 
+
+use std::sync::Arc;
+
+use alloy_primitives::TxHash;
+use parking_lot::RwLock;
+use tokio::sync::mpsc::UnboundedReceiver;
+use tracing::{debug, info, warn};
+
+use crate::{MeteredTransaction, MeteringCache};
+
+/// Message received from the flashblocks websocket feed indicating which
+/// transactions were included in a specific flashblock.
+#[derive(Debug)]
+pub struct FlashblockInclusion {
+    /// Block number.
+    pub block_number: u64,
+    /// Flashblock index within the block.
+    pub flashblock_index: u64,
+    /// Tx hashes included in this flashblock.
+    pub ordered_tx_hashes: Vec<TxHash>,
+}
+
+/// Maximum number of pending transactions before oldest entries are evicted.
+const MAX_PENDING_TRANSACTIONS: usize = 10_000;
+
+/// Annotates flashblock transactions with their resource usage.
+///
+/// The flow is:
+/// 1. Kafka sends `MeteredTransaction` with resource usage data keyed by tx hash
+/// 2. These are stored in a pending lookup table
+/// 3. Websocket sends `FlashblockInclusion` with actual (block, flashblock) location
+/// 4. We look up pending transactions and insert them into the cache at the real location
+pub struct ResourceAnnotator {
+    cache: Arc<RwLock<MeteringCache>>,
+    tx_updates_rx: UnboundedReceiver<MeteredTransaction>,
+    flashblock_rx: UnboundedReceiver<FlashblockInclusion>,
+    /// Pending metering data awaiting flashblock inclusion confirmation.
+    /// Uses IndexMap to maintain insertion order for FIFO eviction.
+    pending_transactions: indexmap::IndexMap<TxHash, MeteredTransaction>,
+}
+
+impl ResourceAnnotator {
+    /// Creates a new resource annotator.
+    pub fn new(
+        cache: Arc<RwLock<MeteringCache>>,
+        tx_updates_rx: UnboundedReceiver<MeteredTransaction>,
+        flashblock_rx: UnboundedReceiver<FlashblockInclusion>,
+    ) -> Self {
+        Self {
+            cache,
+            tx_updates_rx,
+            flashblock_rx,
+            pending_transactions: indexmap::IndexMap::new(),
+        }
+    }
+
+    /// Runs the annotator until both channels are closed.
+    pub async fn run(mut self) {
+        info!(target: "metering::annotator", "Starting ResourceAnnotator");
+        loop {
+            tokio::select!
{ + Some(tx_event) = self.tx_updates_rx.recv() => { + self.handle_tx_event(tx_event); + } + Some(flashblock_event) = self.flashblock_rx.recv() => { + self.handle_flashblock_event(flashblock_event); + } + else => { + info!(target: "metering::annotator", "ResourceAnnotator terminating"); + break; + } + } + } + } + + fn handle_tx_event(&mut self, tx: MeteredTransaction) { + debug!( + tx_hash = %tx.tx_hash, + gas_used = tx.gas_used, + "Storing metered transaction in pending map" + ); + self.pending_transactions.insert(tx.tx_hash, tx); + + // Evict oldest entries if we exceed the limit. + while self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { + if let Some((evicted_hash, _)) = self.pending_transactions.shift_remove_index(0) { + info!( + tx_hash = %evicted_hash, + "Evicting old transaction from pending map (limit exceeded)" + ); + metrics::counter!("metering.pending.evicted").increment(1); + } + } + + metrics::gauge!("metering.pending.size").set(self.pending_transactions.len() as f64); + metrics::counter!("metering.kafka.tx_events_total").increment(1); + } + + fn handle_flashblock_event(&mut self, event: FlashblockInclusion) { + metrics::counter!("metering.streams.flashblocks_total").increment(1); + + let mut matched = 0usize; + let mut missed = 0usize; + + { + let mut cache = self.cache.write(); + for tx_hash in &event.ordered_tx_hashes { + if let Some(tx) = self.pending_transactions.shift_remove(tx_hash) { + cache.upsert_transaction(event.block_number, event.flashblock_index, tx); + matched += 1; + } else { + missed += 1; + } + } + } + + if matched > 0 { + debug!( + block_number = event.block_number, + flashblock_index = event.flashblock_index, + matched, + "Inserted transactions into cache from flashblock" + ); + } + + // All transactions should come through as bundles. Any misses indicate + // the Kafka event hasn't arrived yet or was lost. 
+ if missed > 0 { + warn!( + block_number = event.block_number, + flashblock_index = event.flashblock_index, + matched, + missed, + "Flashblock contained transactions not found in pending map" + ); + metrics::counter!("metering.streams.tx_misses_total").increment(missed as u64); + } + + metrics::gauge!("metering.pending.size").set(self.pending_transactions.len() as f64); + metrics::counter!("metering.streams.tx_matched_total").increment(matched as u64); + } +} diff --git a/crates/rpc/src/base/kafka.rs b/crates/rpc/src/base/kafka.rs new file mode 100644 index 00000000..d943976e --- /dev/null +++ b/crates/rpc/src/base/kafka.rs @@ -0,0 +1,182 @@ +//! Kafka consumer for accepted bundle events. + +use std::time::Duration; + +use alloy_consensus::{Transaction, transaction::Recovered}; +use alloy_eips::Encodable2718; +use alloy_primitives::U256; +use chrono::Utc; +use eyre::Result; +use op_alloy_consensus::OpTxEnvelope; +use op_alloy_flz::tx_estimated_size_fjord_bytes; +use rdkafka::{ + ClientConfig, Message, + consumer::{CommitMode, Consumer, StreamConsumer}, +}; +use tips_core::types::AcceptedBundle; +use tokio::{sync::mpsc::UnboundedSender, time::sleep}; +use tracing::{debug, error, info, trace, warn}; + +use crate::MeteredTransaction; + +/// Configuration required to connect to the Kafka topic publishing accepted bundles. +#[derive(Debug)] +pub struct KafkaBundleConsumerConfig { + /// Kafka client configuration. + pub client_config: ClientConfig, + /// Topic name. + pub topic: String, +} + +/// Maximum backoff delay for Kafka receive errors. +const MAX_BACKOFF_SECS: u64 = 60; + +/// Consumes `AcceptedBundle` events from Kafka and publishes transaction-level metering data. +#[derive(Debug)] +pub struct KafkaBundleConsumer { + consumer: StreamConsumer, + tx_sender: UnboundedSender, + topic: String, +} + +impl KafkaBundleConsumer { + /// Creates a new Kafka bundle consumer. 
+ pub fn new( + config: KafkaBundleConsumerConfig, + tx_sender: UnboundedSender, + ) -> Result { + let KafkaBundleConsumerConfig { client_config, topic } = config; + + let consumer: StreamConsumer = client_config.create()?; + consumer.subscribe(&[topic.as_str()])?; + + Ok(Self { consumer, tx_sender, topic }) + } + + /// Starts listening for Kafka messages until the task is cancelled. + pub async fn run(self) { + info!( + target: "metering::kafka", + topic = %self.topic, + "Starting Kafka bundle consumer" + ); + + let mut backoff_secs = 1u64; + + loop { + match self.consumer.recv().await { + Ok(message) => { + // Reset backoff on successful receive. + backoff_secs = 1; + if let Err(err) = self.handle_message(message).await { + error!(target: "metering::kafka", error = %err, "Failed to process Kafka message"); + metrics::counter!("metering.kafka.errors_total").increment(1); + } + } + Err(err) => { + error!( + target: "metering::kafka", + error = %err, + backoff_secs, + "Kafka receive error for topic {}. 
Retrying after backoff", + self.topic + ); + metrics::counter!("metering.kafka.errors_total").increment(1); + sleep(Duration::from_secs(backoff_secs)).await; + backoff_secs = (backoff_secs * 2).min(MAX_BACKOFF_SECS); + } + } + } + } + + async fn handle_message(&self, message: rdkafka::message::BorrowedMessage<'_>) -> Result<()> { + let payload = + message.payload().ok_or_else(|| eyre::eyre!("Kafka message missing payload"))?; + + let bundle: AcceptedBundle = serde_json::from_slice(payload)?; + metrics::counter!("metering.kafka.messages_total").increment(1); + + if let Some(ts) = message.timestamp().to_millis() { + let now_ms = Utc::now().timestamp_millis(); + let lag_ms = now_ms.saturating_sub(ts); + metrics::gauge!("metering.kafka.lag_ms").set(lag_ms as f64); + } + + debug!( + target: "metering::kafka", + block_number = bundle.block_number, + uuid = %bundle.uuid(), + tx_count = bundle.txs.len(), + "Received accepted bundle from Kafka" + ); + + self.publish_transactions(&bundle)?; + + // Best-effort asynchronous commit. 
+ if let Err(err) = self.consumer.commit_message(&message, CommitMode::Async) { + warn!( + target: "metering::kafka", + error = %err, + "Failed to commit Kafka offset asynchronously" + ); + metrics::counter!("metering.kafka.errors_total").increment(1); + } + + Ok(()) + } + + fn publish_transactions(&self, bundle: &AcceptedBundle) -> Result<()> { + if bundle.txs.len() != bundle.meter_bundle_response.results.len() { + warn!( + target: "metering::kafka", + bundle_uuid = %bundle.uuid(), + tx_count = bundle.txs.len(), + result_count = bundle.meter_bundle_response.results.len(), + "Bundle transactions/results length mismatch; skipping" + ); + metrics::counter!("metering.kafka.messages_skipped").increment(1); + return Ok(()); + } + + for (tx, result) in bundle.txs.iter().zip(bundle.meter_bundle_response.results.iter()) { + let priority_fee_per_gas = calculate_priority_fee(tx); + let data_availability_bytes = tx_estimated_size_fjord_bytes(&tx.encoded_2718()); + + // TODO(metering): Populate state_root_time_us once the TIPS Kafka schema + // includes per-transaction state-root timing. 
+            let metered_tx = MeteredTransaction {
+                tx_hash: tx.tx_hash(),
+                priority_fee_per_gas,
+                gas_used: result.gas_used,
+                execution_time_us: result.execution_time_us,
+                state_root_time_us: 0,
+                data_availability_bytes,
+            };
+
+            if let Err(err) = self.tx_sender.send(metered_tx) {
+                warn!(
+                    target: "metering::kafka",
+                    error = %err,
+                    tx_hash = %tx.tx_hash(),
+                    "Failed to send metered transaction event"
+                );
+                metrics::counter!("metering.kafka.errors_total").increment(1);
+            }
+        }
+
+        trace!(
+            target: "metering::kafka",
+            bundle_uuid = %bundle.uuid(),
+            transactions = bundle.txs.len(),
+            "Published metering events for bundle"
+        );
+
+        Ok(())
+    }
+}
+
+fn calculate_priority_fee(tx: &Recovered<OpTxEnvelope>) -> U256 {
+    tx.max_priority_fee_per_gas()
+        .map(U256::from)
+        .unwrap_or_else(|| U256::from(tx.max_fee_per_gas()))
+}
diff --git a/crates/rpc/src/base/mod.rs b/crates/rpc/src/base/mod.rs
index 99ced898..e6dd892a 100644
--- a/crates/rpc/src/base/mod.rs
+++ b/crates/rpc/src/base/mod.rs
@@ -1,5 +1,7 @@
+pub mod annotator;
 pub mod cache;
 pub mod estimator;
+pub mod kafka;
 pub(crate) mod meter;
 pub(crate) mod meter_rpc;
 pub(crate) mod pubsub;
diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs
index 8e4c321a..ee0a8851 100644
--- a/crates/rpc/src/lib.rs
+++ b/crates/rpc/src/lib.rs
@@ -8,12 +8,14 @@ pub use tips_core::types::{Bundle, MeterBundleResponse, TransactionResult};
 mod base;
 pub use base::{
+    annotator::{FlashblockInclusion, ResourceAnnotator},
     cache::{BlockMetrics, FlashblockMetrics, MeteredTransaction, MeteringCache, ResourceTotals},
     estimator::{
         BlockPriorityEstimates, EstimateError, FlashblockResourceEstimates, PriorityFeeEstimator,
         ResourceDemand, ResourceEstimate, ResourceEstimates, ResourceKind, ResourceLimits,
         RollingPriorityEstimates,
     },
+    kafka::{KafkaBundleConsumer, KafkaBundleConsumerConfig},
     meter::meter_bundle,
     meter_rpc::MeteringApiImpl,
     pubsub::{EthPubSub, EthPubSubApiServer},

From 9612b642893b948dacec82be98ef66b8867a1a0d Mon Sep 17 00:00:00 2001
From: 
Niran Babalola Date: Mon, 22 Dec 2025 22:43:05 -0600 Subject: [PATCH 03/14] Add base_meteredPriorityFeePerGas RPC endpoint and CLI integration Wires priority fee estimation into the RPC layer: - Add MeteredPriorityFeeResponse types with resource-specific estimates - Add metered_priority_fee_per_gas method to MeteringApi trait - Update MeteringApiImpl to accept optional PriorityFeeEstimator - Add MeteringConfig, KafkaConfig, ResourceLimitsConfig to runner - Wire estimator creation in BaseRpcExtension when Kafka is configured - Add 10 CLI args for resource limits, cache size, and Kafka config --- Cargo.lock | 2 + bin/node/src/cli.rs | 82 ++++++++++- crates/rpc/src/base/kafka.rs | 1 - crates/rpc/src/base/meter_rpc.rs | 170 ++++++++++++++++------- crates/rpc/src/base/metered_fee_types.rs | 67 +++++++++ crates/rpc/src/base/mod.rs | 1 + crates/rpc/src/base/traits.rs | 13 +- crates/rpc/src/lib.rs | 1 + crates/runner/Cargo.toml | 4 + crates/runner/src/config.rs | 45 ++++++ crates/runner/src/extensions/rpc.rs | 54 +++++-- crates/runner/src/lib.rs | 5 +- 12 files changed, 382 insertions(+), 63 deletions(-) create mode 100644 crates/rpc/src/base/metered_fee_types.rs diff --git a/Cargo.lock b/Cargo.lock index 56d5a385..21628ef6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1643,6 +1643,7 @@ dependencies = [ name = "base-reth-runner" version = "0.2.1" dependencies = [ + "alloy-primitives", "base-reth-flashblocks", "base-reth-rpc", "base-tracex", @@ -1650,6 +1651,7 @@ dependencies = [ "eyre", "futures-util", "once_cell", + "parking_lot", "reth", "reth-db", "reth-exex", diff --git a/bin/node/src/cli.rs b/bin/node/src/cli.rs index 8feed7aa..b4abf499 100644 --- a/bin/node/src/cli.rs +++ b/bin/node/src/cli.rs @@ -2,12 +2,15 @@ use std::sync::Arc; -use base_reth_runner::{BaseNodeConfig, FlashblocksCell, FlashblocksConfig, TracingConfig}; +use base_reth_runner::{ + BaseNodeConfig, FlashblocksCell, FlashblocksConfig, KafkaConfig, MeteringConfig, + ResourceLimitsConfig, TracingConfig, 
+}; use once_cell::sync::OnceCell; use reth_optimism_node::args::RollupArgs; /// CLI Arguments -#[derive(Debug, Clone, PartialEq, Eq, clap::Args)] +#[derive(Debug, Clone, PartialEq, clap::Args)] #[command(next_help_heading = "Rollup")] pub struct Args { /// Rollup arguments @@ -40,6 +43,51 @@ pub struct Args { /// Enable metering RPC for transaction bundle simulation #[arg(long = "enable-metering", value_name = "ENABLE_METERING")] pub enable_metering: bool, + + // --- Priority fee estimation args --- + /// Kafka brokers for metering bundle events (comma-separated) + #[arg(long = "metering-kafka-brokers")] + pub metering_kafka_brokers: Option, + + /// Kafka topic for accepted bundle events + #[arg(long = "metering-kafka-topic")] + pub metering_kafka_topic: Option, + + /// Kafka consumer group ID + #[arg(long = "metering-kafka-group-id")] + pub metering_kafka_group_id: Option, + + /// Optional path to Kafka properties file + #[arg(long = "metering-kafka-properties-file")] + pub metering_kafka_properties_file: Option, + + /// Gas limit per flashblock for priority fee estimation + #[arg(long = "metering-gas-limit", default_value = "30000000")] + pub metering_gas_limit: u64, + + /// Execution time budget in microseconds per flashblock + #[arg(long = "metering-execution-time-us", default_value = "50000")] + pub metering_execution_time_us: u64, + + /// State root time budget in microseconds (optional, disabled by default) + #[arg(long = "metering-state-root-time-us")] + pub metering_state_root_time_us: Option, + + /// Data availability bytes limit per flashblock + #[arg(long = "metering-da-bytes", default_value = "120000")] + pub metering_da_bytes: u64, + + /// Percentile for recommended priority fee (0.0-1.0) + #[arg(long = "metering-priority-fee-percentile", default_value = "0.5")] + pub metering_priority_fee_percentile: f64, + + /// Default priority fee when resource is not congested (in wei) + #[arg(long = "metering-uncongested-priority-fee", default_value = "1")] + 
pub metering_uncongested_priority_fee: u128, + + /// Number of recent blocks to retain in metering cache + #[arg(long = "metering-cache-size", default_value = "12")] + pub metering_cache_size: usize, } impl Args { @@ -58,6 +106,35 @@ impl From for BaseNodeConfig { max_pending_blocks_depth: args.max_pending_blocks_depth, }); + // Build Kafka config if all required fields are present + let kafka = match ( + args.metering_kafka_brokers, + args.metering_kafka_topic, + args.metering_kafka_group_id, + ) { + (Some(brokers), Some(topic), Some(group_id)) => Some(KafkaConfig { + brokers, + topic, + group_id, + properties_file: args.metering_kafka_properties_file, + }), + _ => None, + }; + + let metering = MeteringConfig { + enabled: args.enable_metering, + kafka, + resource_limits: ResourceLimitsConfig { + gas_limit: args.metering_gas_limit, + execution_time_us: args.metering_execution_time_us, + state_root_time_us: args.metering_state_root_time_us, + da_bytes: args.metering_da_bytes, + }, + priority_fee_percentile: args.metering_priority_fee_percentile, + uncongested_priority_fee: args.metering_uncongested_priority_fee, + cache_size: args.metering_cache_size, + }; + Self { rollup_args: args.rollup_args, flashblocks, @@ -66,6 +143,7 @@ impl From for BaseNodeConfig { logs_enabled: args.enable_transaction_tracing_logs, }, metering_enabled: args.enable_metering, + metering, flashblocks_cell, } } diff --git a/crates/rpc/src/base/kafka.rs b/crates/rpc/src/base/kafka.rs index d943976e..59803124 100644 --- a/crates/rpc/src/base/kafka.rs +++ b/crates/rpc/src/base/kafka.rs @@ -32,7 +32,6 @@ pub struct KafkaBundleConsumerConfig { const MAX_BACKOFF_SECS: u64 = 60; /// Consumes `AcceptedBundle` events from Kafka and publishes transaction-level metering data. 
-#[derive(Debug)] pub struct KafkaBundleConsumer { consumer: StreamConsumer, tx_sender: UnboundedSender, diff --git a/crates/rpc/src/base/meter_rpc.rs b/crates/rpc/src/base/meter_rpc.rs index 9a99aa32..2c9a0e9f 100644 --- a/crates/rpc/src/base/meter_rpc.rs +++ b/crates/rpc/src/base/meter_rpc.rs @@ -1,19 +1,32 @@ +//! Implementation of the metering RPC API. + +use std::sync::Arc; + use alloy_consensus::Header; -use alloy_eips::BlockNumberOrTag; +use alloy_eips::{BlockNumberOrTag, Encodable2718}; use alloy_primitives::U256; -use jsonrpsee::core::{RpcResult, async_trait}; +use jsonrpsee::{ + core::{RpcResult, async_trait}, + types::{ErrorCode, ErrorObjectOwned}, +}; +use op_alloy_flz::tx_estimated_size_fjord_bytes; use reth::providers::BlockReaderIdExt; use reth_optimism_chainspec::OpChainSpec; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use tips_core::types::{Bundle, MeterBundleResponse, ParsedBundle}; -use tracing::{error, info}; +use tracing::{debug, error, info}; -use crate::{MeteringApiServer, meter_bundle}; +use crate::{ + MeteringApiServer, PriorityFeeEstimator, ResourceDemand, + base::metered_fee_types::{MeteredPriorityFeeResponse, build_priority_fee_response}, + meter_bundle, +}; /// Implementation of the metering RPC API #[derive(Debug)] pub struct MeteringApiImpl { provider: Provider, + priority_fee_estimator: Option>, } impl MeteringApiImpl @@ -23,84 +36,86 @@ where + BlockReaderIdExt
+ Clone, { - /// Creates a new instance of MeteringApi + /// Creates a new instance of MeteringApi without priority fee estimation. pub const fn new(provider: Provider) -> Self { - Self { provider } + Self { provider, priority_fee_estimator: None } } -} -#[async_trait] -impl MeteringApiServer for MeteringApiImpl -where - Provider: StateProviderFactory - + ChainSpecProvider - + BlockReaderIdExt
- + Clone - + Send - + Sync - + 'static, -{ - async fn meter_bundle(&self, bundle: Bundle) -> RpcResult { + /// Creates a new instance of MeteringApi with priority fee estimation enabled. + pub fn with_estimator( + provider: Provider, + priority_fee_estimator: Arc, + ) -> Self { + Self { provider, priority_fee_estimator: Some(priority_fee_estimator) } + } + + fn run_metering( + &self, + bundle: Bundle, + ) -> Result<(MeterBundleResponse, ResourceDemand), ErrorObjectOwned> { info!( num_transactions = &bundle.txs.len(), block_number = &bundle.block_number, "Starting bundle metering" ); - // Get the latest header let header = self .provider .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest) .map_err(|e| { - jsonrpsee::types::ErrorObjectOwned::owned( - jsonrpsee::types::ErrorCode::InternalError.code(), - format!("Failed to get latest header: {}", e), + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), + format!("Failed to get latest header: {e}"), None::<()>, ) })? .ok_or_else(|| { - jsonrpsee::types::ErrorObjectOwned::owned( - jsonrpsee::types::ErrorCode::InternalError.code(), + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), "Latest block not found".to_string(), None::<()>, ) })?; let parsed_bundle = ParsedBundle::try_from(bundle).map_err(|e| { - jsonrpsee::types::ErrorObjectOwned::owned( - jsonrpsee::types::ErrorCode::InvalidParams.code(), - format!("Failed to parse bundle: {}", e), + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + format!("Failed to parse bundle: {e}"), None::<()>, ) })?; - // Get state provider for the block + let da_usage: u64 = parsed_bundle + .txs + .iter() + .map(|tx| tx_estimated_size_fjord_bytes(&tx.encoded_2718())) + .sum(); + let state_provider = self.provider.state_by_block_hash(header.hash()).map_err(|e| { error!(error = %e, "Failed to get state provider"); - jsonrpsee::types::ErrorObjectOwned::owned( - jsonrpsee::types::ErrorCode::InternalError.code(), - format!("Failed to get state provider: 
{}", e), + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), + format!("Failed to get state provider: {e}"), None::<()>, ) })?; - // Meter bundle using utility function + let chain_spec = self.provider.chain_spec().clone(); + let (results, total_gas_used, total_gas_fees, bundle_hash, total_execution_time) = - meter_bundle(state_provider, self.provider.chain_spec(), parsed_bundle, &header) - .map_err(|e| { - error!(error = %e, "Bundle metering failed"); - jsonrpsee::types::ErrorObjectOwned::owned( - jsonrpsee::types::ErrorCode::InternalError.code(), - format!("Bundle metering failed: {}", e), - None::<()>, - ) - })?; - - // Calculate average gas price + meter_bundle(state_provider, chain_spec, parsed_bundle, &header).map_err(|e| { + error!(error = %e, "Bundle metering failed"); + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), + format!("Bundle metering failed: {e}"), + None::<()>, + ) + })?; + let bundle_gas_price = if total_gas_used > 0 { total_gas_fees / U256::from(total_gas_used) } else { - U256::from(0) + U256::ZERO }; info!( @@ -111,17 +126,76 @@ where "Bundle metering completed successfully" ); - Ok(MeterBundleResponse { + let response = MeterBundleResponse { bundle_gas_price, bundle_hash, coinbase_diff: total_gas_fees, - eth_sent_to_coinbase: U256::from(0), + eth_sent_to_coinbase: U256::ZERO, gas_fees: total_gas_fees, results, state_block_number: header.number, state_flashblock_index: None, total_gas_used, total_execution_time_us: total_execution_time, - }) + }; + + let resource_demand = ResourceDemand { + gas_used: Some(total_gas_used), + execution_time_us: Some(total_execution_time), + state_root_time_us: None, // Populated when state-root metrics become available. + data_availability_bytes: Some(da_usage), + }; + + Ok((response, resource_demand)) + } +} + +#[async_trait] +impl MeteringApiServer for MeteringApiImpl +where + Provider: StateProviderFactory + + ChainSpecProvider + + BlockReaderIdExt
+ + Clone + + Send + + Sync + + 'static, +{ + async fn meter_bundle(&self, bundle: Bundle) -> RpcResult { + let (response, _) = self.run_metering(bundle)?; + Ok(response) + } + + async fn metered_priority_fee_per_gas( + &self, + bundle: Bundle, + ) -> RpcResult { + let (meter_bundle, resource_demand) = self.run_metering(bundle)?; + + let estimator = self.priority_fee_estimator.as_ref().ok_or_else(|| { + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), + "Priority fee estimation not enabled".to_string(), + None::<()>, + ) + })?; + + debug!(?resource_demand, "Computing priority fee estimates"); + + let estimates = estimator + .estimate_rolling(resource_demand) + .map_err(|e| { + ErrorObjectOwned::owned(ErrorCode::InvalidParams.code(), e.to_string(), None::<()>) + })? + .ok_or_else(|| { + ErrorObjectOwned::owned( + ErrorCode::InternalError.code(), + "Priority fee data unavailable".to_string(), + None::<()>, + ) + })?; + + let response = build_priority_fee_response(meter_bundle, estimates); + Ok(response) } } diff --git a/crates/rpc/src/base/metered_fee_types.rs b/crates/rpc/src/base/metered_fee_types.rs new file mode 100644 index 00000000..4d00475f --- /dev/null +++ b/crates/rpc/src/base/metered_fee_types.rs @@ -0,0 +1,67 @@ +//! Response types for the metered priority fee RPC endpoint. + +use tips_core::types::MeterBundleResponse; + +use crate::{ResourceEstimates, ResourceKind, RollingPriorityEstimates}; + +/// Human-friendly representation of a resource fee quote. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResourceFeeEstimateResponse { + /// Resource name (gasUsed, executionTime, etc). + pub resource: String, + /// Minimum fee to displace enough capacity. + pub threshold_priority_fee: String, + /// Recommended fee with safety margin. + pub recommended_priority_fee: String, + /// Cumulative resource usage above threshold. 
+ pub cumulative_usage: String, + /// Number of transactions above threshold. + pub threshold_tx_count: u64, + /// Total transactions considered. + pub total_transactions: u64, +} + +/// Response payload for `base_meteredPriorityFeePerGas`. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MeteredPriorityFeeResponse { + /// Bundled metering results. + #[serde(flatten)] + pub meter_bundle: MeterBundleResponse, + /// Single recommended priority fee (max across all resources and median across recent blocks). + pub recommended_priority_fee: String, + /// Number of recent blocks used to compute the rolling estimate. + pub blocks_sampled: u64, + /// Per-resource estimates (median across sampled blocks). + pub resource_estimates: Vec, +} + +/// Converts rolling estimates to the response format. +pub fn build_priority_fee_response( + meter_bundle: MeterBundleResponse, + estimates: RollingPriorityEstimates, +) -> MeteredPriorityFeeResponse { + let resource_estimates = resource_estimates_to_vec(&estimates.estimates); + + MeteredPriorityFeeResponse { + meter_bundle, + recommended_priority_fee: estimates.recommended_priority_fee.to_string(), + blocks_sampled: estimates.blocks_sampled as u64, + resource_estimates, + } +} + +fn resource_estimates_to_vec(estimates: &ResourceEstimates) -> Vec { + estimates + .iter() + .map(|(kind, est)| ResourceFeeEstimateResponse { + resource: kind.as_camel_case().to_string(), + threshold_priority_fee: est.threshold_priority_fee.to_string(), + recommended_priority_fee: est.recommended_priority_fee.to_string(), + cumulative_usage: est.cumulative_usage.to_string(), + threshold_tx_count: est.threshold_tx_count.try_into().unwrap_or(u64::MAX), + total_transactions: est.total_transactions.try_into().unwrap_or(u64::MAX), + }) + .collect() +} diff --git a/crates/rpc/src/base/mod.rs b/crates/rpc/src/base/mod.rs index e6dd892a..515577cc 100644 --- a/crates/rpc/src/base/mod.rs +++ 
b/crates/rpc/src/base/mod.rs @@ -4,6 +4,7 @@ pub mod estimator; pub mod kafka; pub(crate) mod meter; pub(crate) mod meter_rpc; +pub(crate) mod metered_fee_types; pub(crate) mod pubsub; pub(crate) mod traits; pub(crate) mod transaction_rpc; diff --git a/crates/rpc/src/base/traits.rs b/crates/rpc/src/base/traits.rs index 4f80de59..33acf02c 100644 --- a/crates/rpc/src/base/traits.rs +++ b/crates/rpc/src/base/traits.rs @@ -3,7 +3,10 @@ use alloy_primitives::TxHash; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use crate::{Bundle, MeterBundleResponse, TransactionStatusResponse}; +use crate::{ + Bundle, MeterBundleResponse, TransactionStatusResponse, + base::metered_fee_types::MeteredPriorityFeeResponse, +}; /// RPC API for transaction metering #[rpc(server, namespace = "base")] @@ -11,6 +14,14 @@ pub trait MeteringApi { /// Simulates and meters a bundle of transactions #[method(name = "meterBundle")] async fn meter_bundle(&self, bundle: Bundle) -> RpcResult; + + /// Estimates the priority fee necessary for a bundle to be included in recently observed + /// flashblocks, considering multiple resource constraints. 
+ #[method(name = "meteredPriorityFeePerGas")] + async fn metered_priority_fee_per_gas( + &self, + bundle: Bundle, + ) -> RpcResult; } /// RPC API for transaction status diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index ee0a8851..5165e035 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -18,6 +18,7 @@ pub use base::{ kafka::{KafkaBundleConsumer, KafkaBundleConsumerConfig}, meter::meter_bundle, meter_rpc::MeteringApiImpl, + metered_fee_types::{MeteredPriorityFeeResponse, ResourceFeeEstimateResponse}, pubsub::{EthPubSub, EthPubSubApiServer}, traits::{MeteringApiServer, TransactionStatusApiServer}, transaction_rpc::TransactionStatusApiImpl, diff --git a/crates/runner/Cargo.toml b/crates/runner/Cargo.toml index 8f48a83e..625bfd1b 100644 --- a/crates/runner/Cargo.toml +++ b/crates/runner/Cargo.toml @@ -24,10 +24,14 @@ reth-exex.workspace = true reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true +# alloy +alloy-primitives.workspace = true + # misc eyre.workspace = true futures-util.workspace = true once_cell.workspace = true tracing.workspace = true url.workspace = true +parking_lot.workspace = true derive_more = { workspace = true, features = ["debug"] } diff --git a/crates/runner/src/config.rs b/crates/runner/src/config.rs index 58c60a56..f3bac8ad 100644 --- a/crates/runner/src/config.rs +++ b/crates/runner/src/config.rs @@ -15,6 +15,8 @@ pub struct BaseNodeConfig { pub tracing: TracingConfig, /// Indicates whether the metering RPC surface should be installed. pub metering_enabled: bool, + /// Configuration for priority fee estimation. + pub metering: MeteringConfig, /// Shared Flashblocks state cache. pub flashblocks_cell: FlashblocksCell, } @@ -43,3 +45,46 @@ pub struct TracingConfig { /// Emits `info`-level logs for the tracing ExEx when enabled. pub logs_enabled: bool, } + +/// Configuration for priority fee estimation. +#[derive(Debug, Clone)] +pub struct MeteringConfig { + /// Whether metering is enabled. 
+ pub enabled: bool, + /// Kafka configuration for bundle events. + pub kafka: Option, + /// Resource limits for fee estimation. + pub resource_limits: ResourceLimitsConfig, + /// Percentile for recommended priority fee (0.0-1.0). + pub priority_fee_percentile: f64, + /// Default priority fee when resource is not congested (in wei). + pub uncongested_priority_fee: u128, + /// Number of recent blocks to retain in metering cache. + pub cache_size: usize, +} + +/// Kafka connection configuration. +#[derive(Debug, Clone)] +pub struct KafkaConfig { + /// Comma-separated broker addresses. + pub brokers: String, + /// Topic name for accepted bundle events. + pub topic: String, + /// Consumer group ID. + pub group_id: String, + /// Optional path to properties file. + pub properties_file: Option, +} + +/// Resource limits for priority fee estimation. +#[derive(Debug, Clone, Copy)] +pub struct ResourceLimitsConfig { + /// Gas limit per flashblock. + pub gas_limit: u64, + /// Execution time budget in microseconds. + pub execution_time_us: u64, + /// State root time budget in microseconds (optional). + pub state_root_time_us: Option, + /// Data availability bytes limit. 
+ pub da_bytes: u64, +} diff --git a/crates/runner/src/extensions/rpc.rs b/crates/runner/src/extensions/rpc.rs index 164a3af5..32989b55 100644 --- a/crates/runner/src/extensions/rpc.rs +++ b/crates/runner/src/extensions/rpc.rs @@ -2,16 +2,19 @@ use std::sync::Arc; +use alloy_primitives::U256; use base_reth_flashblocks::{FlashblocksState, FlashblocksSubscriber}; use base_reth_rpc::{ EthApiExt, EthApiOverrideServer, EthPubSub, EthPubSubApiServer, MeteringApiImpl, - MeteringApiServer, TransactionStatusApiImpl, TransactionStatusApiServer, + MeteringApiServer, MeteringCache, PriorityFeeEstimator, ResourceLimits, + TransactionStatusApiImpl, TransactionStatusApiServer, }; +use parking_lot::RwLock; use tracing::info; use url::Url; use crate::{ - BaseNodeConfig, FlashblocksConfig, + BaseNodeConfig, FlashblocksConfig, MeteringConfig, extensions::{BaseNodeExtension, ConfigurableBaseNodeExtension, FlashblocksCell, OpBuilder}, }; @@ -22,8 +25,8 @@ pub struct BaseRpcExtension { pub flashblocks_cell: FlashblocksCell, /// Optional Flashblocks configuration. pub flashblocks: Option, - /// Indicates whether the metering RPC surface should be installed. - pub metering_enabled: bool, + /// Full metering configuration. + pub metering: MeteringConfig, /// Sequencer RPC endpoint for transaction status proxying. 
pub sequencer_rpc: Option, } @@ -34,7 +37,7 @@ impl BaseRpcExtension { Self { flashblocks_cell: config.flashblocks_cell.clone(), flashblocks: config.flashblocks.clone(), - metering_enabled: config.metering_enabled, + metering: config.metering.clone(), sequencer_rpc: config.rollup_args.sequencer.clone(), } } @@ -45,13 +48,43 @@ impl BaseNodeExtension for BaseRpcExtension { fn apply(&self, builder: OpBuilder) -> OpBuilder { let flashblocks_cell = self.flashblocks_cell.clone(); let flashblocks = self.flashblocks.clone(); - let metering_enabled = self.metering_enabled; + let metering = self.metering.clone(); let sequencer_rpc = self.sequencer_rpc.clone(); builder.extend_rpc_modules(move |ctx| { - if metering_enabled { + if metering.enabled { info!(message = "Starting Metering RPC"); - let metering_api = MeteringApiImpl::new(ctx.provider().clone()); + + // Create priority fee estimator if configured + let estimator = if metering.kafka.is_some() { + info!(message = "Enabling priority fee estimation"); + let cache = Arc::new(RwLock::new(MeteringCache::new(metering.cache_size))); + let limits = ResourceLimits { + gas_used: Some(metering.resource_limits.gas_limit), + execution_time_us: Some(metering.resource_limits.execution_time_us as u128), + state_root_time_us: metering + .resource_limits + .state_root_time_us + .map(|v| v as u128), + data_availability_bytes: Some(metering.resource_limits.da_bytes), + }; + let default_fee = U256::from(metering.uncongested_priority_fee); + let estimator = Arc::new(PriorityFeeEstimator::new( + cache, + metering.priority_fee_percentile, + limits, + default_fee, + None, // Dynamic DA config not wired yet + )); + Some(estimator) + } else { + None + }; + + let metering_api = estimator.map_or_else( + || MeteringApiImpl::new(ctx.provider().clone()), + |est| MeteringApiImpl::with_estimator(ctx.provider().clone(), est), + ); ctx.modules.merge_configured(metering_api.into_rpc())?; } @@ -85,8 +118,9 @@ impl BaseNodeExtension for BaseRpcExtension { 
ctx.modules.replace_configured(api_ext.into_rpc())?; // Register the eth_subscribe subscription endpoint for flashblocks - // Uses replace_configured since eth_subscribe already exists from reth's standard module - // Pass eth_api to enable proxying standard subscription types to reth's implementation + // Uses replace_configured since eth_subscribe already exists from reth's standard + // module Pass eth_api to enable proxying standard subscription types to + // reth's implementation let eth_pubsub = EthPubSub::new(ctx.registry.eth_api().clone(), fb); ctx.modules.replace_configured(eth_pubsub.into_rpc())?; } else { diff --git a/crates/runner/src/lib.rs b/crates/runner/src/lib.rs index c96fadf1..c4c9587a 100644 --- a/crates/runner/src/lib.rs +++ b/crates/runner/src/lib.rs @@ -13,7 +13,10 @@ mod runner; pub use runner::BaseNodeRunner; mod config; -pub use config::{BaseNodeConfig, FlashblocksConfig, TracingConfig}; +pub use config::{ + BaseNodeConfig, FlashblocksConfig, KafkaConfig, MeteringConfig, ResourceLimitsConfig, + TracingConfig, +}; mod extensions; pub use extensions::{ From 856ebbce804c15e750cfb835ced7543a2bef6d16 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Mon, 22 Dec 2025 22:49:53 -0600 Subject: [PATCH 04/14] Add estimator regression tests Port additional tests from original metering crate: - compute_estimate_empty_transactions: verify uncongested behavior - estimate_for_block_respects_limits: integration test for block estimates - estimate_for_block_propagates_limit_errors: verify error propagation - estimate_rolling_aggregates_across_blocks: verify rolling median calculation --- crates/rpc/src/base/estimator.rs | 104 +++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index b14c1741..9e0b5fc1 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -798,4 +798,108 @@ mod tests { assert_eq!(quote.threshold_tx_count, 0); 
assert_eq!(quote.cumulative_usage, 0); } + + #[test] + fn compute_estimate_empty_transactions() { + // No transactions = uncongested, return default fee + let txs_refs: Vec<&MeteredTransaction> = vec![]; + let quote = compute_estimate( + ResourceKind::GasUsed, + &txs_refs, + 15, + 30, + usage_extractor(ResourceKind::GasUsed), + 0.5, + DEFAULT_FEE, + ) + .expect("no error"); + assert_eq!(quote.threshold_priority_fee, DEFAULT_FEE); + assert_eq!(quote.recommended_priority_fee, DEFAULT_FEE); + } + + const DEFAULT_LIMITS: ResourceLimits = ResourceLimits { + gas_used: Some(25), + execution_time_us: Some(100), + state_root_time_us: None, + data_availability_bytes: Some(100), + }; + + fn setup_estimator( + limits: ResourceLimits, + ) -> (Arc>, PriorityFeeEstimator) { + let cache = Arc::new(RwLock::new(MeteringCache::new(4))); + let estimator = PriorityFeeEstimator::new(cache.clone(), 0.5, limits, DEFAULT_FEE, None); + (cache, estimator) + } + + #[test] + fn estimate_for_block_respects_limits() { + let (cache, estimator) = setup_estimator(DEFAULT_LIMITS); + { + let mut guard = cache.write(); + guard.upsert_transaction(1, 0, tx(10, 10)); + guard.upsert_transaction(1, 0, tx(5, 10)); + } + let mut demand = ResourceDemand::default(); + demand.gas_used = Some(15); + + let estimates = + estimator.estimate_for_block(Some(1), demand).expect("no error").expect("cached block"); + + assert_eq!(estimates.block_number, 1); + let gas_estimate = estimates.max_across_flashblocks.gas_used.expect("gas estimate present"); + assert_eq!(gas_estimate.threshold_priority_fee, U256::from(10)); + } + + #[test] + fn estimate_for_block_propagates_limit_errors() { + let mut limits = DEFAULT_LIMITS; + limits.gas_used = Some(10); + let (cache, estimator) = setup_estimator(limits); + { + let mut guard = cache.write(); + guard.upsert_transaction(1, 0, tx(10, 10)); + guard.upsert_transaction(1, 0, tx(5, 10)); + } + let mut demand = ResourceDemand::default(); + demand.gas_used = Some(15); + + let err = 
estimator + .estimate_for_block(Some(1), demand) + .expect_err("demand should exceed capacity"); + assert!(matches!( + err, + EstimateError::DemandExceedsCapacity { + resource: ResourceKind::GasUsed, + demand: 15, + limit: 10 + } + )); + } + + #[test] + fn estimate_rolling_aggregates_across_blocks() { + let (cache, estimator) = setup_estimator(DEFAULT_LIMITS); + { + let mut guard = cache.write(); + // Block 1 → threshold 10 + guard.upsert_transaction(1, 0, tx(10, 10)); + guard.upsert_transaction(1, 0, tx(5, 10)); + // Block 2 → threshold 30 + guard.upsert_transaction(2, 0, tx(30, 10)); + guard.upsert_transaction(2, 0, tx(25, 10)); + } + + let mut demand = ResourceDemand::default(); + demand.gas_used = Some(15); + + let rolling = + estimator.estimate_rolling(demand).expect("no error").expect("estimates available"); + + assert_eq!(rolling.blocks_sampled, 2); + let gas_estimate = rolling.estimates.gas_used.expect("gas estimate present"); + // Median across [10, 30] = 30 (upper median for even count) + assert_eq!(gas_estimate.recommended_priority_fee, U256::from(30)); + assert_eq!(rolling.recommended_priority_fee, U256::from(30)); + } } From c381fa97b57080beb08eb925ebaf98aaadaa339b Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Mon, 22 Dec 2025 23:06:14 -0600 Subject: [PATCH 05/14] Wire Kafka consumer and ResourceAnnotator into runner Completes the metering pipeline by: - Creating MeteringRuntime to hold cache, estimator, and channels - Spawning ResourceAnnotator task to correlate transactions with flashblocks - Spawning KafkaBundleConsumer task to consume AcceptedBundle events - Adding CompositeFlashblocksReceiver that forwards to both FlashblocksState and the metering pipeline via FlashblockInclusion events - Adding flashblock_inclusion_from_flashblock helper to extract tx hashes - Supporting Kafka properties file loading The metering cache is now populated when: 1. Kafka consumer receives AcceptedBundle events (transaction data) 2. 
FlashblocksSubscriber receives flashblocks (inclusion position) 3. ResourceAnnotator correlates both to update the cache --- Cargo.lock | 3 + crates/runner/Cargo.toml | 5 + crates/runner/src/extensions/rpc.rs | 233 +++++++++++++++++++++++----- 3 files changed, 206 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21628ef6..1e7b8e86 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1644,6 +1644,7 @@ name = "base-reth-runner" version = "0.2.1" dependencies = [ "alloy-primitives", + "base-flashtypes", "base-reth-flashblocks", "base-reth-rpc", "base-tracex", @@ -1652,11 +1653,13 @@ dependencies = [ "futures-util", "once_cell", "parking_lot", + "rdkafka", "reth", "reth-db", "reth-exex", "reth-optimism-chainspec", "reth-optimism-node", + "tokio", "tracing", "url", ] diff --git a/crates/runner/Cargo.toml b/crates/runner/Cargo.toml index 625bfd1b..a07eaa9d 100644 --- a/crates/runner/Cargo.toml +++ b/crates/runner/Cargo.toml @@ -27,6 +27,9 @@ reth-optimism-chainspec.workspace = true # alloy alloy-primitives.workspace = true +# flashblocks +base-flashtypes.workspace = true + # misc eyre.workspace = true futures-util.workspace = true @@ -35,3 +38,5 @@ tracing.workspace = true url.workspace = true parking_lot.workspace = true derive_more = { workspace = true, features = ["debug"] } +rdkafka.workspace = true +tokio.workspace = true diff --git a/crates/runner/src/extensions/rpc.rs b/crates/runner/src/extensions/rpc.rs index 32989b55..696a92ed 100644 --- a/crates/runner/src/extensions/rpc.rs +++ b/crates/runner/src/extensions/rpc.rs @@ -2,15 +2,19 @@ use std::sync::Arc; -use alloy_primitives::U256; -use base_reth_flashblocks::{FlashblocksState, FlashblocksSubscriber}; +use alloy_primitives::{B256, U256, keccak256}; +use base_flashtypes::Flashblock; +use base_reth_flashblocks::{FlashblocksReceiver, FlashblocksState, FlashblocksSubscriber}; use base_reth_rpc::{ - EthApiExt, EthApiOverrideServer, EthPubSub, EthPubSubApiServer, MeteringApiImpl, - MeteringApiServer, 
MeteringCache, PriorityFeeEstimator, ResourceLimits, + EthApiExt, EthApiOverrideServer, EthPubSub, EthPubSubApiServer, FlashblockInclusion, + KafkaBundleConsumer, KafkaBundleConsumerConfig, MeteredTransaction, MeteringApiImpl, + MeteringApiServer, MeteringCache, PriorityFeeEstimator, ResourceAnnotator, ResourceLimits, TransactionStatusApiImpl, TransactionStatusApiServer, }; use parking_lot::RwLock; -use tracing::info; +use rdkafka::ClientConfig; +use tokio::sync::mpsc; +use tracing::{error, info, warn}; use url::Url; use crate::{ @@ -18,6 +22,95 @@ use crate::{ extensions::{BaseNodeExtension, ConfigurableBaseNodeExtension, FlashblocksCell, OpBuilder}, }; +/// Runtime state for the metering pipeline. +#[derive(Clone)] +struct MeteringRuntime { + /// Shared cache for metered transactions. + cache: Arc>, + /// Priority fee estimator. + estimator: Arc, + /// Sender for metered transactions from Kafka. + tx_sender: mpsc::UnboundedSender, + /// Sender for flashblock inclusions. + flashblock_sender: mpsc::UnboundedSender, +} + +/// Composite receiver that forwards flashblocks to both FlashblocksState and the metering pipeline. +struct CompositeFlashblocksReceiver { + state: Arc>, + /// Optional channel for the metering pipeline; flashblocks RPC still needs the stream even + /// when metering is disabled, so we only forward inclusions if a sender is provided. 
+ metering_sender: Option>, +} + +impl CompositeFlashblocksReceiver { + fn new( + state: Arc>, + metering_sender: Option>, + ) -> Self { + Self { state, metering_sender } + } +} + +impl FlashblocksReceiver for CompositeFlashblocksReceiver +where + FlashblocksState: FlashblocksReceiver, +{ + fn on_flashblock_received(&self, flashblock: Flashblock) { + // Forward to the state first + self.state.on_flashblock_received(flashblock.clone()); + + // Then forward to metering if enabled + let Some(sender) = &self.metering_sender else { + return; + }; + let Some(inclusion) = flashblock_inclusion_from_flashblock(&flashblock) else { + return; + }; + + if sender.send(inclusion).is_err() { + warn!( + target: "metering::flashblocks", + "Failed to forward flashblock inclusion to metering" + ); + } + } +} + +/// Converts a flashblock to a FlashblockInclusion for the metering pipeline. +fn flashblock_inclusion_from_flashblock(flashblock: &Flashblock) -> Option { + if flashblock.diff.transactions.is_empty() { + return None; + } + + let ordered_tx_hashes: Vec = + flashblock.diff.transactions.iter().map(|tx_bytes| keccak256(tx_bytes)).collect(); + + Some(FlashblockInclusion { + block_number: flashblock.metadata.block_number, + flashblock_index: flashblock.index, + ordered_tx_hashes, + }) +} + +/// Loads Kafka configuration from a properties file. +fn load_kafka_config_from_file( + path: &str, +) -> Result, Box> { + let content = std::fs::read_to_string(path)?; + let mut props = Vec::new(); + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + if let Some((key, value)) = line.split_once('=') { + props.push((key.trim().to_string(), value.trim().to_string())); + } + } + Ok(props) +} + /// Helper struct that wires the custom RPC modules into the node builder. 
#[derive(Debug, Clone)] pub struct BaseRpcExtension { @@ -52,38 +145,102 @@ impl BaseNodeExtension for BaseRpcExtension { let sequencer_rpc = self.sequencer_rpc.clone(); builder.extend_rpc_modules(move |ctx| { - if metering.enabled { - info!(message = "Starting Metering RPC"); - - // Create priority fee estimator if configured - let estimator = if metering.kafka.is_some() { - info!(message = "Enabling priority fee estimation"); - let cache = Arc::new(RwLock::new(MeteringCache::new(metering.cache_size))); - let limits = ResourceLimits { - gas_used: Some(metering.resource_limits.gas_limit), - execution_time_us: Some(metering.resource_limits.execution_time_us as u128), - state_root_time_us: metering - .resource_limits - .state_root_time_us - .map(|v| v as u128), - data_availability_bytes: Some(metering.resource_limits.da_bytes), - }; - let default_fee = U256::from(metering.uncongested_priority_fee); - let estimator = Arc::new(PriorityFeeEstimator::new( - cache, - metering.priority_fee_percentile, - limits, - default_fee, - None, // Dynamic DA config not wired yet - )); - Some(estimator) - } else { - None + // Set up metering runtime if enabled with Kafka + let metering_runtime = if metering.enabled && metering.kafka.is_some() { + info!(message = "Starting Metering RPC with priority fee estimation"); + + let cache = Arc::new(RwLock::new(MeteringCache::new(metering.cache_size))); + let limits = ResourceLimits { + gas_used: Some(metering.resource_limits.gas_limit), + execution_time_us: Some(metering.resource_limits.execution_time_us as u128), + state_root_time_us: metering + .resource_limits + .state_root_time_us + .map(|v| v as u128), + data_availability_bytes: Some(metering.resource_limits.da_bytes), }; + let default_fee = U256::from(metering.uncongested_priority_fee); + let estimator = Arc::new(PriorityFeeEstimator::new( + cache.clone(), + metering.priority_fee_percentile, + limits, + default_fee, + None, // Dynamic DA config not wired yet + )); + + // Create channels 
for the annotator + let (tx_sender, tx_receiver) = mpsc::unbounded_channel::(); + let (flashblock_sender, flashblock_receiver) = + mpsc::unbounded_channel::(); + + // Spawn the resource annotator + let annotator_cache = cache.clone(); + tokio::spawn(async move { + ResourceAnnotator::new(annotator_cache, tx_receiver, flashblock_receiver) + .run() + .await; + }); + + Some(MeteringRuntime { cache, estimator, tx_sender, flashblock_sender }) + } else { + None + }; - let metering_api = estimator.map_or_else( + // Spawn Kafka consumer if configured + if let (Some(runtime), Some(kafka_cfg)) = (&metering_runtime, &metering.kafka) { + info!(message = "Starting Kafka consumer for metering"); + + let mut client_config = ClientConfig::new(); + client_config.set("bootstrap.servers", &kafka_cfg.brokers); + client_config.set("group.id", &kafka_cfg.group_id); + client_config.set("enable.partition.eof", "false"); + client_config.set("session.timeout.ms", "6000"); + client_config.set("enable.auto.commit", "true"); + client_config.set("auto.offset.reset", "earliest"); + + if let Some(path) = kafka_cfg.properties_file.as_ref() { + match load_kafka_config_from_file(path) { + Ok(props) => { + for (key, value) in props { + client_config.set(key, value); + } + } + Err(err) => { + warn!( + message = "Failed to load Kafka properties file", + file = %path, + %err + ); + } + } + } + + let tx_sender = runtime.tx_sender.clone(); + let topic = kafka_cfg.topic.clone(); + tokio::spawn(async move { + let config = KafkaBundleConsumerConfig { client_config, topic }; + + match KafkaBundleConsumer::new(config, tx_sender) { + Ok(consumer) => consumer.run().await, + Err(err) => error!( + target: "metering::kafka", + %err, + "Failed to initialize Kafka consumer" + ), + } + }); + } + + // Register metering RPC + if metering.enabled { + let metering_api = metering_runtime.as_ref().map_or_else( || MeteringApiImpl::new(ctx.provider().clone()), - |est| MeteringApiImpl::with_estimator(ctx.provider().clone(), 
est), + |rt| { + MeteringApiImpl::with_estimator( + ctx.provider().clone(), + rt.estimator.clone(), + ) + }, ); ctx.modules.merge_configured(metering_api.into_rpc())?; } @@ -107,7 +264,13 @@ impl BaseNodeExtension for BaseRpcExtension { .clone(); fb.start(); - let mut flashblocks_client = FlashblocksSubscriber::new(fb.clone(), ws_url); + // Create composite receiver that forwards to both flashblocks state and metering + let metering_sender = + metering_runtime.as_ref().map(|rt| rt.flashblock_sender.clone()); + let receiver = + Arc::new(CompositeFlashblocksReceiver::new(fb.clone(), metering_sender)); + + let mut flashblocks_client = FlashblocksSubscriber::new(receiver, ws_url); flashblocks_client.start(); let api_ext = EthApiExt::new( From 5eed365325b682d5b35e0148540e91d4060181b4 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 01:35:45 -0600 Subject: [PATCH 06/14] Wire OpDAConfig to priority fee estimator - Add Kafka startup warning when metering is enabled but Kafka is not configured - Wire shared OpDAConfig from BaseNodeConfig to OpNode and PriorityFeeEstimator so that miner_setMaxDASize affects priority fee estimation - Add doc comment for metering-da-bytes explaining dynamic override capability - Add reth-optimism-payload-builder dependency to runner and node crates --- Cargo.lock | 2 ++ bin/node/Cargo.toml | 1 + bin/node/src/cli.rs | 9 ++++++++- crates/runner/Cargo.toml | 1 + crates/runner/src/config.rs | 3 +++ crates/runner/src/extensions/rpc.rs | 15 ++++++++++++++- crates/runner/src/runner.rs | 3 ++- 7 files changed, 31 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e7b8e86..d1557557 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1576,6 +1576,7 @@ dependencies = [ "reth-cli-util", "reth-optimism-cli", "reth-optimism-node", + "reth-optimism-payload-builder", ] [[package]] @@ -1659,6 +1660,7 @@ dependencies = [ "reth-exex", "reth-optimism-chainspec", "reth-optimism-node", + "reth-optimism-payload-builder", "tokio", 
"tracing", "url", diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 2782995f..bc760def 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -21,6 +21,7 @@ base-reth-runner.workspace = true reth-optimism-node.workspace = true reth-optimism-cli.workspace = true reth-cli-util.workspace = true +reth-optimism-payload-builder.workspace = true # misc clap.workspace = true diff --git a/bin/node/src/cli.rs b/bin/node/src/cli.rs index b4abf499..c944f687 100644 --- a/bin/node/src/cli.rs +++ b/bin/node/src/cli.rs @@ -8,6 +8,7 @@ use base_reth_runner::{ }; use once_cell::sync::OnceCell; use reth_optimism_node::args::RollupArgs; +use reth_optimism_payload_builder::config::OpDAConfig; /// CLI Arguments #[derive(Debug, Clone, PartialEq, clap::Args)] @@ -73,7 +74,8 @@ pub struct Args { #[arg(long = "metering-state-root-time-us")] pub metering_state_root_time_us: Option, - /// Data availability bytes limit per flashblock + /// Data availability bytes limit per flashblock (default). + /// This value is used when `miner_setMaxDASize` has not been called. #[arg(long = "metering-da-bytes", default_value = "120000")] pub metering_da_bytes: u64, @@ -135,6 +137,10 @@ impl From for BaseNodeConfig { cache_size: args.metering_cache_size, }; + // Create shared DA config. This is shared between the payload builder and the + // priority fee estimator, allowing miner_setMaxDASize to affect both. 
+ let da_config = OpDAConfig::default(); + Self { rollup_args: args.rollup_args, flashblocks, @@ -145,6 +151,7 @@ impl From for BaseNodeConfig { metering_enabled: args.enable_metering, metering, flashblocks_cell, + da_config, } } } diff --git a/crates/runner/Cargo.toml b/crates/runner/Cargo.toml index a07eaa9d..59b887e2 100644 --- a/crates/runner/Cargo.toml +++ b/crates/runner/Cargo.toml @@ -23,6 +23,7 @@ reth-db.workspace = true reth-exex.workspace = true reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true +reth-optimism-payload-builder.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/runner/src/config.rs b/crates/runner/src/config.rs index f3bac8ad..a79ea54f 100644 --- a/crates/runner/src/config.rs +++ b/crates/runner/src/config.rs @@ -1,6 +1,7 @@ //! Contains the Base node configuration structures. use reth_optimism_node::args::RollupArgs; +use reth_optimism_payload_builder::config::OpDAConfig; use crate::extensions::FlashblocksCell; @@ -19,6 +20,8 @@ pub struct BaseNodeConfig { pub metering: MeteringConfig, /// Shared Flashblocks state cache. pub flashblocks_cell: FlashblocksCell, + /// Shared DA config for dynamic updates via `miner_setMaxDASize`. + pub da_config: OpDAConfig, } impl BaseNodeConfig { diff --git a/crates/runner/src/extensions/rpc.rs b/crates/runner/src/extensions/rpc.rs index 696a92ed..3a4e99c1 100644 --- a/crates/runner/src/extensions/rpc.rs +++ b/crates/runner/src/extensions/rpc.rs @@ -13,6 +13,7 @@ use base_reth_rpc::{ }; use parking_lot::RwLock; use rdkafka::ClientConfig; +use reth_optimism_payload_builder::config::OpDAConfig; use tokio::sync::mpsc; use tracing::{error, info, warn}; use url::Url; @@ -122,6 +123,8 @@ pub struct BaseRpcExtension { pub metering: MeteringConfig, /// Sequencer RPC endpoint for transaction status proxying. pub sequencer_rpc: Option, + /// Shared DA config for dynamic updates via `miner_setMaxDASize`. 
+ pub da_config: OpDAConfig, } impl BaseRpcExtension { @@ -132,6 +135,7 @@ impl BaseRpcExtension { flashblocks: config.flashblocks.clone(), metering: config.metering.clone(), sequencer_rpc: config.rollup_args.sequencer.clone(), + da_config: config.da_config.clone(), } } } @@ -143,8 +147,17 @@ impl BaseNodeExtension for BaseRpcExtension { let flashblocks = self.flashblocks.clone(); let metering = self.metering.clone(); let sequencer_rpc = self.sequencer_rpc.clone(); + let da_config = self.da_config.clone(); builder.extend_rpc_modules(move |ctx| { + // Warn if metering is enabled but Kafka is not configured + if metering.enabled && metering.kafka.is_none() { + warn!( + message = "Metering enabled but Kafka not configured", + help = "Priority fee estimation requires --metering-kafka-brokers, --metering-kafka-topic, and --metering-kafka-group-id" + ); + } + // Set up metering runtime if enabled with Kafka let metering_runtime = if metering.enabled && metering.kafka.is_some() { info!(message = "Starting Metering RPC with priority fee estimation"); @@ -165,7 +178,7 @@ impl BaseNodeExtension for BaseRpcExtension { metering.priority_fee_percentile, limits, default_fee, - None, // Dynamic DA config not wired yet + Some(da_config.clone()), )); // Create channels for the annotator diff --git a/crates/runner/src/runner.rs b/crates/runner/src/runner.rs index e33f2ec4..9f0dcaca 100644 --- a/crates/runner/src/runner.rs +++ b/crates/runner/src/runner.rs @@ -56,7 +56,8 @@ impl BaseNodeRunner { ) -> Result> { info!(target: "base-runner", "starting custom Base node"); - let op_node = OpNode::new(config.rollup_args.clone()); + let op_node = + OpNode::new(config.rollup_args.clone()).with_da_config(config.da_config.clone()); let builder = builder .with_types_and_provider::>() From 149e096e08341cd573fde804a3ffd80460171e20 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 01:56:04 -0600 Subject: [PATCH 07/14] Pre-sort transactions in cache by priority fee descending - 
Change FlashblockMetrics storage from IndexMap to sorted Vec - Use binary search insertion to maintain descending order by priority fee - Remove redundant sorting in compute_estimate (now expects pre-sorted input) - Rename upsert_transaction to insert_transaction (upserts weren't used) - Remove unused ResourceTotals::subtract method - Keep sort only for aggregate resources (merging multiple flashblocks) --- crates/rpc/src/base/annotator.rs | 2 +- crates/rpc/src/base/cache.rs | 133 +++++++++---------------------- crates/rpc/src/base/estimator.rs | 72 ++++++++--------- 3 files changed, 74 insertions(+), 133 deletions(-) diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs index 5d9c8a5a..53a92b3b 100644 --- a/crates/rpc/src/base/annotator.rs +++ b/crates/rpc/src/base/annotator.rs @@ -107,7 +107,7 @@ impl ResourceAnnotator { let mut cache = self.cache.write(); for tx_hash in &event.ordered_tx_hashes { if let Some(tx) = self.pending_transactions.shift_remove(tx_hash) { - cache.upsert_transaction(event.block_number, event.flashblock_index, tx); + cache.insert_transaction(event.block_number, event.flashblock_index, tx); matched += 1; } else { missed += 1; diff --git a/crates/rpc/src/base/cache.rs b/crates/rpc/src/base/cache.rs index 3e5b4f6a..eb121210 100644 --- a/crates/rpc/src/base/cache.rs +++ b/crates/rpc/src/base/cache.rs @@ -1,9 +1,11 @@ //! In-memory cache for metering data used by the priority fee estimator. +//! +//! Transactions are stored sorted by priority fee (descending) so the estimator +//! can iterate from highest to lowest fee without re-sorting on each request. use std::collections::{BTreeMap, HashMap, VecDeque}; use alloy_primitives::{B256, U256}; -use indexmap::IndexMap; /// A metered transaction with resource consumption data. 
#[derive(Debug, Clone)] @@ -57,25 +59,19 @@ impl ResourceTotals { self.data_availability_bytes = self.data_availability_bytes.saturating_add(tx.data_availability_bytes); } - - fn subtract(&mut self, tx: &MeteredTransaction) { - self.gas_used = self.gas_used.saturating_sub(tx.gas_used); - self.execution_time_us = self.execution_time_us.saturating_sub(tx.execution_time_us); - self.state_root_time_us = self.state_root_time_us.saturating_sub(tx.state_root_time_us); - self.data_availability_bytes = - self.data_availability_bytes.saturating_sub(tx.data_availability_bytes); - } } /// Metrics for a single flashblock within a block. +/// +/// Transactions are stored sorted by priority fee in descending order (highest first). #[derive(Debug)] pub struct FlashblockMetrics { /// Block number. pub block_number: u64, /// Flashblock index within the block. pub flashblock_index: u64, - /// Transactions keyed by hash in insertion order. - transactions: IndexMap, + /// Transactions sorted by priority fee descending. + transactions: Vec, totals: ResourceTotals, } @@ -85,28 +81,20 @@ impl FlashblockMetrics { Self { block_number, flashblock_index, - transactions: IndexMap::new(), + transactions: Vec::new(), totals: ResourceTotals::default(), } } - /// Inserts or updates a transaction. - pub fn upsert_transaction(&mut self, tx: MeteredTransaction) { - let tx_hash = tx.tx_hash; - if let Some(existing) = self.transactions.get(&tx_hash) { - self.totals.subtract(existing); - } + /// Inserts a transaction, maintaining descending sort order by priority fee. + pub fn insert_transaction(&mut self, tx: MeteredTransaction) { self.totals.accumulate(&tx); - self.transactions.insert(tx_hash, tx); - } - - /// Removes a transaction by hash. 
- pub fn remove_transaction(&mut self, tx_hash: &B256) -> Option { - let removed = self.transactions.shift_remove(tx_hash); - if let Some(ref tx) = removed { - self.totals.subtract(tx); - } - removed + // Binary search for insertion point (descending order) + let pos = self + .transactions + .binary_search_by(|probe| tx.priority_fee_per_gas.cmp(&probe.priority_fee_per_gas)) + .unwrap_or_else(|pos| pos); + self.transactions.insert(pos, tx); } /// Returns the resource totals for this flashblock. @@ -114,16 +102,9 @@ impl FlashblockMetrics { self.totals } - /// Iterates over all transactions. - pub fn transactions(&self) -> impl Iterator { - self.transactions.values() - } - - /// Returns transactions sorted by priority fee (ascending). - pub fn transactions_sorted_by_priority_fee(&self) -> Vec<&MeteredTransaction> { - let mut txs: Vec<&MeteredTransaction> = self.transactions.values().collect(); - txs.sort_by(|a, b| a.priority_fee_per_gas.cmp(&b.priority_fee_per_gas)); - txs + /// Returns transactions sorted by priority fee descending (highest first). + pub fn transactions(&self) -> &[MeteredTransaction] { + &self.transactions } /// Returns the number of transactions. @@ -162,11 +143,6 @@ impl BlockMetrics { self.flashblocks.values() } - /// Returns the flashblock at the given index. - pub fn flashblock(&self, flashblock_index: u64) -> Option<&FlashblockMetrics> { - self.flashblocks.get(&flashblock_index) - } - /// Returns a mutable reference to the flashblock, creating it if necessary. /// Returns `(flashblock, is_new)`. pub fn flashblock_mut(&mut self, flashblock_index: u64) -> (&mut FlashblockMetrics, bool) { @@ -237,17 +213,8 @@ impl MeteringCache { self.blocks.get_mut(*self.block_index.get(&block_number).unwrap()).unwrap() } - /// Returns the flashblock metrics for the given block and flashblock index. 
- pub fn flashblock( - &self, - block_number: u64, - flashblock_index: u64, - ) -> Option<&FlashblockMetrics> { - self.block(block_number).and_then(|block| block.flashblock(flashblock_index)) - } - - /// Inserts or updates a transaction in the cache. - pub fn upsert_transaction( + /// Inserts a transaction into the cache. + pub fn insert_transaction( &mut self, block_number: u64, flashblock_index: u64, @@ -255,24 +222,10 @@ impl MeteringCache { ) { let block = self.block_mut(block_number); let (flashblock, _) = block.flashblock_mut(flashblock_index); - flashblock.upsert_transaction(tx); + flashblock.insert_transaction(tx); block.recompute_totals(); } - /// Removes a transaction from the cache. - pub fn remove_transaction( - &mut self, - block_number: u64, - flashblock_index: u64, - tx_hash: &B256, - ) -> Option { - let block = self.block_mut(block_number); - let (flashblock, _) = block.flashblock_mut(flashblock_index); - let removed = flashblock.remove_transaction(tx_hash); - block.recompute_totals(); - removed - } - /// Returns the number of cached blocks. 
pub fn len(&self) -> usize { self.blocks.len() @@ -331,53 +284,41 @@ mod tests { fn insert_and_retrieve_transactions() { let mut cache = MeteringCache::new(12); let tx1 = test_tx(1, 2); - cache.upsert_transaction(100, 0, tx1.clone()); + cache.insert_transaction(100, 0, tx1.clone()); let block = cache.block(100).unwrap(); let flashblock = block.flashblocks().next().unwrap(); assert_eq!(flashblock.len(), 1); - assert_eq!(flashblock.transactions().next().unwrap().tx_hash, tx1.tx_hash); + assert_eq!(flashblock.transactions()[0].tx_hash, tx1.tx_hash); } #[test] - fn replaces_existing_transaction() { + fn transactions_sorted_descending_by_priority_fee() { let mut cache = MeteringCache::new(12); - let mut tx1 = test_tx(1, 2); - cache.upsert_transaction(100, 0, tx1.clone()); - tx1.gas_used = 42; - cache.upsert_transaction(100, 0, tx1.clone()); + // Insert in random order + cache.insert_transaction(100, 0, test_tx(1, 10)); + cache.insert_transaction(100, 0, test_tx(2, 30)); + cache.insert_transaction(100, 0, test_tx(3, 20)); let block = cache.block(100).unwrap(); let flashblock = block.flashblocks().next().unwrap(); - assert_eq!(flashblock.len(), 1); - assert_eq!(flashblock.transactions().next().unwrap().gas_used, tx1.gas_used); + let fees: Vec<_> = flashblock + .transactions() + .iter() + .map(|tx| tx.priority_fee_per_gas) + .collect(); + // Should be sorted descending: 30, 20, 10 + assert_eq!(fees, vec![U256::from(30u64), U256::from(20u64), U256::from(10u64)]); } #[test] fn evicts_old_blocks() { let mut cache = MeteringCache::new(2); for block_number in 0..3u64 { - cache.upsert_transaction(block_number, 0, test_tx(block_number, block_number)); + cache.insert_transaction(block_number, 0, test_tx(block_number, block_number)); } assert!(cache.block(0).is_none()); assert!(cache.block(1).is_some()); assert!(cache.block(2).is_some()); } - - #[test] - fn transactions_sorted_by_priority_fee() { - let mut cache = MeteringCache::new(12); - cache.upsert_transaction(100, 0, 
test_tx(1, 30)); - cache.upsert_transaction(100, 0, test_tx(2, 10)); - cache.upsert_transaction(100, 0, test_tx(3, 20)); - - let block = cache.block(100).unwrap(); - let flashblock = block.flashblocks().next().unwrap(); - let sorted: Vec<_> = flashblock - .transactions_sorted_by_priority_fee() - .iter() - .map(|tx| tx.priority_fee_per_gas) - .collect(); - assert_eq!(sorted, vec![U256::from(10u64), U256::from(20u64), U256::from(30u64)]); - } } diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index 9e0b5fc1..86b79e14 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -328,18 +328,17 @@ impl PriorityFeeEstimator { let block_number = block_metrics.block_number; - // Materialise sorted transactions per flashblock so we can drop the lock before - // running the estimation logic. + // Clone transactions per flashblock so we can drop the lock. + // Transactions are pre-sorted descending by priority fee in the cache. let mut flashblock_transactions = Vec::new(); let mut total_tx_count = 0usize; for flashblock in block_metrics.flashblocks() { - let sorted: Vec = - flashblock.transactions_sorted_by_priority_fee().into_iter().cloned().collect(); - if sorted.is_empty() { + let txs: Vec = flashblock.transactions().to_vec(); + if txs.is_empty() { continue; } - total_tx_count += sorted.len(); - flashblock_transactions.push((flashblock.flashblock_index, sorted)); + total_tx_count += txs.len(); + flashblock_transactions.push((flashblock.flashblock_index, txs)); } drop(cache_guard); @@ -347,13 +346,13 @@ impl PriorityFeeEstimator { return Ok(None); } - // Build the aggregate list for use-it-or-lose-it resources by collecting references - // to avoid cloning transactions twice. + // Build the aggregate list for use-it-or-lose-it resources. + // Need to sort since we're combining multiple pre-sorted flashblocks. 
let mut aggregate_refs: Vec<&MeteredTransaction> = Vec::with_capacity(total_tx_count); for (_, txs) in &flashblock_transactions { aggregate_refs.extend(txs.iter()); } - aggregate_refs.sort_by(|a, b| a.priority_fee_per_gas.cmp(&b.priority_fee_per_gas)); + aggregate_refs.sort_by(|a, b| b.priority_fee_per_gas.cmp(&a.priority_fee_per_gas)); let mut flashblock_estimates = Vec::new(); @@ -479,18 +478,22 @@ impl PriorityFeeEstimator { /// Core estimation algorithm (top-down approach). /// -/// Given a list of transactions and a resource limit, determines the minimum priority +/// Given a sorted list of transactions and a resource limit, determines the minimum priority /// fee needed to be included alongside enough high-paying transactions while still /// leaving room for the bundle's demand. /// +/// # Arguments +/// +/// * `transactions` - Must be sorted by priority fee descending (highest first) +/// /// # Algorithm /// -/// 1. Sort transactions from highest to lowest priority fee. -/// 2. Walk from the top, subtracting each transaction's usage from remaining capacity. -/// 3. Stop when including another transaction would leave less capacity than the bundle needs. -/// 4. The threshold fee is the fee of the last included transaction (the minimum fee +/// 1. Walk from highest-paying transactions, subtracting each transaction's usage from +/// remaining capacity. +/// 2. Stop when including another transaction would leave less capacity than the bundle needs. +/// 3. The threshold fee is the fee of the last included transaction (the minimum fee /// among transactions that would be included alongside the bundle). -/// 5. If we include all transactions and still have capacity >= demand, the resource is +/// 4. If we include all transactions and still have capacity >= demand, the resource is /// not congested, so return the configured default fee. /// /// Returns `Err` if the bundle's demand exceeds the resource limit. 
@@ -519,17 +522,13 @@ fn compute_estimate( }); } - // Sort transactions by priority fee descending (highest first). - let mut sorted: Vec<_> = transactions.to_vec(); - sorted.sort_by(|a, b| b.priority_fee_per_gas.cmp(&a.priority_fee_per_gas)); - // Walk from highest-paying transactions, subtracting usage from remaining capacity. // Stop when we can no longer fit another transaction while leaving room for demand. let mut remaining = limit; let mut included_usage = 0u128; let mut last_included_idx: Option = None; - for (idx, tx) in sorted.iter().enumerate() { + for (idx, tx) in transactions.iter().enumerate() { let usage = usage_fn(tx); // Check if we can include this transaction and still have room for the bundle. @@ -544,15 +543,16 @@ fn compute_estimate( } // If we included all transactions and still have room, resource is not congested. - let is_uncongested = last_included_idx == Some(sorted.len() - 1) && remaining >= demand; + let is_uncongested = + last_included_idx == Some(transactions.len() - 1) && remaining >= demand; if is_uncongested { return Ok(ResourceEstimate { threshold_priority_fee: default_fee, recommended_priority_fee: default_fee, cumulative_usage: included_usage, - threshold_tx_count: sorted.len(), - total_transactions: sorted.len(), + threshold_tx_count: transactions.len(), + total_transactions: transactions.len(), }); } @@ -560,11 +560,11 @@ fn compute_estimate( Some(idx) => { // At least one transaction fits alongside the bundle. // The threshold is the fee of the last included transaction. - let threshold_fee = sorted[idx].priority_fee_per_gas; + let threshold_fee = transactions[idx].priority_fee_per_gas; // For recommended fee, look at included transactions (those above threshold) // and pick one at the specified percentile for a safety margin. 
- let included = &sorted[..=idx]; + let included = &transactions[..=idx]; let percentile = percentile.clamp(0.0, 1.0); let recommended_fee = if included.len() <= 1 { threshold_fee @@ -580,7 +580,7 @@ fn compute_estimate( // No transactions fit - even the first transaction would crowd out // the bundle. The bundle must beat the highest fee to be included. // Report 0 supporting transactions since none were actually included. - let threshold_fee = sorted[0].priority_fee_per_gas; + let threshold_fee = transactions[0].priority_fee_per_gas; (0, threshold_fee, threshold_fee) } }; @@ -590,7 +590,7 @@ fn compute_estimate( recommended_priority_fee: recommended_fee, cumulative_usage: included_usage, threshold_tx_count: supporting_count, - total_transactions: sorted.len(), + total_transactions: transactions.len(), }) } @@ -837,8 +837,8 @@ mod tests { let (cache, estimator) = setup_estimator(DEFAULT_LIMITS); { let mut guard = cache.write(); - guard.upsert_transaction(1, 0, tx(10, 10)); - guard.upsert_transaction(1, 0, tx(5, 10)); + guard.insert_transaction(1, 0, tx(10, 10)); + guard.insert_transaction(1, 0, tx(5, 10)); } let mut demand = ResourceDemand::default(); demand.gas_used = Some(15); @@ -858,8 +858,8 @@ mod tests { let (cache, estimator) = setup_estimator(limits); { let mut guard = cache.write(); - guard.upsert_transaction(1, 0, tx(10, 10)); - guard.upsert_transaction(1, 0, tx(5, 10)); + guard.insert_transaction(1, 0, tx(10, 10)); + guard.insert_transaction(1, 0, tx(5, 10)); } let mut demand = ResourceDemand::default(); demand.gas_used = Some(15); @@ -883,11 +883,11 @@ mod tests { { let mut guard = cache.write(); // Block 1 → threshold 10 - guard.upsert_transaction(1, 0, tx(10, 10)); - guard.upsert_transaction(1, 0, tx(5, 10)); + guard.insert_transaction(1, 0, tx(10, 10)); + guard.insert_transaction(1, 0, tx(5, 10)); // Block 2 → threshold 30 - guard.upsert_transaction(2, 0, tx(30, 10)); - guard.upsert_transaction(2, 0, tx(25, 10)); + guard.insert_transaction(2, 0, 
tx(30, 10)); + guard.insert_transaction(2, 0, tx(25, 10)); } let mut demand = ResourceDemand::default(); From a0bae13403863c713c9460659ddb14e450763121 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:02:37 -0600 Subject: [PATCH 08/14] Use U256 for fee types and simplify naming - Change fee fields from String to U256 (matches Ethereum RPC conventions) - Rename RollingPriorityEstimates to RollingPriorityEstimate (singular) - Rename recommended_priority_fee to priority_fee (shorter, clearer) - Rename resource_estimates_to_vec to build_resource_estimate_responses - Update variable names to match singular type --- crates/rpc/src/base/estimator.rs | 16 ++++++------ crates/rpc/src/base/metered_fee_types.rs | 31 ++++++++++++------------ crates/rpc/src/lib.rs | 2 +- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index 86b79e14..f7d46e19 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -244,15 +244,15 @@ pub struct BlockPriorityEstimates { pub max_across_flashblocks: ResourceEstimates, } -/// Rolling estimates aggregated across multiple recent blocks. +/// Priority fee estimate aggregated across multiple recent blocks. #[derive(Debug, Clone)] -pub struct RollingPriorityEstimates { +pub struct RollingPriorityEstimate { /// Number of blocks that contributed to this estimate. pub blocks_sampled: usize, /// Per-resource estimates (median across sampled blocks). pub estimates: ResourceEstimates, - /// Single recommended fee: maximum across all resources. - pub recommended_priority_fee: U256, + /// Recommended priority fee: maximum across all resources. + pub priority_fee: U256, } /// Computes resource fee estimates based on cached flashblock metering data. 
@@ -412,7 +412,7 @@ impl PriorityFeeEstimator { pub fn estimate_rolling( &self, demand: ResourceDemand, - ) -> Result, EstimateError> { + ) -> Result, EstimateError> { let cache_guard = self.cache.read(); let block_numbers: Vec = cache_guard.blocks_desc().map(|b| b.block_number).collect(); drop(cache_guard); @@ -468,10 +468,10 @@ impl PriorityFeeEstimator { return Ok(None); } - Ok(Some(RollingPriorityEstimates { + Ok(Some(RollingPriorityEstimate { blocks_sampled: block_numbers.len(), estimates, - recommended_priority_fee: max_fee, + priority_fee: max_fee, })) } } @@ -900,6 +900,6 @@ mod tests { let gas_estimate = rolling.estimates.gas_used.expect("gas estimate present"); // Median across [10, 30] = 30 (upper median for even count) assert_eq!(gas_estimate.recommended_priority_fee, U256::from(30)); - assert_eq!(rolling.recommended_priority_fee, U256::from(30)); + assert_eq!(rolling.priority_fee, U256::from(30)); } } diff --git a/crates/rpc/src/base/metered_fee_types.rs b/crates/rpc/src/base/metered_fee_types.rs index 4d00475f..21e31450 100644 --- a/crates/rpc/src/base/metered_fee_types.rs +++ b/crates/rpc/src/base/metered_fee_types.rs @@ -1,8 +1,9 @@ //! Response types for the metered priority fee RPC endpoint. +use alloy_primitives::U256; use tips_core::types::MeterBundleResponse; -use crate::{ResourceEstimates, ResourceKind, RollingPriorityEstimates}; +use crate::{ResourceEstimates, RollingPriorityEstimate}; /// Human-friendly representation of a resource fee quote. #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] @@ -11,11 +12,11 @@ pub struct ResourceFeeEstimateResponse { /// Resource name (gasUsed, executionTime, etc). pub resource: String, /// Minimum fee to displace enough capacity. - pub threshold_priority_fee: String, + pub threshold_priority_fee: U256, /// Recommended fee with safety margin. - pub recommended_priority_fee: String, + pub recommended_priority_fee: U256, /// Cumulative resource usage above threshold. 
- pub cumulative_usage: String, + pub cumulative_usage: U256, /// Number of transactions above threshold. pub threshold_tx_count: u64, /// Total transactions considered. @@ -29,37 +30,37 @@ pub struct MeteredPriorityFeeResponse { /// Bundled metering results. #[serde(flatten)] pub meter_bundle: MeterBundleResponse, - /// Single recommended priority fee (max across all resources and median across recent blocks). - pub recommended_priority_fee: String, + /// Recommended priority fee (max across all resources and median across recent blocks). + pub priority_fee: U256, /// Number of recent blocks used to compute the rolling estimate. pub blocks_sampled: u64, /// Per-resource estimates (median across sampled blocks). pub resource_estimates: Vec, } -/// Converts rolling estimates to the response format. +/// Converts a rolling estimate to the response format. pub fn build_priority_fee_response( meter_bundle: MeterBundleResponse, - estimates: RollingPriorityEstimates, + estimate: RollingPriorityEstimate, ) -> MeteredPriorityFeeResponse { - let resource_estimates = resource_estimates_to_vec(&estimates.estimates); + let resource_estimates = build_resource_estimate_responses(&estimate.estimates); MeteredPriorityFeeResponse { meter_bundle, - recommended_priority_fee: estimates.recommended_priority_fee.to_string(), - blocks_sampled: estimates.blocks_sampled as u64, + priority_fee: estimate.priority_fee, + blocks_sampled: estimate.blocks_sampled as u64, resource_estimates, } } -fn resource_estimates_to_vec(estimates: &ResourceEstimates) -> Vec { +fn build_resource_estimate_responses(estimates: &ResourceEstimates) -> Vec { estimates .iter() .map(|(kind, est)| ResourceFeeEstimateResponse { resource: kind.as_camel_case().to_string(), - threshold_priority_fee: est.threshold_priority_fee.to_string(), - recommended_priority_fee: est.recommended_priority_fee.to_string(), - cumulative_usage: est.cumulative_usage.to_string(), + threshold_priority_fee: est.threshold_priority_fee, + 
recommended_priority_fee: est.recommended_priority_fee, + cumulative_usage: U256::from(est.cumulative_usage), threshold_tx_count: est.threshold_tx_count.try_into().unwrap_or(u64::MAX), total_transactions: est.total_transactions.try_into().unwrap_or(u64::MAX), }) diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 5165e035..77c9f32f 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -13,7 +13,7 @@ pub use base::{ estimator::{ BlockPriorityEstimates, EstimateError, FlashblockResourceEstimates, PriorityFeeEstimator, ResourceDemand, ResourceEstimate, ResourceEstimates, ResourceKind, ResourceLimits, - RollingPriorityEstimates, + RollingPriorityEstimate, }, kafka::{KafkaBundleConsumer, KafkaBundleConsumerConfig}, meter::meter_bundle, From fb4a308d966082f6aee93c754e1f754134183b2b Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:06:45 -0600 Subject: [PATCH 09/14] Remove normal activity metrics from metering pipeline Keep only exceptional/diagnostic metrics: - metering.kafka.errors_total (errors) - metering.kafka.lag_ms (monitoring) - metering.kafka.messages_skipped (data issues) - metering.pending.evicted (capacity issues) - metering.pending.size (queue depth monitoring) - metering.streams.tx_misses_total (correlation failures) Remove normal activity counters: - metering.kafka.messages_total - metering.kafka.tx_events_total - metering.streams.flashblocks_total - metering.streams.tx_matched_total --- crates/rpc/src/base/annotator.rs | 3 --- crates/rpc/src/base/kafka.rs | 1 - 2 files changed, 4 deletions(-) diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs index 53a92b3b..54ac62d6 100644 --- a/crates/rpc/src/base/annotator.rs +++ b/crates/rpc/src/base/annotator.rs @@ -94,11 +94,9 @@ impl ResourceAnnotator { } metrics::gauge!("metering.pending.size").set(self.pending_transactions.len() as f64); - metrics::counter!("metering.kafka.tx_events_total").increment(1); } fn handle_flashblock_event(&mut 
self, event: FlashblockInclusion) { - metrics::counter!("metering.streams.flashblocks_total").increment(1); let mut matched = 0usize; let mut missed = 0usize; @@ -138,6 +136,5 @@ impl ResourceAnnotator { } metrics::gauge!("metering.pending.size").set(self.pending_transactions.len() as f64); - metrics::counter!("metering.streams.tx_matched_total").increment(matched as u64); } } diff --git a/crates/rpc/src/base/kafka.rs b/crates/rpc/src/base/kafka.rs index 59803124..72e52b0f 100644 --- a/crates/rpc/src/base/kafka.rs +++ b/crates/rpc/src/base/kafka.rs @@ -93,7 +93,6 @@ impl KafkaBundleConsumer { message.payload().ok_or_else(|| eyre::eyre!("Kafka message missing payload"))?; let bundle: AcceptedBundle = serde_json::from_slice(payload)?; - metrics::counter!("metering.kafka.messages_total").increment(1); if let Some(ts) = message.timestamp().to_millis() { let now_ms = Utc::now().timestamp_millis(); From 72f0667365e6ac4aa7c3779bffc4d7cf32752466 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:20:38 -0600 Subject: [PATCH 10/14] Add reorg detection and cache clearing to metering pipeline - Add contains_block() and clear_blocks_from() methods to MeteringCache - Detect reorg when flashblock_index=0 arrives for existing block - Clear affected blocks from cache on reorg detection - Add metering.cache.reorgs_detected metric - Add unit and integration tests for reorg handling --- crates/rpc/src/base/annotator.rs | 140 +++++++++++++++++++++++ crates/rpc/src/base/cache.rs | 94 ++++++++++++++- crates/rpc/src/base/estimator.rs | 3 +- crates/rpc/src/base/metered_fee_types.rs | 4 +- 4 files changed, 233 insertions(+), 8 deletions(-) diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs index 54ac62d6..07976777 100644 --- a/crates/rpc/src/base/annotator.rs +++ b/crates/rpc/src/base/annotator.rs @@ -97,6 +97,18 @@ impl ResourceAnnotator { } fn handle_flashblock_event(&mut self, event: FlashblockInclusion) { + // Reorg detection: 
flashblock_index=0 for existing block indicates reorg + if event.flashblock_index == 0 && self.cache.read().contains_block(event.block_number) { + let cleared = self.cache.write().clear_blocks_from(event.block_number); + + warn!( + target: "metering::annotator", + block_number = event.block_number, + blocks_cleared = cleared, + "Reorg detected: cleared cache from block" + ); + metrics::counter!("metering.cache.reorgs_detected").increment(1); + } let mut matched = 0usize; let mut missed = 0usize; @@ -138,3 +150,131 @@ impl ResourceAnnotator { metrics::gauge!("metering.pending.size").set(self.pending_transactions.len() as f64); } } + +#[cfg(test)] +mod tests { + use alloy_primitives::{B256, U256}; + use tokio::sync::mpsc; + + use super::*; + + fn test_tx(hash: u64, priority: u64) -> MeteredTransaction { + let mut hash_bytes = [0u8; 32]; + hash_bytes[24..].copy_from_slice(&hash.to_be_bytes()); + MeteredTransaction { + tx_hash: B256::new(hash_bytes), + priority_fee_per_gas: U256::from(priority), + gas_used: 10, + execution_time_us: 5, + state_root_time_us: 7, + data_availability_bytes: 20, + } + } + + fn test_flashblock( + block_number: u64, + flashblock_index: u64, + hashes: Vec, + ) -> FlashblockInclusion { + FlashblockInclusion { + block_number, + flashblock_index, + ordered_tx_hashes: hashes + .into_iter() + .map(|h| { + let mut hash_bytes = [0u8; 32]; + hash_bytes[24..].copy_from_slice(&h.to_be_bytes()); + B256::new(hash_bytes) + }) + .collect(), + } + } + + #[tokio::test] + async fn reorg_clears_affected_blocks() { + let cache = Arc::new(RwLock::new(MeteringCache::new(10))); + let (tx_sender, tx_rx) = mpsc::unbounded_channel(); + let (fb_sender, fb_rx) = mpsc::unbounded_channel(); + + let mut annotator = ResourceAnnotator::new(cache.clone(), tx_rx, fb_rx); + + // Pre-populate cache with blocks 100, 101, 102 + { + let mut c = cache.write(); + c.insert_transaction(100, 0, test_tx(1, 10)); + c.insert_transaction(101, 0, test_tx(2, 20)); + c.insert_transaction(102, 
0, test_tx(3, 30)); + } + + assert!(cache.read().contains_block(100)); + assert!(cache.read().contains_block(101)); + assert!(cache.read().contains_block(102)); + + // Send flashblock_index=0 for existing block 101 (simulates reorg) + let event = test_flashblock(101, 0, vec![]); + annotator.handle_flashblock_event(event); + + // Blocks 101 and 102 should be cleared, block 100 should remain + assert!(cache.read().contains_block(100)); + assert!(!cache.read().contains_block(101)); + assert!(!cache.read().contains_block(102)); + + drop(tx_sender); + drop(fb_sender); + } + + #[tokio::test] + async fn non_zero_flashblock_does_not_trigger_reorg() { + let cache = Arc::new(RwLock::new(MeteringCache::new(10))); + let (tx_sender, tx_rx) = mpsc::unbounded_channel(); + let (fb_sender, fb_rx) = mpsc::unbounded_channel(); + + let mut annotator = ResourceAnnotator::new(cache.clone(), tx_rx, fb_rx); + + // Pre-populate cache with block 100 + { + let mut c = cache.write(); + c.insert_transaction(100, 0, test_tx(1, 10)); + } + + assert!(cache.read().contains_block(100)); + + // Send flashblock_index=1 for existing block 100 (not a reorg signal) + let event = test_flashblock(100, 1, vec![]); + annotator.handle_flashblock_event(event); + + // Block 100 should still exist + assert!(cache.read().contains_block(100)); + + drop(tx_sender); + drop(fb_sender); + } + + #[tokio::test] + async fn flashblock_zero_for_new_block_does_not_trigger_reorg() { + let cache = Arc::new(RwLock::new(MeteringCache::new(10))); + let (tx_sender, tx_rx) = mpsc::unbounded_channel(); + let (fb_sender, fb_rx) = mpsc::unbounded_channel(); + + let mut annotator = ResourceAnnotator::new(cache.clone(), tx_rx, fb_rx); + + // Pre-populate cache with block 100 + { + let mut c = cache.write(); + c.insert_transaction(100, 0, test_tx(1, 10)); + } + + assert!(cache.read().contains_block(100)); + assert!(!cache.read().contains_block(101)); + + // Send flashblock_index=0 for NEW block 101 (not a reorg, just a new block) + let 
event = test_flashblock(101, 0, vec![]); + annotator.handle_flashblock_event(event); + + // Block 100 should still exist (no reorg happened) + assert!(cache.read().contains_block(100)); + + drop(tx_sender); + drop(fb_sender); + } +} diff --git a/crates/rpc/src/base/cache.rs b/crates/rpc/src/base/cache.rs index eb121210..0d925919 100644 --- a/crates/rpc/src/base/cache.rs +++ b/crates/rpc/src/base/cache.rs @@ -241,6 +241,30 @@ impl MeteringCache { self.blocks.iter().rev() } + /// Returns true if the specified block_number exists in the cache. + pub fn contains_block(&self, block_number: u64) -> bool { + self.block_index.contains_key(&block_number) + } + + /// Clears all blocks with block_number >= the specified value. + /// Returns the number of blocks cleared. + pub fn clear_blocks_from(&mut self, block_number: u64) -> usize { + let mut cleared = 0; + + // Remove from back to front (blocks stored oldest first) + while let Some(block) = self.blocks.back() { + if block.block_number >= block_number { + let removed = self.blocks.pop_back().unwrap(); + self.block_index.remove(&removed.block_number); + cleared += 1; + } else { + break; + } + } + + cleared + } + fn evict_if_needed(&mut self) { let mut evicted = false; while self.blocks.len() > self.max_blocks { @@ -302,11 +326,8 @@ mod tests { let block = cache.block(100).unwrap(); let flashblock = block.flashblocks().next().unwrap(); - let fees: Vec<_> = flashblock - .transactions() - .iter() - .map(|tx| tx.priority_fee_per_gas) - .collect(); + let fees: Vec<_> = + flashblock.transactions().iter().map(|tx| tx.priority_fee_per_gas).collect(); // Should be sorted descending: 30, 20, 10 assert_eq!(fees, vec![U256::from(30u64), U256::from(20u64), U256::from(10u64)]); } @@ -321,4 +342,67 @@ mod tests { assert!(cache.block(1).is_some()); assert!(cache.block(2).is_some()); } + + #[test] + fn contains_block_returns_correct_values() { + let mut cache = MeteringCache::new(10); + cache.insert_transaction(100, 0, test_tx(1, 10)); + 
cache.insert_transaction(101, 0, test_tx(2, 20)); + + assert!(cache.contains_block(100)); + assert!(cache.contains_block(101)); + assert!(!cache.contains_block(99)); + assert!(!cache.contains_block(102)); + } + + #[test] + fn clear_blocks_from_clears_subsequent_blocks() { + let mut cache = MeteringCache::new(10); + cache.insert_transaction(100, 0, test_tx(1, 10)); + cache.insert_transaction(101, 0, test_tx(2, 20)); + cache.insert_transaction(102, 0, test_tx(3, 30)); + + let cleared = cache.clear_blocks_from(101); + + assert_eq!(cleared, 2); + assert!(cache.contains_block(100)); + assert!(!cache.contains_block(101)); + assert!(!cache.contains_block(102)); + assert_eq!(cache.len(), 1); + } + + #[test] + fn clear_blocks_from_returns_zero_when_no_match() { + let mut cache = MeteringCache::new(10); + cache.insert_transaction(100, 0, test_tx(1, 10)); + cache.insert_transaction(101, 0, test_tx(2, 20)); + + let cleared = cache.clear_blocks_from(200); + + assert_eq!(cleared, 0); + assert_eq!(cache.len(), 2); + } + + #[test] + fn clear_blocks_from_clears_all_blocks() { + let mut cache = MeteringCache::new(10); + cache.insert_transaction(100, 0, test_tx(1, 10)); + cache.insert_transaction(101, 0, test_tx(2, 20)); + cache.insert_transaction(102, 0, test_tx(3, 30)); + + let cleared = cache.clear_blocks_from(100); + + assert_eq!(cleared, 3); + assert!(cache.is_empty()); + } + + #[test] + fn clear_blocks_from_handles_empty_cache() { + let mut cache = MeteringCache::new(10); + + let cleared = cache.clear_blocks_from(100); + + assert_eq!(cleared, 0); + assert!(cache.is_empty()); + } } diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index f7d46e19..fc87e8cb 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -543,8 +543,7 @@ fn compute_estimate( } // If we included all transactions and still have room, resource is not congested. 
- let is_uncongested = - last_included_idx == Some(transactions.len() - 1) && remaining >= demand; + let is_uncongested = last_included_idx == Some(transactions.len() - 1) && remaining >= demand; if is_uncongested { return Ok(ResourceEstimate { diff --git a/crates/rpc/src/base/metered_fee_types.rs b/crates/rpc/src/base/metered_fee_types.rs index 21e31450..1c86bc54 100644 --- a/crates/rpc/src/base/metered_fee_types.rs +++ b/crates/rpc/src/base/metered_fee_types.rs @@ -53,7 +53,9 @@ pub fn build_priority_fee_response( } } -fn build_resource_estimate_responses(estimates: &ResourceEstimates) -> Vec { +fn build_resource_estimate_responses( + estimates: &ResourceEstimates, +) -> Vec { estimates .iter() .map(|(kind, est)| ResourceFeeEstimateResponse { From e2d56c5737837976dbb1b709a8fd731644055bda Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:27:03 -0600 Subject: [PATCH 11/14] Simplify Kafka config to use properties file pattern - Remove --metering-kafka-brokers CLI arg (use properties file instead) - Make --metering-kafka-properties-file the only required arg for Kafka - Default --metering-kafka-topic to tips-ingress - Make --metering-kafka-group-id optional (overrides properties file) - Load all rdkafka settings from properties file instead of hardcoding --- bin/node/src/cli.rs | 38 ++++++++------------- crates/runner/src/config.rs | 14 ++++---- crates/runner/src/extensions/rpc.rs | 51 +++++++++++++++-------------- 3 files changed, 49 insertions(+), 54 deletions(-) diff --git a/bin/node/src/cli.rs b/bin/node/src/cli.rs index c944f687..d8909246 100644 --- a/bin/node/src/cli.rs +++ b/bin/node/src/cli.rs @@ -46,22 +46,20 @@ pub struct Args { pub enable_metering: bool, // --- Priority fee estimation args --- - /// Kafka brokers for metering bundle events (comma-separated) - #[arg(long = "metering-kafka-brokers")] - pub metering_kafka_brokers: Option, + /// Path to Kafka properties file (required for priority fee estimation). 
+ /// The properties file should contain rdkafka settings like bootstrap.servers, + /// group.id, session.timeout.ms, etc. + #[arg(long = "metering-kafka-properties-file")] + pub metering_kafka_properties_file: Option, /// Kafka topic for accepted bundle events - #[arg(long = "metering-kafka-topic")] - pub metering_kafka_topic: Option, + #[arg(long = "metering-kafka-topic", default_value = "tips-ingress")] + pub metering_kafka_topic: String, - /// Kafka consumer group ID + /// Kafka consumer group ID (overrides group.id in properties file if set) #[arg(long = "metering-kafka-group-id")] pub metering_kafka_group_id: Option, - /// Optional path to Kafka properties file - #[arg(long = "metering-kafka-properties-file")] - pub metering_kafka_properties_file: Option, - /// Gas limit per flashblock for priority fee estimation #[arg(long = "metering-gas-limit", default_value = "30000000")] pub metering_gas_limit: u64, @@ -108,20 +106,12 @@ impl From for BaseNodeConfig { max_pending_blocks_depth: args.max_pending_blocks_depth, }); - // Build Kafka config if all required fields are present - let kafka = match ( - args.metering_kafka_brokers, - args.metering_kafka_topic, - args.metering_kafka_group_id, - ) { - (Some(brokers), Some(topic), Some(group_id)) => Some(KafkaConfig { - brokers, - topic, - group_id, - properties_file: args.metering_kafka_properties_file, - }), - _ => None, - }; + // Build Kafka config if properties file is provided + let kafka = args.metering_kafka_properties_file.map(|properties_file| KafkaConfig { + properties_file, + topic: args.metering_kafka_topic, + group_id_override: args.metering_kafka_group_id, + }); let metering = MeteringConfig { enabled: args.enable_metering, diff --git a/crates/runner/src/config.rs b/crates/runner/src/config.rs index a79ea54f..52e106a3 100644 --- a/crates/runner/src/config.rs +++ b/crates/runner/src/config.rs @@ -67,16 +67,18 @@ pub struct MeteringConfig { } /// Kafka connection configuration. 
+/// +/// All rdkafka settings (bootstrap.servers, group.id, timeouts, etc.) should be +/// specified in the properties file. The CLI only specifies the path to this file +/// and the topic name. #[derive(Debug, Clone)] pub struct KafkaConfig { - /// Comma-separated broker addresses. - pub brokers: String, + /// Path to the Kafka properties file containing rdkafka settings. + pub properties_file: String, /// Topic name for accepted bundle events. pub topic: String, - /// Consumer group ID. - pub group_id: String, - /// Optional path to properties file. - pub properties_file: Option, + /// Optional consumer group ID override (takes precedence over properties file). + pub group_id_override: Option, } /// Resource limits for priority fee estimation. diff --git a/crates/runner/src/extensions/rpc.rs b/crates/runner/src/extensions/rpc.rs index 3a4e99c1..67573078 100644 --- a/crates/runner/src/extensions/rpc.rs +++ b/crates/runner/src/extensions/rpc.rs @@ -154,7 +154,7 @@ impl BaseNodeExtension for BaseRpcExtension { if metering.enabled && metering.kafka.is_none() { warn!( message = "Metering enabled but Kafka not configured", - help = "Priority fee estimation requires --metering-kafka-brokers, --metering-kafka-topic, and --metering-kafka-group-id" + help = "Priority fee estimation requires --metering-kafka-properties-file" ); } @@ -201,31 +201,34 @@ impl BaseNodeExtension for BaseRpcExtension { // Spawn Kafka consumer if configured if let (Some(runtime), Some(kafka_cfg)) = (&metering_runtime, &metering.kafka) { - info!(message = "Starting Kafka consumer for metering"); + info!( + message = "Starting Kafka consumer for metering", + properties_file = %kafka_cfg.properties_file, + topic = %kafka_cfg.topic + ); - let mut client_config = ClientConfig::new(); - client_config.set("bootstrap.servers", &kafka_cfg.brokers); - client_config.set("group.id", &kafka_cfg.group_id); - client_config.set("enable.partition.eof", "false"); - client_config.set("session.timeout.ms", "6000"); 
- client_config.set("enable.auto.commit", "true"); - client_config.set("auto.offset.reset", "earliest"); - - if let Some(path) = kafka_cfg.properties_file.as_ref() { - match load_kafka_config_from_file(path) { - Ok(props) => { - for (key, value) in props { - client_config.set(key, value); - } - } - Err(err) => { - warn!( - message = "Failed to load Kafka properties file", - file = %path, - %err - ); - } + // Load all rdkafka settings from the properties file + let props = match load_kafka_config_from_file(&kafka_cfg.properties_file) { + Ok(props) => props, + Err(err) => { + error!( + target: "metering::kafka", + file = %kafka_cfg.properties_file, + %err, + "Failed to load Kafka properties file" + ); + return Ok(()); } + }; + + let mut client_config = ClientConfig::new(); + for (key, value) in props { + client_config.set(key, value); + } + + // Apply CLI override for group.id if specified + if let Some(group_id) = &kafka_cfg.group_id_override { + client_config.set("group.id", group_id); } let tx_sender = runtime.tx_sender.clone(); From 475ec4822d67d27d72ec1822b4ea3b859979b673 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:33:02 -0600 Subject: [PATCH 12/14] Consolidate metered fee types into types.rs - Move ResourceFeeEstimateResponse and MeteredPriorityFeeResponse to types.rs - Move build_priority_fee_response helper to meter_rpc.rs - Delete metered_fee_types.rs --- crates/rpc/src/base/meter_rpc.rs | 37 ++++++++++++- crates/rpc/src/base/metered_fee_types.rs | 70 ------------------------ crates/rpc/src/base/mod.rs | 1 - crates/rpc/src/base/traits.rs | 5 +- crates/rpc/src/base/types.rs | 39 ++++++++++++- crates/rpc/src/lib.rs | 6 +- 6 files changed, 77 insertions(+), 81 deletions(-) delete mode 100644 crates/rpc/src/base/metered_fee_types.rs diff --git a/crates/rpc/src/base/meter_rpc.rs b/crates/rpc/src/base/meter_rpc.rs index 2c9a0e9f..4c0ecd14 100644 --- a/crates/rpc/src/base/meter_rpc.rs +++ b/crates/rpc/src/base/meter_rpc.rs @@ -16,10 
+16,10 @@ use reth_provider::{ChainSpecProvider, StateProviderFactory}; use tips_core::types::{Bundle, MeterBundleResponse, ParsedBundle}; use tracing::{debug, error, info}; +use super::types::{MeteredPriorityFeeResponse, ResourceFeeEstimateResponse}; use crate::{ - MeteringApiServer, PriorityFeeEstimator, ResourceDemand, - base::metered_fee_types::{MeteredPriorityFeeResponse, build_priority_fee_response}, - meter_bundle, + MeteringApiServer, PriorityFeeEstimator, ResourceDemand, ResourceEstimates, + RollingPriorityEstimate, meter_bundle, }; /// Implementation of the metering RPC API @@ -199,3 +199,34 @@ where Ok(response) } } + +/// Converts a rolling estimate to the response format. +fn build_priority_fee_response( + meter_bundle: MeterBundleResponse, + estimate: RollingPriorityEstimate, +) -> MeteredPriorityFeeResponse { + let resource_estimates = build_resource_estimate_responses(&estimate.estimates); + + MeteredPriorityFeeResponse { + meter_bundle, + priority_fee: estimate.priority_fee, + blocks_sampled: estimate.blocks_sampled as u64, + resource_estimates, + } +} + +fn build_resource_estimate_responses( + estimates: &ResourceEstimates, +) -> Vec { + estimates + .iter() + .map(|(kind, est)| ResourceFeeEstimateResponse { + resource: kind.as_camel_case().to_string(), + threshold_priority_fee: est.threshold_priority_fee, + recommended_priority_fee: est.recommended_priority_fee, + cumulative_usage: U256::from(est.cumulative_usage), + threshold_tx_count: est.threshold_tx_count.try_into().unwrap_or(u64::MAX), + total_transactions: est.total_transactions.try_into().unwrap_or(u64::MAX), + }) + .collect() +} diff --git a/crates/rpc/src/base/metered_fee_types.rs b/crates/rpc/src/base/metered_fee_types.rs deleted file mode 100644 index 1c86bc54..00000000 --- a/crates/rpc/src/base/metered_fee_types.rs +++ /dev/null @@ -1,70 +0,0 @@ -//! Response types for the metered priority fee RPC endpoint. 
- -use alloy_primitives::U256; -use tips_core::types::MeterBundleResponse; - -use crate::{ResourceEstimates, RollingPriorityEstimate}; - -/// Human-friendly representation of a resource fee quote. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ResourceFeeEstimateResponse { - /// Resource name (gasUsed, executionTime, etc). - pub resource: String, - /// Minimum fee to displace enough capacity. - pub threshold_priority_fee: U256, - /// Recommended fee with safety margin. - pub recommended_priority_fee: U256, - /// Cumulative resource usage above threshold. - pub cumulative_usage: U256, - /// Number of transactions above threshold. - pub threshold_tx_count: u64, - /// Total transactions considered. - pub total_transactions: u64, -} - -/// Response payload for `base_meteredPriorityFeePerGas`. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct MeteredPriorityFeeResponse { - /// Bundled metering results. - #[serde(flatten)] - pub meter_bundle: MeterBundleResponse, - /// Recommended priority fee (max across all resources and median across recent blocks). - pub priority_fee: U256, - /// Number of recent blocks used to compute the rolling estimate. - pub blocks_sampled: u64, - /// Per-resource estimates (median across sampled blocks). - pub resource_estimates: Vec, -} - -/// Converts a rolling estimate to the response format. 
-pub fn build_priority_fee_response( - meter_bundle: MeterBundleResponse, - estimate: RollingPriorityEstimate, -) -> MeteredPriorityFeeResponse { - let resource_estimates = build_resource_estimate_responses(&estimate.estimates); - - MeteredPriorityFeeResponse { - meter_bundle, - priority_fee: estimate.priority_fee, - blocks_sampled: estimate.blocks_sampled as u64, - resource_estimates, - } -} - -fn build_resource_estimate_responses( - estimates: &ResourceEstimates, -) -> Vec { - estimates - .iter() - .map(|(kind, est)| ResourceFeeEstimateResponse { - resource: kind.as_camel_case().to_string(), - threshold_priority_fee: est.threshold_priority_fee, - recommended_priority_fee: est.recommended_priority_fee, - cumulative_usage: U256::from(est.cumulative_usage), - threshold_tx_count: est.threshold_tx_count.try_into().unwrap_or(u64::MAX), - total_transactions: est.total_transactions.try_into().unwrap_or(u64::MAX), - }) - .collect() -} diff --git a/crates/rpc/src/base/mod.rs b/crates/rpc/src/base/mod.rs index 515577cc..e6dd892a 100644 --- a/crates/rpc/src/base/mod.rs +++ b/crates/rpc/src/base/mod.rs @@ -4,7 +4,6 @@ pub mod estimator; pub mod kafka; pub(crate) mod meter; pub(crate) mod meter_rpc; -pub(crate) mod metered_fee_types; pub(crate) mod pubsub; pub(crate) mod traits; pub(crate) mod transaction_rpc; diff --git a/crates/rpc/src/base/traits.rs b/crates/rpc/src/base/traits.rs index 33acf02c..c1d94b60 100644 --- a/crates/rpc/src/base/traits.rs +++ b/crates/rpc/src/base/traits.rs @@ -3,10 +3,7 @@ use alloy_primitives::TxHash; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use crate::{ - Bundle, MeterBundleResponse, TransactionStatusResponse, - base::metered_fee_types::MeteredPriorityFeeResponse, -}; +use crate::{Bundle, MeterBundleResponse, MeteredPriorityFeeResponse, TransactionStatusResponse}; /// RPC API for transaction metering #[rpc(server, namespace = "base")] diff --git a/crates/rpc/src/base/types.rs b/crates/rpc/src/base/types.rs index 3340a80e..df8b2c97 
100644 --- a/crates/rpc/src/base/types.rs +++ b/crates/rpc/src/base/types.rs @@ -1,7 +1,9 @@ -//! Types for the transaction status rpc +//! Types for the Base RPC extensions. +use alloy_primitives::U256; use alloy_rpc_types_eth::pubsub::SubscriptionKind; use serde::{Deserialize, Serialize}; +use tips_core::types::MeterBundleResponse; /// The status of a transaction. #[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] @@ -95,3 +97,38 @@ impl From for ExtendedSubscriptionKind { Self::Base(kind) } } + +// --- Metered priority fee types --- + +/// Human-friendly representation of a resource fee quote. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResourceFeeEstimateResponse { + /// Resource name (gasUsed, executionTime, etc). + pub resource: String, + /// Minimum fee to displace enough capacity. + pub threshold_priority_fee: U256, + /// Recommended fee with safety margin. + pub recommended_priority_fee: U256, + /// Cumulative resource usage above threshold. + pub cumulative_usage: U256, + /// Number of transactions above threshold. + pub threshold_tx_count: u64, + /// Total transactions considered. + pub total_transactions: u64, +} + +/// Response payload for `base_meteredPriorityFeePerGas`. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MeteredPriorityFeeResponse { + /// Bundled metering results. + #[serde(flatten)] + pub meter_bundle: MeterBundleResponse, + /// Recommended priority fee (max across all resources and median across recent blocks). + pub priority_fee: U256, + /// Number of recent blocks used to compute the rolling estimate. + pub blocks_sampled: u64, + /// Per-resource estimates (median across sampled blocks). 
+ pub resource_estimates: Vec, +} diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 77c9f32f..96fe8c43 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -18,11 +18,13 @@ pub use base::{ kafka::{KafkaBundleConsumer, KafkaBundleConsumerConfig}, meter::meter_bundle, meter_rpc::MeteringApiImpl, - metered_fee_types::{MeteredPriorityFeeResponse, ResourceFeeEstimateResponse}, pubsub::{EthPubSub, EthPubSubApiServer}, traits::{MeteringApiServer, TransactionStatusApiServer}, transaction_rpc::TransactionStatusApiImpl, - types::{BaseSubscriptionKind, ExtendedSubscriptionKind, Status, TransactionStatusResponse}, + types::{ + BaseSubscriptionKind, ExtendedSubscriptionKind, MeteredPriorityFeeResponse, + ResourceFeeEstimateResponse, Status, TransactionStatusResponse, + }, }; mod eth; From e4633df9b39050403f1a39838f7b4b0ae0f14b87 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 02:40:42 -0600 Subject: [PATCH 13/14] Trust sequencer ordering instead of sorting on insert - Rename insert_transaction to push_transaction - Remove binary search insertion, just append in sequencer order - Update docs and test to reflect sequencer ordering is preserved --- crates/rpc/src/base/annotator.rs | 12 +++--- crates/rpc/src/base/cache.rs | 63 +++++++++++++++----------------- crates/rpc/src/base/estimator.rs | 16 ++++---- 3 files changed, 43 insertions(+), 48 deletions(-) diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs index 07976777..aa3af289 100644 --- a/crates/rpc/src/base/annotator.rs +++ b/crates/rpc/src/base/annotator.rs @@ -117,7 +117,7 @@ impl ResourceAnnotator { let mut cache = self.cache.write(); for tx_hash in &event.ordered_tx_hashes { if let Some(tx) = self.pending_transactions.shift_remove(tx_hash) { - cache.insert_transaction(event.block_number, event.flashblock_index, tx); + cache.push_transaction(event.block_number, event.flashblock_index, tx); matched += 1; } else { missed += 1; @@ -201,9 +201,9 @@ 
mod tests { // Pre-populate cache with blocks 100, 101, 102 { let mut c = cache.write(); - c.insert_transaction(100, 0, test_tx(1, 10)); - c.insert_transaction(101, 0, test_tx(2, 20)); - c.insert_transaction(102, 0, test_tx(3, 30)); + c.push_transaction(100, 0, test_tx(1, 10)); + c.push_transaction(101, 0, test_tx(2, 20)); + c.push_transaction(102, 0, test_tx(3, 30)); } assert!(cache.read().contains_block(100)); @@ -234,7 +234,7 @@ mod tests { // Pre-populate cache with block 100 { let mut c = cache.write(); - c.insert_transaction(100, 0, test_tx(1, 10)); + c.push_transaction(100, 0, test_tx(1, 10)); } assert!(cache.read().contains_block(100)); @@ -261,7 +261,7 @@ mod tests { // Pre-populate cache with block 100 { let mut c = cache.write(); - c.insert_transaction(100, 0, test_tx(1, 10)); + c.push_transaction(100, 0, test_tx(1, 10)); } assert!(cache.read().contains_block(100)); diff --git a/crates/rpc/src/base/cache.rs b/crates/rpc/src/base/cache.rs index 0d925919..d72a9844 100644 --- a/crates/rpc/src/base/cache.rs +++ b/crates/rpc/src/base/cache.rs @@ -1,7 +1,7 @@ //! In-memory cache for metering data used by the priority fee estimator. //! -//! Transactions are stored sorted by priority fee (descending) so the estimator -//! can iterate from highest to lowest fee without re-sorting on each request. +//! Transactions are stored in sequencer order (highest priority fee first) as received +//! from flashblock events. use std::collections::{BTreeMap, HashMap, VecDeque}; @@ -63,14 +63,14 @@ impl ResourceTotals { /// Metrics for a single flashblock within a block. /// -/// Transactions are stored sorted by priority fee in descending order (highest first). +/// Transactions are stored in sequencer order (highest priority fee first). #[derive(Debug)] pub struct FlashblockMetrics { /// Block number. pub block_number: u64, /// Flashblock index within the block. pub flashblock_index: u64, - /// Transactions sorted by priority fee descending. 
+ /// Transactions in sequencer order. transactions: Vec, totals: ResourceTotals, } @@ -86,15 +86,10 @@ impl FlashblockMetrics { } } - /// Inserts a transaction, maintaining descending sort order by priority fee. - pub fn insert_transaction(&mut self, tx: MeteredTransaction) { + /// Appends a transaction, preserving sequencer order. + pub fn push_transaction(&mut self, tx: MeteredTransaction) { self.totals.accumulate(&tx); - // Binary search for insertion point (descending order) - let pos = self - .transactions - .binary_search_by(|probe| tx.priority_fee_per_gas.cmp(&probe.priority_fee_per_gas)) - .unwrap_or_else(|pos| pos); - self.transactions.insert(pos, tx); + self.transactions.push(tx); } /// Returns the resource totals for this flashblock. @@ -102,7 +97,7 @@ impl FlashblockMetrics { self.totals } - /// Returns transactions sorted by priority fee descending (highest first). + /// Returns transactions in sequencer order. pub fn transactions(&self) -> &[MeteredTransaction] { &self.transactions } @@ -213,8 +208,8 @@ impl MeteringCache { self.blocks.get_mut(*self.block_index.get(&block_number).unwrap()).unwrap() } - /// Inserts a transaction into the cache. - pub fn insert_transaction( + /// Appends a transaction to the cache, preserving sequencer order. 
+ pub fn push_transaction( &mut self, block_number: u64, flashblock_index: u64, @@ -222,7 +217,7 @@ impl MeteringCache { ) { let block = self.block_mut(block_number); let (flashblock, _) = block.flashblock_mut(flashblock_index); - flashblock.insert_transaction(tx); + flashblock.push_transaction(tx); block.recompute_totals(); } @@ -308,7 +303,7 @@ mod tests { fn insert_and_retrieve_transactions() { let mut cache = MeteringCache::new(12); let tx1 = test_tx(1, 2); - cache.insert_transaction(100, 0, tx1.clone()); + cache.push_transaction(100, 0, tx1.clone()); let block = cache.block(100).unwrap(); let flashblock = block.flashblocks().next().unwrap(); @@ -317,18 +312,18 @@ mod tests { } #[test] - fn transactions_sorted_descending_by_priority_fee() { + fn transactions_preserve_sequencer_order() { let mut cache = MeteringCache::new(12); - // Insert in random order - cache.insert_transaction(100, 0, test_tx(1, 10)); - cache.insert_transaction(100, 0, test_tx(2, 30)); - cache.insert_transaction(100, 0, test_tx(3, 20)); + // Insert in sequencer order (highest priority first) + cache.push_transaction(100, 0, test_tx(1, 30)); + cache.push_transaction(100, 0, test_tx(2, 20)); + cache.push_transaction(100, 0, test_tx(3, 10)); let block = cache.block(100).unwrap(); let flashblock = block.flashblocks().next().unwrap(); let fees: Vec<_> = flashblock.transactions().iter().map(|tx| tx.priority_fee_per_gas).collect(); - // Should be sorted descending: 30, 20, 10 + // Order should be preserved as inserted assert_eq!(fees, vec![U256::from(30u64), U256::from(20u64), U256::from(10u64)]); } @@ -336,7 +331,7 @@ mod tests { fn evicts_old_blocks() { let mut cache = MeteringCache::new(2); for block_number in 0..3u64 { - cache.insert_transaction(block_number, 0, test_tx(block_number, block_number)); + cache.push_transaction(block_number, 0, test_tx(block_number, block_number)); } assert!(cache.block(0).is_none()); assert!(cache.block(1).is_some()); @@ -346,8 +341,8 @@ mod tests { #[test] fn 
contains_block_returns_correct_values() { let mut cache = MeteringCache::new(10); - cache.insert_transaction(100, 0, test_tx(1, 10)); - cache.insert_transaction(101, 0, test_tx(2, 20)); + cache.push_transaction(100, 0, test_tx(1, 10)); + cache.push_transaction(101, 0, test_tx(2, 20)); assert!(cache.contains_block(100)); assert!(cache.contains_block(101)); @@ -358,9 +353,9 @@ mod tests { #[test] fn clear_blocks_from_clears_subsequent_blocks() { let mut cache = MeteringCache::new(10); - cache.insert_transaction(100, 0, test_tx(1, 10)); - cache.insert_transaction(101, 0, test_tx(2, 20)); - cache.insert_transaction(102, 0, test_tx(3, 30)); + cache.push_transaction(100, 0, test_tx(1, 10)); + cache.push_transaction(101, 0, test_tx(2, 20)); + cache.push_transaction(102, 0, test_tx(3, 30)); let cleared = cache.clear_blocks_from(101); @@ -374,8 +369,8 @@ mod tests { #[test] fn clear_blocks_from_returns_zero_when_no_match() { let mut cache = MeteringCache::new(10); - cache.insert_transaction(100, 0, test_tx(1, 10)); - cache.insert_transaction(101, 0, test_tx(2, 20)); + cache.push_transaction(100, 0, test_tx(1, 10)); + cache.push_transaction(101, 0, test_tx(2, 20)); let cleared = cache.clear_blocks_from(200); @@ -386,9 +381,9 @@ mod tests { #[test] fn clear_blocks_from_clears_all_blocks() { let mut cache = MeteringCache::new(10); - cache.insert_transaction(100, 0, test_tx(1, 10)); - cache.insert_transaction(101, 0, test_tx(2, 20)); - cache.insert_transaction(102, 0, test_tx(3, 30)); + cache.push_transaction(100, 0, test_tx(1, 10)); + cache.push_transaction(101, 0, test_tx(2, 20)); + cache.push_transaction(102, 0, test_tx(3, 30)); let cleared = cache.clear_blocks_from(100); diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index fc87e8cb..4502d614 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -836,8 +836,8 @@ mod tests { let (cache, estimator) = setup_estimator(DEFAULT_LIMITS); { let mut guard = 
cache.write(); - guard.insert_transaction(1, 0, tx(10, 10)); - guard.insert_transaction(1, 0, tx(5, 10)); + guard.push_transaction(1, 0, tx(10, 10)); + guard.push_transaction(1, 0, tx(5, 10)); } let mut demand = ResourceDemand::default(); demand.gas_used = Some(15); @@ -857,8 +857,8 @@ mod tests { let (cache, estimator) = setup_estimator(limits); { let mut guard = cache.write(); - guard.insert_transaction(1, 0, tx(10, 10)); - guard.insert_transaction(1, 0, tx(5, 10)); + guard.push_transaction(1, 0, tx(10, 10)); + guard.push_transaction(1, 0, tx(5, 10)); } let mut demand = ResourceDemand::default(); demand.gas_used = Some(15); @@ -882,11 +882,11 @@ mod tests { { let mut guard = cache.write(); // Block 1 → threshold 10 - guard.insert_transaction(1, 0, tx(10, 10)); - guard.insert_transaction(1, 0, tx(5, 10)); + guard.push_transaction(1, 0, tx(10, 10)); + guard.push_transaction(1, 0, tx(5, 10)); // Block 2 → threshold 30 - guard.insert_transaction(2, 0, tx(30, 10)); - guard.insert_transaction(2, 0, tx(25, 10)); + guard.push_transaction(2, 0, tx(30, 10)); + guard.push_transaction(2, 0, tx(25, 10)); } let mut demand = ResourceDemand::default(); From 39463060fa3d479a0597d24d6fe93bc616565745 Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Tue, 23 Dec 2025 10:18:53 -0600 Subject: [PATCH 14/14] Fix nightly clippy warnings in metering pipeline - Add const fn annotations where possible (cache, estimator, runner) - Add Debug impls for ResourceAnnotator and KafkaBundleConsumer - Remove redundant clones in meter_rpc and runner - Change pub mod to pub(crate) mod for internal modules - Replace redundant closure with function reference - Remove unused cache field from MeteringRuntime --- crates/rpc/src/base/annotator.rs | 10 +++++++- crates/rpc/src/base/cache.rs | 14 +++++------ crates/rpc/src/base/estimator.rs | 38 ++++++++++++++--------------- crates/rpc/src/base/kafka.rs | 8 +++++- crates/rpc/src/base/meter_rpc.rs | 4 +-- crates/rpc/src/base/mod.rs | 8 +++--- 
crates/runner/src/extensions/rpc.rs | 14 +++-------- 7 files changed, 51 insertions(+), 45 deletions(-) diff --git a/crates/rpc/src/base/annotator.rs b/crates/rpc/src/base/annotator.rs index aa3af289..d61a9fa5 100644 --- a/crates/rpc/src/base/annotator.rs +++ b/crates/rpc/src/base/annotator.rs @@ -1,6 +1,6 @@ //! Resource annotator that correlates Kafka metering data with flashblock inclusions. -use std::sync::Arc; +use std::{fmt, sync::Arc}; use alloy_primitives::TxHash; use parking_lot::RwLock; @@ -40,6 +40,14 @@ pub struct ResourceAnnotator { pending_transactions: indexmap::IndexMap, } +impl fmt::Debug for ResourceAnnotator { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ResourceAnnotator") + .field("pending_transactions", &self.pending_transactions.len()) + .finish_non_exhaustive() + } +} + impl ResourceAnnotator { /// Creates a new resource annotator. pub fn new( diff --git a/crates/rpc/src/base/cache.rs b/crates/rpc/src/base/cache.rs index d72a9844..59aae123 100644 --- a/crates/rpc/src/base/cache.rs +++ b/crates/rpc/src/base/cache.rs @@ -26,7 +26,7 @@ pub struct MeteredTransaction { impl MeteredTransaction { /// Creates a zeroed transaction (placeholder with no resource usage). - pub fn zeroed(tx_hash: B256) -> Self { + pub const fn zeroed(tx_hash: B256) -> Self { Self { tx_hash, priority_fee_per_gas: U256::ZERO, @@ -52,7 +52,7 @@ pub struct ResourceTotals { } impl ResourceTotals { - fn accumulate(&mut self, tx: &MeteredTransaction) { + const fn accumulate(&mut self, tx: &MeteredTransaction) { self.gas_used = self.gas_used.saturating_add(tx.gas_used); self.execution_time_us = self.execution_time_us.saturating_add(tx.execution_time_us); self.state_root_time_us = self.state_root_time_us.saturating_add(tx.state_root_time_us); @@ -93,7 +93,7 @@ impl FlashblockMetrics { } /// Returns the resource totals for this flashblock. 
- pub fn totals(&self) -> ResourceTotals { + pub const fn totals(&self) -> ResourceTotals { self.totals } @@ -103,12 +103,12 @@ impl FlashblockMetrics { } /// Returns the number of transactions. - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.transactions.len() } /// Returns true if empty. - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.transactions.is_empty() } } @@ -150,7 +150,7 @@ impl BlockMetrics { } /// Returns the resource totals for this block. - pub fn totals(&self) -> ResourceTotals { + pub const fn totals(&self) -> ResourceTotals { self.totals } @@ -184,7 +184,7 @@ impl MeteringCache { } /// Returns the maximum number of blocks retained. - pub fn max_blocks(&self) -> usize { + pub const fn max_blocks(&self) -> usize { self.max_blocks } diff --git a/crates/rpc/src/base/estimator.rs b/crates/rpc/src/base/estimator.rs index 4502d614..e6e29677 100644 --- a/crates/rpc/src/base/estimator.rs +++ b/crates/rpc/src/base/estimator.rs @@ -84,7 +84,7 @@ pub enum ResourceKind { impl ResourceKind { /// Returns all resource kinds in a fixed order. - pub fn all() -> [Self; 4] { + pub const fn all() -> [Self; 4] { [Self::GasUsed, Self::ExecutionTime, Self::StateRootTime, Self::DataAvailability] } @@ -98,7 +98,7 @@ impl ResourceKind { /// /// Other resources like gas and DA bytes are bounded per-block but are /// evaluated per-flashblock since their limits apply independently. - fn use_it_or_lose_it(self) -> bool { + const fn use_it_or_lose_it(self) -> bool { matches!(self, Self::ExecutionTime) } @@ -185,7 +185,7 @@ pub struct ResourceEstimates { impl ResourceEstimates { /// Returns the estimate for the given resource kind. 
- pub fn get(&self, kind: ResourceKind) -> Option<&ResourceEstimate> { + pub const fn get(&self, kind: ResourceKind) -> Option<&ResourceEstimate> { match kind { ResourceKind::GasUsed => self.gas_used.as_ref(), ResourceKind::ExecutionTime => self.execution_time.as_ref(), @@ -195,7 +195,7 @@ impl ResourceEstimates { } /// Sets the estimate for the given resource kind. - pub fn set(&mut self, kind: ResourceKind, estimate: ResourceEstimate) { + pub const fn set(&mut self, kind: ResourceKind, estimate: ResourceEstimate) { match kind { ResourceKind::GasUsed => self.gas_used = Some(estimate), ResourceKind::ExecutionTime => self.execution_time = Some(estimate), @@ -278,7 +278,7 @@ impl PriorityFeeEstimator { /// - `limits`: Configured resource capacity limits. /// - `default_priority_fee`: Fee to return when a resource is not congested. /// - `da_config`: Optional shared DA config for dynamic DA limit updates. - pub fn new( + pub const fn new( cache: Arc>, percentile: f64, limits: ResourceLimits, @@ -318,10 +318,8 @@ impl PriorityFeeEstimator { demand: ResourceDemand, ) -> Result, EstimateError> { let cache_guard = self.cache.read(); - let block_metrics = match block_number { - Some(target) => cache_guard.block(target), - None => cache_guard.blocks_desc().next(), - }; + let block_metrics = block_number + .map_or_else(|| cache_guard.blocks_desc().next(), |target| cache_guard.block(target)); let Some(block_metrics) = block_metrics else { return Ok(None); }; @@ -555,8 +553,15 @@ fn compute_estimate( }); } - let (supporting_count, threshold_fee, recommended_fee) = match last_included_idx { - Some(idx) => { + let (supporting_count, threshold_fee, recommended_fee) = last_included_idx.map_or_else( + || { + // No transactions fit - even the first transaction would crowd out + // the bundle. The bundle must beat the highest fee to be included. + // Report 0 supporting transactions since none were actually included. 
+ let threshold_fee = transactions[0].priority_fee_per_gas; + (0, threshold_fee, threshold_fee) + }, + |idx| { // At least one transaction fits alongside the bundle. // The threshold is the fee of the last included transaction. let threshold_fee = transactions[idx].priority_fee_per_gas; @@ -574,15 +579,8 @@ fn compute_estimate( }; (idx + 1, threshold_fee, recommended_fee) - } - None => { - // No transactions fit - even the first transaction would crowd out - // the bundle. The bundle must beat the highest fee to be included. - // Report 0 supporting transactions since none were actually included. - let threshold_fee = transactions[0].priority_fee_per_gas; - (0, threshold_fee, threshold_fee) - } - }; + }, + ); Ok(ResourceEstimate { threshold_priority_fee: threshold_fee, diff --git a/crates/rpc/src/base/kafka.rs b/crates/rpc/src/base/kafka.rs index 72e52b0f..f6e6805a 100644 --- a/crates/rpc/src/base/kafka.rs +++ b/crates/rpc/src/base/kafka.rs @@ -1,6 +1,6 @@ //! Kafka consumer for accepted bundle events. -use std::time::Duration; +use std::{fmt, time::Duration}; use alloy_consensus::{Transaction, transaction::Recovered}; use alloy_eips::Encodable2718; @@ -38,6 +38,12 @@ pub struct KafkaBundleConsumer { topic: String, } +impl fmt::Debug for KafkaBundleConsumer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("KafkaBundleConsumer").field("topic", &self.topic).finish_non_exhaustive() + } +} + impl KafkaBundleConsumer { /// Creates a new Kafka bundle consumer. pub fn new( diff --git a/crates/rpc/src/base/meter_rpc.rs b/crates/rpc/src/base/meter_rpc.rs index 4c0ecd14..45181ff2 100644 --- a/crates/rpc/src/base/meter_rpc.rs +++ b/crates/rpc/src/base/meter_rpc.rs @@ -42,7 +42,7 @@ where } /// Creates a new instance of MeteringApi with priority fee estimation enabled. 
- pub fn with_estimator( + pub const fn with_estimator( provider: Provider, priority_fee_estimator: Arc, ) -> Self { @@ -100,7 +100,7 @@ where ) })?; - let chain_spec = self.provider.chain_spec().clone(); + let chain_spec = self.provider.chain_spec(); let (results, total_gas_used, total_gas_fees, bundle_hash, total_execution_time) = meter_bundle(state_provider, chain_spec, parsed_bundle, &header).map_err(|e| { diff --git a/crates/rpc/src/base/mod.rs b/crates/rpc/src/base/mod.rs index e6dd892a..772cdbce 100644 --- a/crates/rpc/src/base/mod.rs +++ b/crates/rpc/src/base/mod.rs @@ -1,7 +1,7 @@ -pub mod annotator; -pub mod cache; -pub mod estimator; -pub mod kafka; +pub(crate) mod annotator; +pub(crate) mod cache; +pub(crate) mod estimator; +pub(crate) mod kafka; pub(crate) mod meter; pub(crate) mod meter_rpc; pub(crate) mod pubsub; diff --git a/crates/runner/src/extensions/rpc.rs b/crates/runner/src/extensions/rpc.rs index 67573078..90b3ae10 100644 --- a/crates/runner/src/extensions/rpc.rs +++ b/crates/runner/src/extensions/rpc.rs @@ -26,8 +26,6 @@ use crate::{ /// Runtime state for the metering pipeline. #[derive(Clone)] struct MeteringRuntime { - /// Shared cache for metered transactions. - cache: Arc>, /// Priority fee estimator. estimator: Arc, /// Sender for metered transactions from Kafka. 
@@ -45,7 +43,7 @@ struct CompositeFlashblocksReceiver { } impl CompositeFlashblocksReceiver { - fn new( + const fn new( state: Arc>, metering_sender: Option>, ) -> Self { @@ -84,8 +82,7 @@ fn flashblock_inclusion_from_flashblock(flashblock: &Flashblock) -> Option = - flashblock.diff.transactions.iter().map(|tx_bytes| keccak256(tx_bytes)).collect(); + let ordered_tx_hashes: Vec = flashblock.diff.transactions.iter().map(keccak256).collect(); Some(FlashblockInclusion { block_number: flashblock.metadata.block_number, @@ -187,14 +184,11 @@ impl BaseNodeExtension for BaseRpcExtension { mpsc::unbounded_channel::(); // Spawn the resource annotator - let annotator_cache = cache.clone(); tokio::spawn(async move { - ResourceAnnotator::new(annotator_cache, tx_receiver, flashblock_receiver) - .run() - .await; + ResourceAnnotator::new(cache, tx_receiver, flashblock_receiver).run().await; }); - Some(MeteringRuntime { cache, estimator, tx_sender, flashblock_sender }) + Some(MeteringRuntime { estimator, tx_sender, flashblock_sender }) } else { None };