diff --git a/Cargo.lock b/Cargo.lock
index f81b5e17..414adbc5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1050,6 +1050,12 @@ dependencies = [
"libc",
]
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
[[package]]
name = "anstream"
version = "1.0.0"
@@ -2456,6 +2462,12 @@ dependencies = [
"thiserror 2.0.18",
]
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
[[package]]
name = "castaway"
version = "0.2.4"
@@ -2581,6 +2593,33 @@ dependencies = [
"windows-link 0.2.1",
]
+[[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
[[package]]
name = "cipher"
version = "0.4.4"
@@ -2935,6 +2974,42 @@ dependencies = [
"cfg-if",
]
+[[package]]
+name = "criterion"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
+dependencies = [
+ "anes",
+ "cast",
+ "ciborium",
+ "clap",
+ "criterion-plot",
+ "is-terminal",
+ "itertools 0.10.5",
+ "num-traits",
+ "once_cell",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools 0.10.5",
+]
+
[[package]]
name = "critical-section"
version = "1.2.0"
@@ -3450,7 +3525,7 @@ dependencies = [
"libc",
"option-ext",
"redox_users 0.5.2",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -3788,7 +3863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
dependencies = [
"libc",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -4445,6 +4520,17 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "half"
+version = "2.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b"
+dependencies = [
+ "cfg-if",
+ "crunchy",
+ "zerocopy 0.8.48",
+]
+
[[package]]
name = "hash-db"
version = "0.15.2"
@@ -4811,7 +4897,7 @@ dependencies = [
"libc",
"percent-encoding",
"pin-project-lite",
- "socket2 0.5.10",
+ "socket2 0.6.3",
"system-configuration",
"tokio",
"tower-service",
@@ -5224,6 +5310,17 @@ dependencies = [
"serde",
]
+[[package]]
+name = "is-terminal"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "windows-sys 0.61.2",
+]
+
[[package]]
name = "is_terminal_polyfill"
version = "1.70.2"
@@ -5627,9 +5724,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "libc"
-version = "0.2.185"
+version = "0.2.186"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f"
+checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66"
[[package]]
name = "libgit2-sys"
@@ -6715,7 +6812,7 @@ version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -6928,6 +7025,12 @@ version = "1.70.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
+[[package]]
+name = "oorandom"
+version = "11.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
+
[[package]]
name = "op-alloy"
version = "0.24.0"
@@ -7072,6 +7175,7 @@ dependencies = [
"chrono",
"clap",
"clap_builder",
+ "criterion",
"ctor",
"dashmap",
"derive_more",
@@ -7100,6 +7204,7 @@ dependencies = [
"opentelemetry",
"p2p",
"parking_lot",
+ "proptest",
"rand 0.9.4",
"reqwest 0.12.28",
"reth",
@@ -7143,6 +7248,7 @@ dependencies = [
"reth-tracing-otlp",
"reth-transaction-pool",
"reth-trie",
+ "reth-trie-db",
"revm",
"rlimit 0.10.2",
"secp256k1 0.30.0",
@@ -7674,6 +7780,34 @@ dependencies = [
"crunchy",
]
+[[package]]
+name = "plotters"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
+dependencies = [
+ "plotters-backend",
+]
+
[[package]]
name = "polling"
version = "3.11.0"
@@ -8042,7 +8176,7 @@ dependencies = [
"quinn-udp",
"rustc-hash",
"rustls",
- "socket2 0.5.10",
+ "socket2 0.6.3",
"thiserror 2.0.18",
"tokio",
"tracing",
@@ -8080,9 +8214,9 @@ dependencies = [
"cfg_aliases",
"libc",
"once_cell",
- "socket2 0.5.10",
+ "socket2 0.6.3",
"tracing",
- "windows-sys 0.59.0",
+ "windows-sys 0.60.2",
]
[[package]]
@@ -11959,9 +12093,9 @@ dependencies = [
[[package]]
name = "ruint"
-version = "1.17.2"
+version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c141e807189ad38a07276942c6623032d3753c8859c146104ac2e4d68865945a"
+checksum = "0298da754d1395046b0afdc2f20ee76d29a8ae310cd30ffa84ed42acba9cb12a"
dependencies = [
"alloy-rlp",
"arbitrary",
@@ -12053,14 +12187,14 @@ dependencies = [
"errno",
"libc",
"linux-raw-sys",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
name = "rustls"
-version = "0.23.38"
+version = "0.23.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69f9466fb2c14ea04357e91413efb882e2a6d4a406e625449bc0a5d360d53a21"
+checksum = "7c2c118cb077cca2822033836dfb1b975355dfb784b5e8da48f7b6c5db74e60e"
dependencies = [
"aws-lc-rs",
"log",
@@ -12133,7 +12267,7 @@ dependencies = [
"security-framework",
"security-framework-sys",
"webpki-root-certs 1.0.7",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -13093,7 +13227,7 @@ dependencies = [
"getrandom 0.4.2",
"once_cell",
"rustix",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -13289,6 +13423,16 @@ dependencies = [
"zerovec",
]
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
[[package]]
name = "tinyvec"
version = "1.11.0"
@@ -14496,7 +14640,7 @@ version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
diff --git a/crates/op-rbuilder/Cargo.toml b/crates/op-rbuilder/Cargo.toml
index e15c041b..022c70cc 100644
--- a/crates/op-rbuilder/Cargo.toml
+++ b/crates/op-rbuilder/Cargo.toml
@@ -155,6 +155,9 @@ testcontainers.workspace = true
nanoid.workspace = true
reth-ipc.workspace = true
reth-node-builder = { workspace = true, features = ["test-utils"] }
+reth-trie-db.workspace = true
+proptest = "1.10"
+criterion = { version = "0.5", features = ["html_reports"] }
ctor.workspace = true
rlimit.workspace = true
hyper.workspace = true
@@ -198,6 +201,10 @@ testing = [
telemetry = ["reth-tracing-otlp", "opentelemetry"]
loki = ["tracing-loki", "telemetry"]
+[[bench]]
+name = "bench_flashblocks_state_root"
+harness = false
+
[[bin]]
name = "op-rbuilder"
path = "src/bin/op-rbuilder/main.rs"
diff --git a/crates/op-rbuilder/benches/bench_flashblocks_state_root.rs b/crates/op-rbuilder/benches/bench_flashblocks_state_root.rs
new file mode 100644
index 00000000..9604bb00
--- /dev/null
+++ b/crates/op-rbuilder/benches/bench_flashblocks_state_root.rs
@@ -0,0 +1,295 @@
+//! Benchmark comparing flashblocks state root calculation with and without incremental trie caching.
+//!
+//! This benchmark simulates building 10 sequential flashblocks, measuring the total time
+//! spent in state root calculation. It uses `StateRootCalculator` — the same
+//! code path as the production payload builder — so results reflect real-world
+//! performance.
+//!
+//! It compares:
+//! - Without cache: Full state root calculation each time
+//! - With cache: Incremental using `IncrementalStateRootCalculator`
+//!
+//! Run with:
+//! ```
+//! cargo bench -p op-rbuilder --bench bench_flashblocks_state_root
+//! ```
+
+use alloy_primitives::{Address, B256, U256, keccak256};
+use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
+use op_rbuilder::builder::StateRootCalculator;
+use rand::{Rng, SeedableRng, rngs::StdRng};
+use reth_chainspec::MAINNET;
+use reth_primitives_traits::Account;
+use reth_provider::{
+ DatabaseProviderFactory, HashingWriter, LatestStateProvider,
+ test_utils::create_test_provider_factory_with_chain_spec,
+};
+use reth_trie::{HashedPostState, HashedStorage};
+use std::{collections::HashMap, time::Instant};
+
+const SEED: u64 = 42;
+
+type AccountList = Vec<(Address, Account)>;
+type StorageMap = HashMap<Address, Vec<(B256, U256)>>;
+
+/// Generate random accounts and storage for initial database state
+fn generate_test_data(
+ num_accounts: usize,
+ storage_per_account: usize,
+ seed: u64,
+) -> (AccountList, StorageMap) {
+ let mut rng = StdRng::seed_from_u64(seed);
+ let mut accounts = Vec::with_capacity(num_accounts);
+ let mut storage = HashMap::new();
+
+ for _ in 0..num_accounts {
+ let mut addr_bytes = [0u8; 20];
+ rng.fill(&mut addr_bytes);
+ let address = Address::from_slice(&addr_bytes);
+
+ let account = Account {
+ nonce: rng.random_range(0..1000),
+ balance: U256::from(rng.random_range(0u64..1_000_000)),
+ bytecode_hash: if rng.random_bool(0.3) {
+ let mut hash = [0u8; 32];
+ rng.fill(&mut hash);
+ Some(B256::from(hash))
+ } else {
+ None
+ },
+ };
+ accounts.push((address, account));
+
+ if storage_per_account > 0 && rng.random_bool(0.5) {
+ let mut slots = Vec::with_capacity(storage_per_account);
+ for _ in 0..storage_per_account {
+ let mut key = [0u8; 32];
+ rng.fill(&mut key);
+ let value = U256::from(rng.random_range(1u64..1_000_000));
+ slots.push((B256::from(key), value));
+ }
+ storage.insert(address, slots);
+ }
+ }
+
+ (accounts, storage)
+}
+
+/// Setup test database with initial state
+fn setup_database(
+ accounts: &[(Address, Account)],
+    storage: &HashMap<Address, Vec<(B256, U256)>>,
+) -> reth_provider::providers::ProviderFactory<reth_provider::test_utils::MockNodeTypesWithDB> {
+ let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
+
+ {
+ let provider_rw = provider_factory.provider_rw().unwrap();
+
+ let accounts_iter = accounts.iter().map(|(addr, acc)| (*addr, Some(*acc)));
+ provider_rw
+ .insert_account_for_hashing(accounts_iter)
+ .unwrap();
+
+ let storage_entries: Vec<_> = storage
+ .iter()
+ .map(|(addr, slots)| {
+ let entries: Vec<_> = slots
+ .iter()
+ .map(|(key, value)| reth_primitives_traits::StorageEntry {
+ key: *key,
+ value: *value,
+ })
+ .collect();
+ (*addr, entries)
+ })
+ .collect();
+ provider_rw
+ .insert_storage_for_hashing(storage_entries)
+ .unwrap();
+
+ provider_rw.commit().unwrap();
+ }
+
+ provider_factory
+}
+
+/// Generate a flashblock's worth of state changes
+fn generate_flashblock_changes(
+ base_accounts: &[(Address, Account)],
+ change_size: usize,
+ seed: u64,
+) -> (AccountList, StorageMap) {
+ let mut rng = StdRng::seed_from_u64(seed);
+ let mut accounts = Vec::with_capacity(change_size);
+ let mut storage = HashMap::new();
+
+ for i in 0..change_size {
+ let address = if i < base_accounts.len() && rng.random_bool(0.7) {
+ base_accounts[rng.random_range(0..base_accounts.len())].0
+ } else {
+ let mut addr_bytes = [0u8; 20];
+ rng.fill(&mut addr_bytes);
+ Address::from_slice(&addr_bytes)
+ };
+
+ let account = Account {
+ nonce: rng.random_range(1000..2000),
+ balance: U256::from(rng.random_range(1_000_000u64..2_000_000)),
+ bytecode_hash: None,
+ };
+ accounts.push((address, account));
+
+ if rng.random_bool(0.3) {
+ let mut slots = Vec::new();
+ for _ in 0..rng.random_range(1..10) {
+ let mut key = [0u8; 32];
+ rng.fill(&mut key);
+ let value = U256::from(rng.random_range(1u64..1_000_000));
+ slots.push((B256::from(key), value));
+ }
+ storage.insert(address, slots);
+ }
+ }
+
+ (accounts, storage)
+}
+
+/// Convert to HashedPostState for state root calculation
+fn to_hashed_post_state(
+ accounts: &[(Address, Account)],
+    storage: &HashMap<Address, Vec<(B256, U256)>>,
+) -> HashedPostState {
+ let hashed_accounts: Vec<_> = accounts
+ .iter()
+ .map(|(addr, acc)| (keccak256(addr), Some(*acc)))
+ .collect();
+
+ let mut hashed_storages = alloy_primitives::map::HashMap::default();
+ for (addr, slots) in storage {
+ let hashed_addr = keccak256(addr);
+ let hashed_storage = HashedStorage::from_iter(
+ false,
+ slots.iter().map(|(key, value)| (keccak256(key), *value)),
+ );
+ hashed_storages.insert(hashed_addr, hashed_storage);
+ }
+
+ HashedPostState {
+ accounts: hashed_accounts.into_iter().collect(),
+ storages: hashed_storages,
+ }
+}
+
+/// Benchmark without incremental trie cache (baseline — fresh calculator each flashblock)
+fn bench_without_cache(
+ provider_factory: &reth_provider::providers::ProviderFactory<
+ reth_provider::test_utils::MockNodeTypesWithDB,
+ >,
+ flashblock_changes: &[HashedPostState],
+) -> (u128, Vec<u128>) {
+ let mut individual_times = Vec::new();
+ let total_start = Instant::now();
+
+ for hashed_state in flashblock_changes {
+ let fb_start = Instant::now();
+ let provider = provider_factory.database_provider_ro().unwrap();
+ let latest = LatestStateProvider::new(provider);
+ let output = StateRootCalculator::new(false)
+ .compute(&latest, hashed_state.clone())
+ .unwrap();
+ individual_times.push(fb_start.elapsed().as_micros());
+ black_box(output.state_root);
+ }
+
+ (total_start.elapsed().as_micros(), individual_times)
+}
+
+/// Benchmark with incremental trie cache (single calculator across all flashblocks)
+fn bench_with_cache(
+ provider_factory: &reth_provider::providers::ProviderFactory<
+ reth_provider::test_utils::MockNodeTypesWithDB,
+ >,
+ flashblock_changes: &[HashedPostState],
+) -> (u128, Vec<u128>) {
+ let mut individual_times = Vec::new();
+ let mut calc = StateRootCalculator::new(true);
+ let total_start = Instant::now();
+
+ for hashed_state in flashblock_changes {
+ let fb_start = Instant::now();
+ let provider = provider_factory.database_provider_ro().unwrap();
+ let latest = LatestStateProvider::new(provider);
+
+ let output = calc.compute(&latest, hashed_state.clone()).unwrap();
+
+ individual_times.push(fb_start.elapsed().as_micros());
+ black_box(output.state_root);
+ }
+
+ (total_start.elapsed().as_micros(), individual_times)
+}
+
+fn bench_flashblocks_state_root(c: &mut Criterion) {
+ // Setup: Create a large database with 50k accounts, 10 storage slots each
+ eprintln!("\n=== Setting up database with 50,000 accounts...");
+ let (base_accounts, base_storage) = generate_test_data(50_000, 10, SEED);
+ let provider_factory = setup_database(&base_accounts, &base_storage);
+ eprintln!("Database setup complete\n");
+
+ // Test different flashblock sizes (transactions per flashblock)
+ for txs_per_flashblock in [50, 100, 200] {
+ let mut group = c.benchmark_group(format!("flashblocks_{}_txs", txs_per_flashblock));
+ group.sample_size(10);
+
+ eprintln!(
+ "--- Testing with {} transactions per flashblock ---",
+ txs_per_flashblock
+ );
+
+ // Generate 10 flashblocks worth of changes
+ let mut flashblock_changes = Vec::new();
+ for i in 0..10 {
+ let (accounts, storage) =
+ generate_flashblock_changes(&base_accounts, txs_per_flashblock, SEED + i + 1);
+ let hashed_state = to_hashed_post_state(&accounts, &storage);
+ flashblock_changes.push(hashed_state);
+ }
+
+ // Benchmark without cache (baseline)
+ group.bench_function(BenchmarkId::new("without_cache", "10_flashblocks"), |b| {
+ b.iter(|| bench_without_cache(&provider_factory, &flashblock_changes))
+ });
+
+ // Benchmark with incremental cache
+ group.bench_function(BenchmarkId::new("with_cache", "10_flashblocks"), |b| {
+ b.iter(|| bench_with_cache(&provider_factory, &flashblock_changes))
+ });
+
+ // Manual comparison run for detailed output
+ eprintln!("\nManual timing comparison:");
+ let (total_without, times_without) =
+ bench_without_cache(&provider_factory, &flashblock_changes);
+ eprintln!(" WITHOUT cache: {} us total", total_without);
+ eprintln!(" Per-flashblock: {:?} us", times_without);
+
+ let (total_with, times_with) = bench_with_cache(&provider_factory, &flashblock_changes);
+ eprintln!(" WITH cache: {} us total", total_with);
+ eprintln!(" Per-flashblock: {:?} us", times_with);
+
+ let speedup = total_without as f64 / total_with as f64;
+ let improvement = ((total_without - total_with) as f64 / total_without as f64) * 100.0;
+ eprintln!(
+ " Cache speedup: {:.2}x ({:.1}% faster)",
+ speedup, improvement
+ );
+ eprintln!();
+
+ group.finish();
+ }
+
+ eprintln!("\n=== Benchmark complete! ===");
+ eprintln!("Results saved to target/criterion/");
+}
+
+criterion_group!(benches, bench_flashblocks_state_root);
+criterion_main!(benches);
diff --git a/crates/op-rbuilder/src/builder/mod.rs b/crates/op-rbuilder/src/builder/mod.rs
index da485518..25a4c783 100644
--- a/crates/op-rbuilder/src/builder/mod.rs
+++ b/crates/op-rbuilder/src/builder/mod.rs
@@ -20,6 +20,7 @@ mod p2p;
mod payload;
mod payload_handler;
mod service;
+mod state_root;
mod syncer_ctx;
mod timing;
mod wspub;
@@ -31,6 +32,7 @@ pub use builder_tx::{
pub use config::FlashblocksConfig;
pub use context::OpPayloadBuilderCtx;
pub use service::FlashblocksServiceBuilder;
+pub use state_root::StateRootCalculator;
/// Configuration values that are applicable to any type of block builder.
#[derive(Debug, Clone)]
diff --git a/crates/op-rbuilder/src/builder/payload.rs b/crates/op-rbuilder/src/builder/payload.rs
index df82ed2d..628d94b8 100644
--- a/crates/op-rbuilder/src/builder/payload.rs
+++ b/crates/op-rbuilder/src/builder/payload.rs
@@ -1,4 +1,4 @@
-use super::wspub::WebSocketPublisher;
+use super::{state_root::StateRootCalculator, wspub::WebSocketPublisher};
use crate::{
backrun_bundle::BackrunBundlesPayloadCtx,
builder::{
@@ -52,7 +52,7 @@ use reth_revm::{
};
use reth_tasks::Runtime;
use reth_transaction_pool::TransactionPool;
-use reth_trie::{HashedPostState, TrieInput, updates::TrieUpdates};
+use reth_trie::{HashedPostState, updates::TrieUpdates};
use revm::Database;
use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant};
use tokio::sync::{mpsc, watch};
@@ -112,14 +112,12 @@ pub(super) struct FlashblocksState {
da_footprint_per_batch: Option<u64>,
/// Whether to disable state root calculation for each flashblock
disable_state_root: bool,
- /// Whether to enable incremental state root calculation using cached trie nodes
- enable_incremental_state_root: bool,
/// Index into ExecutionInfo tracking the last consumed flashblock
/// Used for slicing transactions/receipts per flashblock
last_flashblock_tx_index: usize,
- /// Cached trie updates from previous flashblock for incremental state root calculation.
- /// None only for the first flashblock; populated after each subsequent state root calculation.
-    prev_trie_updates: Option<Arc<TrieUpdates>>,
+ /// State root calculator. Manages cached trie updates and cumulative prefix
+ /// sets across flashblocks when incremental mode is enabled.
+ state_root_calculator: StateRootCalculator,
}
struct FallbackBuildOutput {
@@ -151,7 +149,7 @@ impl FlashblocksState {
Self {
target_flashblock_count,
disable_state_root,
- enable_incremental_state_root,
+ state_root_calculator: StateRootCalculator::new(enable_incremental_state_root),
..Default::default()
}
}
@@ -173,9 +171,8 @@ impl FlashblocksState {
da_per_batch: self.da_per_batch,
da_footprint_per_batch: self.da_footprint_per_batch,
disable_state_root: self.disable_state_root,
- enable_incremental_state_root: self.enable_incremental_state_root,
last_flashblock_tx_index: self.last_flashblock_tx_index,
- prev_trie_updates: self.prev_trie_updates.clone(),
+ state_root_calculator: self.state_root_calculator.clone(),
}
}
@@ -1306,7 +1303,7 @@ where
pub(super) fn build_block(
state: &mut State,
ctx: &OpPayloadBuilderCtx,
- fb_state: Option<&mut FlashblocksState>,
+ mut fb_state: Option<&mut FlashblocksState>,
info: &mut ExecutionInfo,
calculate_state_root: bool,
enable_tx_tracking_debug_logs: bool,
@@ -1376,13 +1373,6 @@ where
let _state_root_span = span!(Level::INFO, "state_root").entered();
let state_provider = state.database.as_ref();
- // prev_trie_updates is None for the first flashblock.
- let enable_incremental = fb_state
- .as_deref()
- .is_some_and(|s| s.enable_incremental_state_root);
- let prev_trie = fb_state
- .as_deref()
- .and_then(|s| s.prev_trie_updates.clone());
let flashblock_index = fb_state
.as_deref()
.map(|s| s.flashblock_index())
@@ -1390,50 +1380,32 @@ where
hashed_state = state_provider.hashed_post_state(&state.bundle_state);
- let trie_output;
- (state_root, trie_output) = if let Some(prev_trie) = prev_trie
- && enable_incremental
- {
- // Incremental path: Use cached trie from previous flashblock
- debug!(
- target: "payload_builder",
- flashblock_index,
- "Using incremental state root calculation with cached trie"
- );
-
- let trie_input = TrieInput::new(
- (*prev_trie).clone(),
- hashed_state.clone(),
- hashed_state.construct_prefix_sets(),
- );
-
- state_provider
- .state_root_from_nodes_with_updates(trie_input)
- .map_err(PayloadBuilderError::other)?
- } else {
- debug!(
- target: "payload_builder",
- flashblock_index,
- "Using full state root calculation"
- );
-
- state
- .database
- .as_ref()
- .state_root_with_updates(hashed_state.clone())
- .inspect_err(|err| {
- warn!(
- target: "payload_builder",
- parent_header=%ctx.parent().hash(),
- %err,
- "failed to calculate state root for payload"
- );
- })?
+ let mut default_calc = StateRootCalculator::default();
+ let calc = match fb_state.as_deref_mut() {
+ Some(s) => &mut s.state_root_calculator,
+ None => &mut default_calc,
};
- // Cache trie updates to apply in fb_state later (avoids mut on fb_state parameter).
- // Wrap in Arc once so the same allocation is reused for both `executed` and fb_state.
- trie_updates_to_cache = Some(Arc::new(trie_output));
+ debug!(
+ target: "payload_builder",
+ flashblock_index,
+ incremental = calc.has_cached_trie(),
+ "Computing state root"
+ );
+
+ let output = calc
+ .compute(state_provider, hashed_state.clone())
+ .inspect_err(|err| {
+ warn!(
+ target: "payload_builder",
+ parent_header=%ctx.parent().hash(),
+ %err,
+ "failed to calculate state root for payload"
+ );
+ })
+ .map_err(PayloadBuilderError::other)?;
+ state_root = output.state_root;
+ trie_updates_to_cache = Some(output.trie_updates);
let state_root_calculation_time = state_root_start_time.elapsed();
ctx.metrics
@@ -1457,7 +1429,7 @@ where
block_number = ctx.block_number(),
flashblock_index = flashblock_index_for_trace,
duration_ms = state_root_calculation_time.as_millis() as u64,
- incremental = fb_state.as_deref().and_then(|s| s.prev_trie_updates.as_ref()).is_some(),
+ incremental = fb_state.as_deref().is_some_and(|s| s.state_root_calculator.has_cached_trie()),
cumulative_gas = info.cumulative_gas_used,
num_txs = info.executed_transactions.len(),
stage = "state_root_computed"
@@ -1555,9 +1527,7 @@ where
recovered_block: Arc::new(recovered_block),
execution_output: Arc::new(execution_output),
trie_updates: either::Either::Left(
- trie_updates_to_cache
- .clone()
- .unwrap_or_else(|| Arc::new(TrieUpdates::default())),
+ trie_updates_to_cache.unwrap_or_else(|| Arc::new(TrieUpdates::default())),
),
hashed_state: either::Either::Left(Arc::new(hashed_state)),
};
@@ -1598,9 +1568,6 @@ where
// pick the new transactions from the info field and update the last flashblock index
let (new_transactions, new_receipts) = if let Some(fb_state) = fb_state {
- if let Some(updates) = trie_updates_to_cache.take() {
- fb_state.prev_trie_updates = Some(updates);
- }
let new_txs = fb_state.slice_new_transactions(&info.executed_transactions);
let new_receipts = fb_state.slice_new_receipts(&info.receipts);
fb_state.set_last_flashblock_tx_index(info.executed_transactions.len());
diff --git a/crates/op-rbuilder/src/builder/state_root.rs b/crates/op-rbuilder/src/builder/state_root.rs
new file mode 100644
index 00000000..d4bab636
--- /dev/null
+++ b/crates/op-rbuilder/src/builder/state_root.rs
@@ -0,0 +1,369 @@
+//! State root computation for flashblocks.
+//!
+//! [`StateRootCalculator`] manages state root computation across a sequence of
+//! flashblocks. The first call computes from scratch; subsequent calls use the
+//! cached trie for incremental computation.
+
+use alloy_primitives::B256;
+use reth_provider::{ProviderError, StateRootProvider};
+use reth_trie::{HashedPostState, TrieInput, prefix_set::TriePrefixSetsMut, updates::TrieUpdates};
+use std::sync::Arc;
+
+/// Output of [`StateRootCalculator::compute`].
+pub struct StateRootOutput {
+ /// The computed state root hash.
+ pub state_root: B256,
+ /// Trie updates (shared with the calculator's internal cache).
+ pub trie_updates: Arc<TrieUpdates>,
+}
+
+/// Manages state root computation across flashblocks.
+///
+/// When `incremental` is true, caches trie updates and cumulative prefix sets
+/// so that each successive flashblock's state root can be computed
+/// incrementally. The first call always computes from scratch; subsequent
+/// calls reuse the cached trie.
+///
+/// When `incremental` is false, every call computes from scratch (no caching).
+///
+/// When computing incrementally, current prefix sets are extended with
+/// cumulative prefix sets from all prior flashblocks so the trie walker
+/// re-visits every previously modified path — preventing stale cached hashes
+/// from reverted storage slots.
+#[derive(Clone, Debug, Default)]
+pub struct StateRootCalculator {
+ incremental: bool,
+ prev_trie_updates: Option<Arc<TrieUpdates>>,
+ cumulative_prefix_sets: Option<TriePrefixSetsMut>,
+}
+
+impl StateRootCalculator {
+ pub fn new(incremental: bool) -> Self {
+ Self {
+ incremental,
+ prev_trie_updates: None,
+ cumulative_prefix_sets: None,
+ }
+ }
+
+ /// Whether the next [`Self::compute`] call will use cached trie state.
+ pub fn has_cached_trie(&self) -> bool {
+ self.prev_trie_updates.is_some()
+ }
+
+ /// Compute the state root, using the incremental path if a prior trie is cached.
+ ///
+ /// Updates internal state so the next call can build on this result.
+ pub fn compute(
+ &mut self,
+ state_provider: &(impl StateRootProvider + ?Sized),
+ hashed_state: HashedPostState,
+ ) -> Result<StateRootOutput, ProviderError> {
+ if !self.incremental {
+ let (state_root, trie_updates) =
+ state_provider.state_root_with_updates(hashed_state)?;
+ return Ok(StateRootOutput {
+ state_root,
+ trie_updates: Arc::new(trie_updates),
+ });
+ }
+
+ // Incremental path: build cumulative prefix sets (seed on the first
+ // call, extend on subsequent calls) so reverted slots in a later
+ // flashblock force the walker to re-visit previously modified
+ // subtrees and invalidate their stale cached hashes.
+ let mut prefix_sets = hashed_state.construct_prefix_sets();
+ if let Some(prev_sets) = self.cumulative_prefix_sets.take() {
+ prefix_sets.extend(prev_sets);
+ }
+ let cumulative = prefix_sets.clone();
+
+ let (state_root, trie_updates) = if let Some(prev_trie) = &self.prev_trie_updates {
+ let trie_input = TrieInput::new(prev_trie.as_ref().clone(), hashed_state, prefix_sets);
+ state_provider.state_root_from_nodes_with_updates(trie_input)?
+ } else {
+ // First call: full computation that seeds the cache for subsequent calls.
+ state_provider.state_root_with_updates(hashed_state)?
+ };
+
+ let trie_updates = Arc::new(trie_updates);
+ self.prev_trie_updates = Some(Arc::clone(&trie_updates));
+ self.cumulative_prefix_sets = Some(cumulative);
+
+ Ok(StateRootOutput {
+ state_root,
+ trie_updates,
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloy_primitives::{U256, keccak256};
+ use proptest::prelude::*;
+ use reth_db::{tables, transaction::DbTxMut};
+ use reth_primitives_traits::{Account, StorageEntry};
+ use reth_provider::{
+ DatabaseProviderFactory, LatestStateProvider, StorageTrieWriter, TrieWriter,
+ test_utils::create_test_provider_factory,
+ };
+ use reth_trie::{HashedStorage, StateRoot, StorageRoot};
+ use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot, LegacyKeyAdapter};
+
+ type InitialAccount = (B256, Account, Vec<(B256, U256)>);
+
+ /// Helper: insert an account and its storage into the DB.
+ fn insert_account(
+ tx: &impl DbTxMut,
+ hashed_address: B256,
+ account: Account,
+ storage: &[(B256, U256)],
+ ) {
+ tx.put::<tables::HashedAccounts>(hashed_address, account)
+ .unwrap();
+ for &(key, value) in storage {
+ tx.put::<tables::HashedStorages>(hashed_address, StorageEntry { key, value })
+ .unwrap();
+ }
+ }
+
+ /// Simulates two flashblocks and returns (full_root, incremental_root).
+ ///
+ /// When `populate_trie` is true, the DB is seeded with branch nodes from a
+ /// prior trie computation (mimicking a node that has been running for a
+ /// while). When false, only hashed account/storage rows are inserted.
+ fn simulate_flashblocks(
+ initial_accounts: &[InitialAccount],
+ fb1_state: HashedPostState,
+ fb2_cumulative_state: HashedPostState,
+ populate_trie: bool,
+ ) -> (B256, B256) {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ for (hashed_address, account, storage) in initial_accounts {
+ insert_account(tx.tx_ref(), *hashed_address, *account, storage);
+ }
+
+ if populate_trie {
+ // Populate storage trie tables.
+ // reth v2.0.0 parameterised StorageRoot over a TrieTableAdapter
+ // (LegacyKeyAdapter | PackedKeyAdapter) to support v1 and v2
+ // storage layouts. LegacyKeyAdapter matches pre-v2 chains, which
+ // is what these tests construct (no storage-settings migration).
+ for (hashed_address, _, _) in initial_accounts {
+ let (_, _, storage_updates) = <StorageRoot<
+ reth_trie_db::DatabaseTrieCursorFactory<_, LegacyKeyAdapter>,
+ reth_trie_db::DatabaseHashedCursorFactory<_>,
+ >>::from_tx_hashed(tx.tx_ref(), *hashed_address)
+ .root_with_updates()
+ .unwrap();
+ let sorted_updates = storage_updates.into_sorted();
+ tx.write_storage_trie_updates_sorted(core::iter::once((
+ hashed_address,
+ &sorted_updates,
+ )))
+ .unwrap();
+ }
+
+ // Populate account trie table — same LegacyKeyAdapter choice as
+ // the storage trie above.
+ let (_initial_root, account_trie_updates) = <StateRoot<
+ reth_trie_db::DatabaseTrieCursorFactory<_, LegacyKeyAdapter>,
+ reth_trie_db::DatabaseHashedCursorFactory<_>,
+ >>::from_tx(tx.tx_ref())
+ .root_with_updates()
+ .unwrap();
+ tx.write_trie_updates(account_trie_updates).unwrap();
+ }
+
+ tx.commit().unwrap();
+
+ // Full (ground truth): fresh calculator, single call
+ let provider = factory.database_provider_ro().unwrap();
+ let latest = LatestStateProvider::new(provider);
+ let full = StateRootCalculator::new(false)
+ .compute(&latest, fb2_cumulative_state.clone())
+ .unwrap();
+
+ // Incremental: calculator across both flashblocks
+ let mut calc = StateRootCalculator::new(true);
+
+ let provider = factory.database_provider_ro().unwrap();
+ let latest = LatestStateProvider::new(provider);
+ calc.compute(&latest, fb1_state).unwrap();
+
+ let provider = factory.database_provider_ro().unwrap();
+ let latest = LatestStateProvider::new(provider);
+ let incremental = calc.compute(&latest, fb2_cumulative_state).unwrap();
+
+ (full.state_root, incremental.state_root)
+ }
+
+ /// Single contract with 20 storage slots (populated trie with branch nodes).
+ ///
+ /// FB1 modifies slots[13] (in a hashed subtree under branch 0xb). FB2 reverts
+ /// it (absent from cumulative state) and modifies slots[0] (same parent branch,
+ /// different sub-nibble). Without cumulative prefix sets the walker would skip
+ /// the reverted subtree and use the stale cached hash → wrong root.
+ #[test]
+ fn test_storage_revert_to_original_with_populated_trie() {
+ let hashed_address = keccak256([0x70; 20]);
+ let slots: Vec<_> = (1u8..=20)
+ .map(|i| keccak256(B256::with_last_byte(i)))
+ .collect();
+
+ let account = Account {
+ nonce: 1,
+ balance: U256::from(1000),
+ bytecode_hash: Some(keccak256("contract")),
+ };
+
+ let initial_storage: Vec<_> = slots
+ .iter()
+ .enumerate()
+ .map(|(i, s)| (*s, U256::from((i + 1) as u64 * 100)))
+ .collect();
+ let initial_accounts = vec![(hashed_address, account, initial_storage)];
+
+ // FB1: Modify slots[13] (in the hashed subtree) from 1400→9999
+ let mut fb1_state = HashedPostState::default();
+ fb1_state.accounts.insert(hashed_address, Some(account));
+ fb1_state.storages.insert(
+ hashed_address,
+ HashedStorage::from_iter(false, [(slots[13], U256::from(9999))]),
+ );
+
+ // FB2: slots[13] reverted (absent). slots[0] modified (same parent branch 0xb).
+ let fb2_account = Account {
+ nonce: 2,
+ ..account
+ };
+ let mut fb2_cumulative = HashedPostState::default();
+ fb2_cumulative
+ .accounts
+ .insert(hashed_address, Some(fb2_account));
+ fb2_cumulative.storages.insert(
+ hashed_address,
+ HashedStorage::from_iter(false, [(slots[0], U256::from(777))]),
+ );
+
+ let (full, incremental) =
+ simulate_flashblocks(&initial_accounts, fb1_state, fb2_cumulative, true);
+ assert_eq!(
+ full, incremental,
+ "incremental state root diverges from ground truth. \
+ Full: {:?}, Incremental: {:?}.",
+ full, incremental
+ );
+ }
+
+ proptest! {
+ #![proptest_config(ProptestConfig::with_cases(2000))]
+
+ /// Fuzz test: generate random two-flashblock scenarios and verify
+ /// incremental state root matches full state root.
+ #[test]
+ fn fuzz_incremental_vs_full_state_root(
+ seed in 0u64..100_000,
+ num_accounts in 1usize..5,
+ num_initial_slots in 0usize..6,
+ num_fb1_changes in 1usize..4,
+ num_fb2_changes in 1usize..4,
+ ) {
+ let mut rng_state = seed;
+ let next = |s: &mut u64| -> u64 {
+ *s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
+ *s >> 33
+ };
+
+ // Generate initial accounts
+ let mut initial_accounts = Vec::new();
+ let mut all_slots = Vec::new();
+ for i in 0..num_accounts {
+ let hashed_addr = keccak256(B256::with_last_byte(i as u8 + 1));
+ let account = Account {
+ nonce: next(&mut rng_state) % 100,
+ balance: U256::from(next(&mut rng_state) % 100_000),
+ bytecode_hash: if next(&mut rng_state) % 3 == 0 {
+ Some(keccak256(format!("code_{i}").as_bytes()))
+ } else {
+ None
+ },
+ };
+ let mut storage = Vec::new();
+ let mut slots = Vec::new();
+ for s in 0..num_initial_slots {
+ let slot = keccak256(B256::from(U256::from(i * 100 + s)));
+ slots.push(slot);
+ storage.push((slot, U256::from(next(&mut rng_state) % 10_000 + 1)));
+ }
+ initial_accounts.push((hashed_addr, account, storage));
+ all_slots.push(slots);
+ }
+
+ // Generate FB1 state changes
+ let mut fb1_state = HashedPostState::default();
+ for _ in 0..num_fb1_changes {
+ let acct_idx = (next(&mut rng_state) as usize) % num_accounts;
+ let (hashed_addr, account, _) = &initial_accounts[acct_idx];
+ let new_account = Account {
+ nonce: account.nonce + next(&mut rng_state) % 10 + 1,
+ ..*account
+ };
+ fb1_state.accounts.insert(*hashed_addr, Some(new_account));
+ if !all_slots[acct_idx].is_empty() {
+ let slot_idx = (next(&mut rng_state) as usize) % all_slots[acct_idx].len();
+ let slot = all_slots[acct_idx][slot_idx];
+ fb1_state.storages.insert(
+ *hashed_addr,
+ HashedStorage::from_iter(
+ false,
+ [(slot, U256::from(next(&mut rng_state) % 50_000 + 1))],
+ ),
+ );
+ }
+ }
+
+ // Generate FB2 cumulative state (superset of FB1 with additional changes)
+ let mut fb2_cumulative = fb1_state.clone();
+ for _ in 0..num_fb2_changes {
+ let acct_idx = (next(&mut rng_state) as usize) % num_accounts;
+ let (hashed_addr, account, _) = &initial_accounts[acct_idx];
+ let existing = fb2_cumulative
+ .accounts
+ .get(hashed_addr)
+ .copied()
+ .flatten()
+ .unwrap_or(*account);
+ let new_account = Account {
+ nonce: existing.nonce + next(&mut rng_state) % 5 + 1,
+ ..existing
+ };
+ fb2_cumulative
+ .accounts
+ .insert(*hashed_addr, Some(new_account));
+ if !all_slots[acct_idx].is_empty() {
+ let slot_idx = (next(&mut rng_state) as usize) % all_slots[acct_idx].len();
+ let slot = all_slots[acct_idx][slot_idx];
+ fb2_cumulative.storages.insert(
+ *hashed_addr,
+ HashedStorage::from_iter(
+ false,
+ [(slot, U256::from(next(&mut rng_state) % 50_000 + 1))],
+ ),
+ );
+ }
+ }
+
+ let (full, incremental) =
+ simulate_flashblocks(&initial_accounts, fb1_state, fb2_cumulative, false);
+ prop_assert_eq!(
+ full, incremental,
+ "Fuzz: incremental diverged from full (seed={})", seed
+ );
+ }
+ }
+}
diff --git a/crates/op-rbuilder/src/tests/flashblocks.rs b/crates/op-rbuilder/src/tests/flashblocks.rs
index 35d2781f..98491f61 100644
--- a/crates/op-rbuilder/src/tests/flashblocks.rs
+++ b/crates/op-rbuilder/src/tests/flashblocks.rs
@@ -621,3 +621,64 @@ async fn progressive_lag_reduces_flashblocks(rbuilder: LocalInstance) -> eyre::R
flashblocks_listener.stop().await
}
+
+/// Verify that incremental state root computation produces valid blocks.
+///
+/// The test framework calls `new_payload` on each built block, which validates the
+/// state root against the node's own EVM execution. If the incremental trie produces
+/// an incorrect state root, `new_payload` will return `Invalid` and the block build
+/// will fail.
+#[rb_test(args = OpRbuilderArgs {
+ chain_block_time: 1000,
+ flashblocks: FlashblocksArgs {
+ flashblocks_port: 1239,
+ flashblocks_addr: "127.0.0.1".into(),
+ flashblocks_block_time: 200,
+ flashblocks_enable_incremental_state_root: true,
+ ..Default::default()
+ },
+ ..Default::default()
+})]
+async fn test_incremental_state_root(rbuilder: LocalInstance) -> eyre::Result<()> {
+ use alloy_primitives::B256;
+
+ let driver = rbuilder.driver().await?;
+ let flashblocks_listener = rbuilder.spawn_flashblocks_listener();
+
+ // Build multiple blocks with transactions to exercise the incremental trie
+ // across flashblock boundaries within each block.
+ for _ in 0..3 {
+ for _ in 0..3 {
+ let _ = driver
+ .create_transaction()
+ .random_valid_transfer()
+ .send()
+ .await?;
+ }
+ let block = driver.build_new_block_with_current_timestamp(None).await?;
+
+ // Block was accepted by new_payload (state root validated by the node).
+ // Also verify the state root is actually computed (non-zero).
+ assert_ne!(
+ block.header.state_root,
+ B256::ZERO,
+ "State root should be computed with incremental trie enabled"
+ );
+ assert!(
+ block.transactions.len() >= 3,
+ "Block should contain user transactions"
+ );
+ }
+
+ let flashblocks = flashblocks_listener.get_flashblocks();
+ // 3 blocks × 6 flashblocks each: 1 base/fallback (index 0) + 5 incremental
+ // flashblocks (1000ms / 200ms).
+ assert_eq!(
+ 18,
+ flashblocks.len(),
+ "Expected 18 flashblocks across 3 blocks, got {}",
+ flashblocks.len()
+ );
+
+ flashblocks_listener.stop().await
+}
diff --git a/crates/op-rbuilder/src/tests/mod.rs b/crates/op-rbuilder/src/tests/mod.rs
index 53b0f30e..760f7a13 100644
--- a/crates/op-rbuilder/src/tests/mod.rs
+++ b/crates/op-rbuilder/src/tests/mod.rs
@@ -31,6 +31,7 @@ mod forks;
#[cfg(test)]
mod backrun;
+
// If the order of deployment from the signer changes the address will change
#[cfg(test)]
const FLASHBLOCKS_NUMBER_ADDRESS: alloy_primitives::Address =