From 75b8933e46a43450696c803bc1fe3b9ce1c52bc7 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Tue, 28 Apr 2026 11:04:32 +0530 Subject: [PATCH 01/13] feat: add realtime fork as new sibling crate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the realtime fork (`realtime/`) as a clean sibling to shasta/permissionless/pacaya, ported from the surge-real-time-poc and feat/l2-to-l1-to-l2-sync work without dragging in the shasta-side and pacaya-side modifications that those branches accumulated. What's in the new fork: - L1 + L2 execution layers with bridge-callback simulation - Async proposal submitter, batch builder, bridge handler (UserOp status tracking, mempool scan for return signals) - Realtime chain monitor for `RealTimeInbox::ProposedAndProved` - Raiko v3 client; deferred-finalization multicall builder - Self-contained `NodeConfig` (does not rely on pacaya::node::config, which was removed in #941 when pacaya became a utility crate) Wiring: - `Fork::Realtime` enum variant + `FromStr` impl + `FORK` env var override in common/src/fork_info, default-disabled timestamp (only activates when `FORK=realtime` is set) - `Realtime` match arm in `Node/src/main.rs` - Workspace `Cargo.toml`: adds realtime member + `sled` dep Common deltas (consumed by realtime): - `taiko_driver::reorg_stale_block` RPC + `ReorgStaleBlock{Request,Response}` - `BuildPreconfBlockResponse.state_root` (parsed leniently — defaults to B256::ZERO if missing, so existing shasta/permissionless JSON paths remain compatible) - `transaction_monitor::monitor_new_transaction` accepts optional tx-hash and tx-result oneshot notifiers; existing 2-arg callers are unaffected (only the new realtime async submitter passes Some) Untouched from origin/master: - `shasta/`, `pacaya/` — zero diff - `permissionless/` — single line: `state_root: B256::ZERO` placeholder in the BuildPreconfBlockResponse construction, mirroring mikhailUshakoff's PR #939 approach - 
`common/src/config/mod.rs` — the `TAIKO_BRIDGE_L2_ADDRESS` → `L2_BRIDGE_ADDRESS` rename flagged on PR #945 is intentionally deferred to a separate coordinated PR - `common/src/shared/internal_server.rs` and the warp→axum migration — preserved as-is - Dockerfile + x86-64 CI fixes — separate PR Co-Authored-By: Claude Opus 4.7 (1M context) --- Cargo.lock | 184 +++- Cargo.toml | 3 + common/src/fork_info/config.rs | 2 + common/src/fork_info/fork.rs | 15 + common/src/fork_info/mod.rs | 9 + common/src/l2/taiko_driver/mod.rs | 29 +- common/src/l2/taiko_driver/models.rs | 19 + common/src/l2/taiko_driver/operation_type.rs | 2 + common/src/shared/transaction_monitor.rs | 86 +- node/Cargo.toml | 1 + node/src/main.rs | 11 + permissionless/src/node/block_advancer.rs | 1 + realtime/Cargo.toml | 44 + realtime/src/chain_monitor/mod.rs | 12 + realtime/src/l1/abi/Multicall.json | 44 + realtime/src/l1/abi/RealTimeInbox.json | 975 ++++++++++++++++++ realtime/src/l1/bindings.rs | 110 ++ realtime/src/l1/config.rs | 40 + realtime/src/l1/execution_layer.rs | 555 ++++++++++ realtime/src/l1/mod.rs | 5 + realtime/src/l1/proposal_tx_builder.rs | 358 +++++++ realtime/src/l1/protocol_config.rs | 33 + realtime/src/l2/abi/Anchor.json | 1 + realtime/src/l2/bindings.rs | 10 + realtime/src/l2/execution_layer.rs | 556 ++++++++++ realtime/src/l2/mod.rs | 3 + realtime/src/l2/taiko.rs | 368 +++++++ realtime/src/lib.rs | 171 +++ realtime/src/node/config.rs | 8 + realtime/src/node/mod.rs | 525 ++++++++++ .../node/proposal_manager/async_submitter.rs | 499 +++++++++ .../node/proposal_manager/batch_builder.rs | 360 +++++++ .../node/proposal_manager/bridge_handler.rs | 519 ++++++++++ .../node/proposal_manager/l2_block_payload.rs | 12 + realtime/src/node/proposal_manager/mod.rs | 646 ++++++++++++ .../src/node/proposal_manager/proposal.rs | 117 +++ realtime/src/raiko/mod.rs | 184 ++++ realtime/src/shared_abi/Bridge.json | 738 +++++++++++++ realtime/src/shared_abi/SignalService.json | 1 + 
realtime/src/shared_abi/bindings.rs | 33 + realtime/src/shared_abi/mod.rs | 1 + realtime/src/utils/config.rs | 121 +++ realtime/src/utils/mod.rs | 1 + 43 files changed, 7352 insertions(+), 60 deletions(-) create mode 100644 realtime/Cargo.toml create mode 100644 realtime/src/chain_monitor/mod.rs create mode 100644 realtime/src/l1/abi/Multicall.json create mode 100644 realtime/src/l1/abi/RealTimeInbox.json create mode 100644 realtime/src/l1/bindings.rs create mode 100644 realtime/src/l1/config.rs create mode 100644 realtime/src/l1/execution_layer.rs create mode 100644 realtime/src/l1/mod.rs create mode 100644 realtime/src/l1/proposal_tx_builder.rs create mode 100644 realtime/src/l1/protocol_config.rs create mode 100644 realtime/src/l2/abi/Anchor.json create mode 100644 realtime/src/l2/bindings.rs create mode 100644 realtime/src/l2/execution_layer.rs create mode 100644 realtime/src/l2/mod.rs create mode 100644 realtime/src/l2/taiko.rs create mode 100644 realtime/src/lib.rs create mode 100644 realtime/src/node/config.rs create mode 100644 realtime/src/node/mod.rs create mode 100644 realtime/src/node/proposal_manager/async_submitter.rs create mode 100644 realtime/src/node/proposal_manager/batch_builder.rs create mode 100644 realtime/src/node/proposal_manager/bridge_handler.rs create mode 100644 realtime/src/node/proposal_manager/l2_block_payload.rs create mode 100644 realtime/src/node/proposal_manager/mod.rs create mode 100644 realtime/src/node/proposal_manager/proposal.rs create mode 100644 realtime/src/raiko/mod.rs create mode 100644 realtime/src/shared_abi/Bridge.json create mode 100644 realtime/src/shared_abi/SignalService.json create mode 100644 realtime/src/shared_abi/bindings.rs create mode 100644 realtime/src/shared_abi/mod.rs create mode 100644 realtime/src/utils/config.rs create mode 100644 realtime/src/utils/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 5760a348..7e9a4e4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -623,7 +623,7 @@ dependencies = [ 
"futures", "futures-utils-wasm", "lru 0.16.3", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "reqwest 0.12.28", "serde", @@ -647,7 +647,7 @@ dependencies = [ "auto_impl", "bimap", "futures", - "parking_lot", + "parking_lot 0.12.5", "serde", "serde_json", "tokio", @@ -956,7 +956,7 @@ dependencies = [ "derive_more", "futures", "futures-utils-wasm", - "parking_lot", + "parking_lot 0.12.5", "serde", "serde_json", "thiserror 2.0.18", @@ -2194,7 +2194,7 @@ dependencies = [ [[package]] name = "common" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alloy", "alloy-json-rpc", @@ -2581,7 +2581,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core", + "parking_lot_core 0.9.12", "serde", ] @@ -2608,7 +2608,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.117", ] [[package]] @@ -2830,7 +2830,7 @@ dependencies = [ "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot", + "parking_lot 0.12.5", "rand 0.8.5", "smallvec", "socket2 0.5.10", @@ -2863,7 +2863,7 @@ dependencies = [ "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot", + "parking_lot 0.12.5", "rand 0.8.5", "smallvec", "socket2 0.5.10", @@ -3345,6 +3345,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -3426,7 +3436,7 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot", + "parking_lot 0.12.5", ] [[package]] @@ -3508,6 +3518,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3766,7 +3785,7 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot", + "parking_lot 0.12.5", "rand 0.9.4", "resolv-conf", "serde", @@ -4268,6 +4287,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipconfig" version = "0.3.2" @@ -4406,7 +4434,7 @@ dependencies = [ "http-body", "http-body-util", "jsonrpsee-types", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.9.4", "rustc-hash", @@ -4828,7 +4856,7 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -4850,7 +4878,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot", + "parking_lot 0.12.5", "smallvec", "tracing", ] @@ -5513,7 +5541,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "equivalent", - "parking_lot", + "parking_lot 0.12.5", "portable-atomic", "smallvec", "tagptr", @@ -5675,7 +5703,7 @@ dependencies = [ [[package]] name = "node" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alloy", "alloy-json-rpc", @@ -5686,6 +5714,7 @@ dependencies = [ "common", "pacaya", "permissionless", + "realtime", "rustls", "serde_json", "shasta", @@ -6273,7 +6302,7 @@ dependencies = [ [[package]] name = "p2p-boot-node" -version = "1.37.1" +version = "1.37.2" dependencies = [ "anyhow", "discv5 0.10.2", @@ -6285,7 +6314,7 @@ dependencies = [ 
[[package]] name = "pacaya" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alloy", "alloy-json-rpc", @@ -6350,6 +6379,17 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.5" @@ -6357,7 +6397,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.12", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -6406,7 +6460,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "permissionless" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", "alloy", @@ -6793,7 +6847,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot", + "parking_lot 0.12.5", "thiserror 2.0.18", ] @@ -6805,7 +6859,7 @@ checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", - "parking_lot", + "parking_lot 0.12.5", "prometheus-client-derive-encode", ] @@ -7169,6 +7223,50 @@ dependencies = [ "yasna", ] +[[package]] +name = "realtime" +version = "1.37.2" +dependencies = [ + 
"alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", + "alloy", + "alloy-json-rpc", + "alloy-rlp", + "anyhow", + "async-trait", + "bindings", + "chrono", + "common", + "dotenvy", + "flate2", + "futures-util", + "hex", + "http", + "jsonrpsee", + "jsonwebtoken 10.3.0", + "mockito", + "pacaya", + "prometheus", + "protocol", + "reqwest 0.13.2", + "rpc", + "serde", + "serde_json", + "sled", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber 0.3.23", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -7387,7 +7485,7 @@ dependencies = [ "alloy-primitives", "derive_more", "metrics 0.24.3", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.9.4", "reth-chainspec 1.11.3", @@ -7607,7 +7705,7 @@ dependencies = [ "discv5 0.10.2", "enr", "itertools 0.14.0", - "parking_lot", + "parking_lot 0.12.5", "rand 0.8.5", "reth-ethereum-forks 1.11.3", "reth-net-banlist 1.11.3", @@ -8020,7 +8118,7 @@ dependencies = [ "byteorder", "dashmap", "derive_more", - "parking_lot", + "parking_lot 0.12.5", "reth-mdbx-sys", "smallvec", "thiserror 2.0.18", @@ -8105,7 +8203,7 @@ dependencies = [ "futures", "itertools 0.14.0", "metrics 0.24.3", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", "rand 0.9.4", @@ -8492,7 +8590,7 @@ dependencies = [ "itertools 0.14.0", "metrics 0.24.3", "notify", - "parking_lot", + "parking_lot 0.12.5", "rayon", "reth-chain-state", "reth-chainspec 1.11.3", @@ -8785,7 +8883,7 @@ dependencies = [ "bitflags 2.11.0", "futures-util", "metrics 0.24.3", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.9.4", "reth-chain-state", @@ -8827,7 +8925,7 @@ dependencies = [ "auto_impl", "itertools 0.14.0", 
"metrics 0.24.3", - "parking_lot", + "parking_lot 0.12.5", "reth-execution-errors", "reth-metrics 1.11.3", "reth-primitives-traits 1.11.3", @@ -8870,7 +8968,7 @@ source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c4 dependencies = [ "alloy-primitives", "metrics 0.24.3", - "parking_lot", + "parking_lot 0.12.5", "reth-db-api", "reth-execution-errors", "reth-metrics 1.11.3", @@ -10161,7 +10259,7 @@ dependencies = [ [[package]] name = "shasta" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", "alloy", @@ -10262,6 +10360,22 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" +[[package]] +name = "sled" +version = "0.34.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" +dependencies = [ + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log", + "parking_lot 0.11.2", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -10870,7 +10984,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot", + "parking_lot 0.12.5", "pin-project-lite", "signal-hook-registry", "socket2 0.6.2", @@ -11441,7 +11555,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "urc" -version = "1.37.1" +version = "1.37.2" dependencies = [ "alloy", "alloy-json-rpc", @@ -11730,7 +11844,7 @@ checksum = "1c598d6b99ea013e35844697fc4670d08339d5cda15588f193c6beedd12f644b" dependencies = [ "futures", "js-sys", - "parking_lot", + "parking_lot 0.12.5", "pin-utils", "slab", "wasm-bindgen", @@ -12480,7 +12594,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.8.5", 
"static_assertions", @@ -12495,7 +12609,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot", + "parking_lot 0.12.5", "pin-project", "rand 0.9.4", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index 0e362688..74bc8cbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "node", "pacaya", "permissionless", + "realtime", "shasta", "tools/p2p_boot_node", "urc", @@ -85,12 +86,14 @@ permissionless = { path = "permissionless" } prometheus = { version = "0.14", default-features = false } rand = { version = "0.10", default-features = false } rand_core = { version = "0.9", default-features = false } +realtime = { path = "realtime" } reqwest = { version = "0.13", default-features = true, features = ["json"] } rustls = { version = "0.23", default-features = true } secp256k1 = { version = "0.30", features = ["recovery", "rand"] } serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false } shasta = { path = "shasta" } +sled = { version = "0.34", default-features = false } ssz_rs = { version = "0.9.0" } strum = { version = "0.27", features = ["derive"] } diff --git a/common/src/fork_info/config.rs b/common/src/fork_info/config.rs index dd845c24..75dad7b4 100644 --- a/common/src/fork_info/config.rs +++ b/common/src/fork_info/config.rs @@ -15,6 +15,7 @@ impl Default for ForkInfoConfig { fork_switch_timestamps: vec![ Duration::from_secs(0), // Shasta Duration::from_secs(99999999999), // Permissionless + Duration::from_secs(99999999999), // Realtime ], fork_switch_transition_period: Duration::from_secs(15), } @@ -27,6 +28,7 @@ impl From<&Config> for ForkInfoConfig { .map(|f| match f { Fork::Shasta => Duration::from_secs(config.shasta_timestamp_sec), Fork::Permissionless => Duration::from_secs(config.permissionless_timestamp_sec), + Fork::Realtime => Duration::from_secs(99999999999), // Only activated via FORK=realtime }) .collect(); Self { diff --git 
a/common/src/fork_info/fork.rs b/common/src/fork_info/fork.rs index d1435d1a..4e07bd43 100644 --- a/common/src/fork_info/fork.rs +++ b/common/src/fork_info/fork.rs @@ -1,10 +1,25 @@ use std::fmt::{Display, Formatter, Result as FmtResult}; +use std::str::FromStr; use strum::{EnumIter, IntoEnumIterator}; #[derive(Clone, Debug, PartialEq, Eq, EnumIter)] pub enum Fork { Shasta, Permissionless, + Realtime, +} + +impl FromStr for Fork { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "shasta" => Ok(Fork::Shasta), + "permissionless" => Ok(Fork::Permissionless), + "realtime" => Ok(Fork::Realtime), + _ => Err(anyhow::anyhow!("Unknown fork: {}", s)), + } + } } impl Fork { diff --git a/common/src/fork_info/mod.rs b/common/src/fork_info/mod.rs index d6603846..a80729a7 100644 --- a/common/src/fork_info/mod.rs +++ b/common/src/fork_info/mod.rs @@ -3,8 +3,10 @@ pub mod fork; use anyhow::Error; use config::ForkInfoConfig; pub use fork::Fork; +use std::str::FromStr; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use strum::IntoEnumIterator; +use tracing::info; #[derive(Debug, Clone)] pub struct ForkInfo { @@ -23,6 +25,13 @@ impl Default for ForkInfo { impl ForkInfo { pub fn from_config(config: ForkInfoConfig) -> Result { + // FORK env var overrides timestamp-based fork detection + if let Ok(fork_override) = std::env::var("FORK") { + let fork = Fork::from_str(&fork_override)?; + info!("FORK env var set, overriding fork detection to: {}", fork); + return Ok(Self { fork, config }); + } + let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?; let fork = Self::choose_current_fork(&config, current_timestamp.as_secs())?; Ok(Self { fork, config }) diff --git a/common/src/l2/taiko_driver/mod.rs b/common/src/l2/taiko_driver/mod.rs index 5111a72f..06b9ac05 100644 --- a/common/src/l2/taiko_driver/mod.rs +++ b/common/src/l2/taiko_driver/mod.rs @@ -6,7 +6,10 @@ mod status_provider_trait; use crate::{metrics::Metrics, 
utils::rpc_client::HttpRPCClient}; use anyhow::Error; pub use config::TaikoDriverConfig; -use models::{BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, TaikoStatus}; +use models::{ + BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, ReorgStaleBlockRequest, + ReorgStaleBlockResponse, TaikoStatus, +}; pub use operation_type::OperationType; use serde_json::Value; pub use status_provider_trait::StatusProvider; @@ -73,6 +76,30 @@ impl TaikoDriver { } } + pub async fn reorg_stale_block( + &self, + new_head_block_number: u64, + ) -> Result { + const API_ENDPOINT: &str = "reorgStaleBlock"; + + let request_body = ReorgStaleBlockRequest { + new_head_block_number, + }; + + let response = self + .call_driver( + &self.preconf_rpc, + http::Method::POST, + API_ENDPOINT, + &request_body, + OperationType::ReorgStaleBlock, + ) + .await?; + + let reorg_response: ReorgStaleBlockResponse = serde_json::from_value(response)?; + Ok(reorg_response) + } + async fn call_driver( &self, client: &HttpRPCClient, diff --git a/common/src/l2/taiko_driver/models.rs b/common/src/l2/taiko_driver/models.rs index 6c9c2597..60d72e40 100644 --- a/common/src/l2/taiko_driver/models.rs +++ b/common/src/l2/taiko_driver/models.rs @@ -14,6 +14,7 @@ pub struct BuildPreconfBlockRequestBody { pub struct BuildPreconfBlockResponse { pub number: u64, pub hash: B256, + pub state_root: B256, pub parent_hash: B256, pub is_forced_inclusion: bool, } @@ -29,6 +30,11 @@ impl BuildPreconfBlockResponse { ) .ok()?, hash: Self::to_b256(header.get("hash")?.as_str()?)?, + state_root: header + .get("stateRoot") + .and_then(|v| v.as_str()) + .and_then(Self::to_b256) + .unwrap_or(B256::ZERO), parent_hash: Self::to_b256(header.get("parentHash")?.as_str()?)?, is_forced_inclusion, }) @@ -67,6 +73,19 @@ pub struct TaikoStatus { pub end_of_sequencing_block_hash: B256, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReorgStaleBlockRequest { + pub new_head_block_number: u64, +} + 
+#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReorgStaleBlockResponse { + pub new_head_block_hash: B256, + pub blocks_removed: u64, +} + fn deserialize_end_of_sequencing_block_hash<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, diff --git a/common/src/l2/taiko_driver/operation_type.rs b/common/src/l2/taiko_driver/operation_type.rs index be2677af..83ce1bda 100644 --- a/common/src/l2/taiko_driver/operation_type.rs +++ b/common/src/l2/taiko_driver/operation_type.rs @@ -4,6 +4,7 @@ use std::fmt; pub enum OperationType { Preconfirm, Reanchor, + ReorgStaleBlock, Status, } @@ -12,6 +13,7 @@ impl fmt::Display for OperationType { let s = match self { OperationType::Preconfirm => "Preconfirm", OperationType::Reanchor => "Reanchor", + OperationType::ReorgStaleBlock => "ReorgStaleBlock", OperationType::Status => "Status", }; write!(f, "{s}") diff --git a/common/src/shared/transaction_monitor.rs b/common/src/shared/transaction_monitor.rs index fd3c7aae..d9a09e18 100644 --- a/common/src/shared/transaction_monitor.rs +++ b/common/src/shared/transaction_monitor.rs @@ -54,6 +54,9 @@ pub struct TransactionMonitorThread { metrics: Arc, chain_id: u64, sent_tx_hashes: Vec>, + tx_hash_notifier: Option>, + /// Notifies the caller whether the transaction was confirmed (true) or failed (false). 
+ tx_result_notifier: Option>, } //#[derive(Debug)] @@ -103,6 +106,8 @@ impl TransactionMonitor { &self, tx: TransactionRequest, nonce: u64, + tx_hash_notifier: Option>, + tx_result_notifier: Option>, ) -> Result<(), Error> { let mut guard = self.join_handle.lock().await; if let Some(join_handle) = guard.as_ref() @@ -113,7 +118,7 @@ impl TransactionMonitor { )); } - let monitor_thread = TransactionMonitorThread::new( + let mut monitor_thread = TransactionMonitorThread::new( self.provider.clone(), self.config.clone(), nonce, @@ -121,6 +126,8 @@ impl TransactionMonitor { self.metrics.clone(), self.chain_id, ); + monitor_thread.tx_hash_notifier = tx_hash_notifier; + monitor_thread.tx_result_notifier = tx_result_notifier; let join_handle = monitor_thread.spawn_monitoring_task(tx); *guard = Some(join_handle); Ok(()) @@ -182,6 +189,8 @@ impl TransactionMonitorThread { metrics, chain_id, sent_tx_hashes: Vec::new(), + tx_hash_notifier: None, + tx_result_notifier: None, } } pub fn spawn_monitoring_task(mut self, tx: TransactionRequest) -> JoinHandle<()> { @@ -190,6 +199,12 @@ impl TransactionMonitorThread { }) } + fn notify_result(&mut self, success: bool) { + if let Some(notifier) = self.tx_result_notifier.take() { + let _ = notifier.send(success); + } + } + pub fn spawn_monitoring_task_with_builder( mut self, tx_builder: impl TransactionRequestBuilder, @@ -212,6 +227,7 @@ impl TransactionMonitorThread { if !matches!(tx.buildable_type(), Some(TxType::Eip1559 | TxType::Eip4844)) { self.send_error_signal(TransactionError::UnsupportedTransactionType) .await; + self.notify_result(false); return; } tx.set_chain_id(self.chain_id); @@ -265,11 +281,13 @@ impl TransactionMonitorThread { error!("Failed to get L1 block number: {}", e); self.send_error_signal(TransactionError::GetBlockNumberFailed) .await; + self.notify_result(false); return; } }; if sending_attempt > 0 && self.verify_tx_included(sending_attempt).await { + self.notify_result(true); return; } @@ -277,12 +295,18 @@ impl 
TransactionMonitorThread { if let Some(pending_tx) = self.send_transaction(tx_clone, sending_attempt).await { pending_tx } else { + self.notify_result(false); return; }; let tx_hash = *pending_tx.tx_hash(); self.sent_tx_hashes.push(tx_hash); + // Notify the first tx hash to the caller if requested + if let Some(notifier) = self.tx_hash_notifier.take() { + let _ = notifier.send(tx_hash); + } + if root_provider.is_none() { root_provider = Some(pending_tx.provider().clone()); } @@ -303,7 +327,7 @@ impl TransactionMonitorThread { max_fee_per_blob_gas ); - if self + if let Some(confirmed) = self .is_transaction_handled_by_builder( pending_tx.provider().clone(), tx_hash, @@ -312,6 +336,7 @@ impl TransactionMonitorThread { ) .await { + self.notify_result(confirmed); return; } @@ -326,14 +351,15 @@ impl TransactionMonitorThread { //Wait for transaction result let mut wait_attempt = 0; + let mut resolved = false; if let Some(root_provider) = root_provider { // We can use unwrap since tx_hashes is updated before root_provider let tx_hash = self .sent_tx_hashes .last() .expect("assert: tx_hashes is updated before root_provider"); - while wait_attempt < self.config.max_attempts_to_wait_tx - && !self + while wait_attempt < self.config.max_attempts_to_wait_tx { + if let Some(confirmed) = self .is_transaction_handled_by_builder( root_provider.clone(), *tx_hash, @@ -341,51 +367,63 @@ impl TransactionMonitorThread { self.config.max_attempts_to_send_tx, ) .await - && !self + { + self.notify_result(confirmed); + resolved = true; + break; + } + if self .verify_tx_included(wait_attempt + self.config.max_attempts_to_send_tx) .await - { + { + self.notify_result(true); + resolved = true; + break; + } warn!("🟣 Transaction watcher timed out without a result. 
Waiting..."); wait_attempt += 1; } } - if wait_attempt >= self.config.max_attempts_to_wait_tx { - error!( - "⛔ Transaction {} with nonce {} not confirmed", - if let Some(tx_hash) = self.sent_tx_hashes.last() { - tx_hash.to_string() - } else { - "unknown".to_string() - }, - self.nonce, - ); + if !resolved { + if wait_attempt >= self.config.max_attempts_to_wait_tx { + error!( + "⛔ Transaction {} with nonce {} not confirmed", + if let Some(tx_hash) = self.sent_tx_hashes.last() { + tx_hash.to_string() + } else { + "unknown".to_string() + }, + self.nonce, + ); - self.send_error_signal(TransactionError::NotConfirmed).await; + self.send_error_signal(TransactionError::NotConfirmed).await; + } + self.notify_result(false); } } - /// Returns true if transaction removed from mempool for any reason + /// Returns Some(true) if confirmed, Some(false) if failed, None if still pending. async fn is_transaction_handled_by_builder( &self, root_provider: RootProvider, tx_hash: B256, l1_block_at_send: u64, sending_attempt: u64, - ) -> bool { + ) -> Option { loop { let check_tx = PendingTransactionBuilder::new(root_provider.clone(), tx_hash); let tx_status = self.wait_for_tx_receipt(check_tx, sending_attempt).await; match tx_status { - TxStatus::Confirmed => return true, + TxStatus::Confirmed => return Some(true), TxStatus::Failed(err_str) => { if let Some(error) = tools::convert_error_payload(&err_str) { self.send_error_signal(error).await; - return true; + return Some(false); } self.send_error_signal(TransactionError::TransactionReverted) .await; - return true; + return Some(false); } TxStatus::Pending => {} // Continue with retry attempts } @@ -397,7 +435,7 @@ impl TransactionMonitorThread { error!("Failed to get L1 block number: {}", e); self.send_error_signal(TransactionError::GetBlockNumberFailed) .await; - return true; + return Some(false); } }; if current_l1_height != l1_block_at_send { @@ -409,7 +447,7 @@ impl TransactionMonitorThread { ); } - false + None } async fn 
send_transaction( diff --git a/node/Cargo.toml b/node/Cargo.toml index 5a43edd4..ac312c2b 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -20,6 +20,7 @@ clap = { workspace = true } common = { workspace = true } pacaya = { workspace = true } permissionless = { workspace = true } +realtime = { workspace = true } rustls = { workspace = true } serde_json = { workspace = true } shasta = { workspace = true } diff --git a/node/src/main.rs b/node/src/main.rs index a5aa128a..a33377b7 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -104,6 +104,17 @@ async fn run_node(iteration: u64, metrics: Arc) -> Result { + info!("Current fork: REALTIME ⚡"); + realtime::create_realtime_node( + config.clone(), + metrics.clone(), + cancel_token.clone(), + fork_info, + ) + .await?; + Vec::new() + } }; extra_routes.push(metrics_route(metrics.clone())); diff --git a/permissionless/src/node/block_advancer.rs b/permissionless/src/node/block_advancer.rs index a0aeec3a..fe2fe26a 100644 --- a/permissionless/src/node/block_advancer.rs +++ b/permissionless/src/node/block_advancer.rs @@ -79,6 +79,7 @@ impl BlockAdvancer for PermissionlessBlockAdvancer { Ok(BuildPreconfBlockResponse { number: l2_slot_context.info.parent_id() + 1, hash: B256::ZERO, // TODO: missing hash from the response, do we need it for permissionless? 
+ state_root: B256::ZERO, parent_hash: *l2_slot_context.info.parent_hash(), is_forced_inclusion: l2_block_payload.is_forced_inclusion, }) diff --git a/realtime/Cargo.toml b/realtime/Cargo.toml new file mode 100644 index 00000000..d345d87c --- /dev/null +++ b/realtime/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "realtime" +version.workspace = true +edition.workspace = true +repository.workspace = true +license.workspace = true +publish = false + +[dependencies] +alloy = { workspace = true } +alloy-json-rpc = { workspace = true } +alloy-rlp = { workspace = true } +anyhow = { workspace = true } +async-trait = { workspace = true } +chrono = { workspace = true } +common = { workspace = true } +dotenvy = { workspace = true } +flate2 = { workspace = true } +futures-util = { workspace = true } +hex = { workspace = true } +http = { workspace = true } +jsonrpsee = { workspace = true } +jsonwebtoken = { workspace = true } +pacaya = { workspace = true } +prometheus = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sled = { workspace = true } +taiko_alethia_reth = { workspace = true } +taiko_bindings = { workspace = true } +taiko_protocol = { workspace = true } +taiko_rpc = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } + +[dev-dependencies] +mockito = { workspace = true } +tokio = { workspace = true, features = ["full", "test-util"] } + +[lints] +workspace = true diff --git a/realtime/src/chain_monitor/mod.rs b/realtime/src/chain_monitor/mod.rs new file mode 100644 index 00000000..2af8d789 --- /dev/null +++ b/realtime/src/chain_monitor/mod.rs @@ -0,0 +1,12 @@ +use crate::l1::bindings::RealTimeInbox; +use common::chain_monitor::ChainMonitor; +use tracing::info; + +pub type RealtimeChainMonitor = ChainMonitor; + +pub fn print_proposed_and_proved_info(event: &RealTimeInbox::ProposedAndProved) { 
+ info!( + "ProposedAndProved event → proposalHash = {}, lastFinalizedBlockHash = {}, maxAnchorBlockNumber = {}", + event.proposalHash, event.lastFinalizedBlockHash, event.maxAnchorBlockNumber + ); +} diff --git a/realtime/src/l1/abi/Multicall.json b/realtime/src/l1/abi/Multicall.json new file mode 100644 index 00000000..53b423ce --- /dev/null +++ b/realtime/src/l1/abi/Multicall.json @@ -0,0 +1,44 @@ +{ + "abi": [ + { + "type": "receive", + "stateMutability": "payable" + }, + { + "type": "function", + "name": "multicall", + "inputs": [ + { + "name": "calls", + "type": "tuple[]", + "internalType": "struct Multicall.Call[]", + "components": [ + { + "name": "target", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "results", + "type": "bytes[]", + "internalType": "bytes[]" + } + ], + "stateMutability": "payable" + } + ] +} \ No newline at end of file diff --git a/realtime/src/l1/abi/RealTimeInbox.json b/realtime/src/l1/abi/RealTimeInbox.json new file mode 100644 index 00000000..68ef732d --- /dev/null +++ b/realtime/src/l1/abi/RealTimeInbox.json @@ -0,0 +1,975 @@ +{ + "abi": [ + { + "type": "constructor", + "inputs": [ + { + "name": "_config", + "type": "tuple", + "internalType": "struct IRealTimeInbox.Config", + "components": [ + { + "name": "proofVerifier", + "type": "address", + "internalType": "address" + }, + { + "name": "signalService", + "type": "address", + "internalType": "address" + }, + { + "name": "basefeeSharingPctg", + "type": "uint8", + "internalType": "uint8" + } + ] + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "acceptOwnership", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "activate", + "inputs": [ + { + "name": "_genesisBlockHash", + "type": "bytes32", + 
"internalType": "bytes32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "decodeProposeInput", + "inputs": [ + { + "name": "_data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "input_", + "type": "tuple", + "internalType": "struct IRealTimeInbox.ProposeInput", + "components": [ + { + "name": "blobReference", + "type": "tuple", + "internalType": "struct LibBlobs.BlobReference", + "components": [ + { + "name": "blobStartIndex", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "numBlobs", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "offset", + "type": "uint24", + "internalType": "uint24" + } + ] + }, + { + "name": "signalSlots", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "maxAnchorBlockNumber", + "type": "uint48", + "internalType": "uint48" + } + ] + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "encodeProposeInput", + "inputs": [ + { + "name": "_input", + "type": "tuple", + "internalType": "struct IRealTimeInbox.ProposeInput", + "components": [ + { + "name": "blobReference", + "type": "tuple", + "internalType": "struct LibBlobs.BlobReference", + "components": [ + { + "name": "blobStartIndex", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "numBlobs", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "offset", + "type": "uint24", + "internalType": "uint24" + } + ] + }, + { + "name": "signalSlots", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "maxAnchorBlockNumber", + "type": "uint48", + "internalType": "uint48" + } + ] + } + ], + "outputs": [ + { + "name": "encoded_", + "type": "bytes", + "internalType": "bytes" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "finalizePropose", + "inputs": [ + { + "name": "_requiredReturnSignals", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + 
], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getConfig", + "inputs": [], + "outputs": [ + { + "name": "config_", + "type": "tuple", + "internalType": "struct IRealTimeInbox.Config", + "components": [ + { + "name": "proofVerifier", + "type": "address", + "internalType": "address" + }, + { + "name": "signalService", + "type": "address", + "internalType": "address" + }, + { + "name": "basefeeSharingPctg", + "type": "uint8", + "internalType": "uint8" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getLastFinalizedBlockHash", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "hashCommitment", + "inputs": [ + { + "name": "_commitment", + "type": "tuple", + "internalType": "struct IRealTimeInbox.Commitment", + "components": [ + { + "name": "proposalHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "lastFinalizedBlockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "checkpoint", + "type": "tuple", + "internalType": "struct ICheckpointStore.Checkpoint", + "components": [ + { + "name": "blockNumber", + "type": "uint48", + "internalType": "uint48" + }, + { + "name": "blockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "stateRoot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "hashProposal", + "inputs": [ + { + "name": "_proposal", + "type": "tuple", + "internalType": "struct IRealTimeInbox.Proposal", + "components": [ + { + "name": "maxAnchorBlockNumber", + "type": "uint48", + "internalType": "uint48" + }, + { + "name": "maxAnchorBlockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + 
"name": "basefeeSharingPctg", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "sources", + "type": "tuple[]", + "internalType": "struct IInbox.DerivationSource[]", + "components": [ + { + "name": "isForcedInclusion", + "type": "bool", + "internalType": "bool" + }, + { + "name": "blobSlice", + "type": "tuple", + "internalType": "struct LibBlobs.BlobSlice", + "components": [ + { + "name": "blobHashes", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "offset", + "type": "uint24", + "internalType": "uint24" + }, + { + "name": "timestamp", + "type": "uint48", + "internalType": "uint48" + } + ] + } + ] + }, + { + "name": "signalSlotsHash", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "hashSignalSlots", + "inputs": [ + { + "name": "_signalSlots", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "impl", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "inNonReentrant", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "init", + "inputs": [ + { + "name": "_owner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "lastFinalizedBlockHash", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "owner", + "inputs": [], + 
"outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "pause", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "paused", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "pendingOwner", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "propose", + "inputs": [ + { + "name": "_data", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "_checkpoint", + "type": "tuple", + "internalType": "struct ICheckpointStore.Checkpoint", + "components": [ + { + "name": "blockNumber", + "type": "uint48", + "internalType": "uint48" + }, + { + "name": "blockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "stateRoot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "proxiableUUID", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "renounceOwnership", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "resolver", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "tentativePropose", + "inputs": [ + { + "name": "_data", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "_checkpoint", + "type": "tuple", + "internalType": "struct 
ICheckpointStore.Checkpoint", + "components": [ + { + "name": "blockNumber", + "type": "uint48", + "internalType": "uint48" + }, + { + "name": "blockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "stateRoot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "proposalId_", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "transferOwnership", + "inputs": [ + { + "name": "newOwner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "unpause", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "upgradeTo", + "inputs": [ + { + "name": "newImplementation", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "upgradeToAndCall", + "inputs": [ + { + "name": "newImplementation", + "type": "address", + "internalType": "address" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "event", + "name": "Activated", + "inputs": [ + { + "name": "genesisBlockHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "AdminChanged", + "inputs": [ + { + "name": "previousAdmin", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "newAdmin", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BeaconUpgraded", + "inputs": [ + { + "name": "beacon", + "type": "address", + "indexed": true, + "internalType": 
"address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Initialized", + "inputs": [ + { + "name": "version", + "type": "uint8", + "indexed": false, + "internalType": "uint8" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "OwnershipTransferStarted", + "inputs": [ + { + "name": "previousOwner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "newOwner", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "OwnershipTransferred", + "inputs": [ + { + "name": "previousOwner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "newOwner", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Paused", + "inputs": [ + { + "name": "account", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ProposedAndProved", + "inputs": [ + { + "name": "proposalHash", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "lastFinalizedBlockHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "maxAnchorBlockNumber", + "type": "uint48", + "indexed": false, + "internalType": "uint48" + }, + { + "name": "basefeeSharingPctg", + "type": "uint8", + "indexed": false, + "internalType": "uint8" + }, + { + "name": "sources", + "type": "tuple[]", + "indexed": false, + "internalType": "struct IInbox.DerivationSource[]", + "components": [ + { + "name": "isForcedInclusion", + "type": "bool", + "internalType": "bool" + }, + { + "name": "blobSlice", + "type": "tuple", + "internalType": "struct LibBlobs.BlobSlice", + "components": [ + { + "name": "blobHashes", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "offset", + "type": "uint24", + 
"internalType": "uint24" + }, + { + "name": "timestamp", + "type": "uint48", + "internalType": "uint48" + } + ] + } + ] + }, + { + "name": "signalSlots", + "type": "bytes32[]", + "indexed": false, + "internalType": "bytes32[]" + }, + { + "name": "checkpoint", + "type": "tuple", + "indexed": false, + "internalType": "struct ICheckpointStore.Checkpoint", + "components": [ + { + "name": "blockNumber", + "type": "uint48", + "internalType": "uint48" + }, + { + "name": "blockHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "stateRoot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "TentativeProposed", + "inputs": [ + { + "name": "proposalId", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "requiredReturnSignalsHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Unpaused", + "inputs": [ + { + "name": "account", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Upgraded", + "inputs": [ + { + "name": "implementation", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "error", + "name": "ACCESS_DENIED", + "inputs": [] + }, + { + "type": "error", + "name": "AlreadyActivated", + "inputs": [] + }, + { + "type": "error", + "name": "BlobNotFound", + "inputs": [] + }, + { + "type": "error", + "name": "FUNC_NOT_IMPLEMENTED", + "inputs": [] + }, + { + "type": "error", + "name": "INVALID_PAUSE_STATUS", + "inputs": [] + }, + { + "type": "error", + "name": "InvalidGenesisBlockHash", + "inputs": [] + }, + { + "type": "error", + "name": "MaxAnchorBlockTooOld", + "inputs": [] + }, + { + "type": "error", + "name": "NoBlobs", + "inputs": [] + }, + { + "type": "error", + "name": "NoPendingProposal", + 
"inputs": [] + }, + { + "type": "error", + "name": "NotActivated", + "inputs": [] + }, + { + "type": "error", + "name": "PendingProposalAlreadyExists", + "inputs": [] + }, + { + "type": "error", + "name": "REENTRANT_CALL", + "inputs": [] + }, + { + "type": "error", + "name": "RequiredSignalNotSent", + "inputs": [ + { + "name": "slot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + }, + { + "type": "error", + "name": "RequiredSignalsMismatch", + "inputs": [] + }, + { + "type": "error", + "name": "SignalSlotNotSent", + "inputs": [ + { + "name": "slot", + "type": "bytes32", + "internalType": "bytes32" + } + ] + }, + { + "type": "error", + "name": "ZERO_ADDRESS", + "inputs": [] + }, + { + "type": "error", + "name": "ZERO_VALUE", + "inputs": [] + } + ] +} diff --git a/realtime/src/l1/bindings.rs b/realtime/src/l1/bindings.rs new file mode 100644 index 00000000..85d0be4c --- /dev/null +++ b/realtime/src/l1/bindings.rs @@ -0,0 +1,110 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug, Default)] + RealTimeInbox, + "src/l1/abi/RealTimeInbox.json" +); + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + Multicall, + "src/l1/abi/Multicall.json" +); + +// Define ProposeInput and BlobReference manually since the RealTimeInbox ABI +// only exposes propose(bytes _data, ...) where _data is abi.encode(ProposeInput). +// These types are internal to the contract but needed for encoding. +sol! { + struct BlobReference { + uint16 blobStartIndex; + uint16 numBlobs; + uint24 offset; + } + + struct ProposeInput { + BlobReference blobReference; + bytes32[] signalSlots; + uint48 maxAnchorBlockNumber; + } + + /// Input for `tentativePropose` — splits signals into existing (verified + /// immediately) and requiredReturn (verified at finalizePropose after the + /// L1 callback in the same multicall produces them). 
+ struct ProposeInputV2 { + BlobReference blobReference; + bytes32[] existingSignals; + bytes32[] requiredReturnSignals; + uint48 maxAnchorBlockNumber; + } + + // SurgeVerifier SubProof encoding + struct SubProof { + uint8 proofBitFlag; + bytes data; + } +} + +/// Proof types supported by the SurgeVerifier. +/// Each variant maps to a bit flag used in `SubProof.proofBitFlag`. +/// Must match the constants in `SurgeVerifier.sol`. +/// +/// Note: MOCK_ECDSA (0b00000001) is not a variant here — it is selected +/// at runtime via the `MOCK_MODE` env flag, which overrides the bit flag +/// to 1 regardless of the proof type. +#[derive(Debug, Clone, Copy)] +pub enum ProofType { + Risc0, // 0b00000010 + Sp1, // 0b00000100 + Zisk, // 0b00001000 +} + +impl ProofType { + pub fn proof_bit_flag(&self) -> u8 { + match self { + ProofType::Risc0 => 1 << 1, + ProofType::Sp1 => 1 << 2, + ProofType::Zisk => 1 << 3, + } + } + + /// Returns the proof type string expected by Raiko. + pub fn raiko_proof_type(&self) -> &'static str { + match self { + ProofType::Risc0 => "risc0", + ProofType::Sp1 => "sp1", + ProofType::Zisk => "zisk", + } + } +} + +/// SurgeVerifier MOCK_ECDSA bit flag — used when `MOCK_MODE=true`. +pub const MOCK_ECDSA_BIT_FLAG: u8 = 1; + +impl std::str::FromStr for ProofType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "risc0" => Ok(ProofType::Risc0), + "sp1" => Ok(ProofType::Sp1), + "zisk" => Ok(ProofType::Zisk), + _ => Err(anyhow::anyhow!( + "Invalid PROOF_TYPE '{}'. 
Must be one of: sp1, risc0, zisk", + s + )), + } + } +} + +impl std::fmt::Display for ProofType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.raiko_proof_type()) + } +} diff --git a/realtime/src/l1/config.rs b/realtime/src/l1/config.rs new file mode 100644 index 00000000..ad6db1d2 --- /dev/null +++ b/realtime/src/l1/config.rs @@ -0,0 +1,40 @@ +use crate::l1::bindings::ProofType; +use crate::raiko::RaikoClient; +use crate::utils::config::RealtimeConfig; +use alloy::primitives::Address; + +#[derive(Clone)] +pub struct ContractAddresses { + pub realtime_inbox: Address, + pub proposer_multicall: Address, + pub bridge: Address, + #[allow(dead_code)] + pub signal_service: Address, +} + +pub struct EthereumL1Config { + pub realtime_inbox: Address, + pub proposer_multicall: Address, + pub bridge: Address, + pub signal_service: Address, + pub proof_type: ProofType, + pub mock_mode: bool, + pub raiko_client: RaikoClient, +} + +impl TryFrom for EthereumL1Config { + type Error = anyhow::Error; + + fn try_from(config: RealtimeConfig) -> Result { + let raiko_client = RaikoClient::new(&config); + Ok(EthereumL1Config { + realtime_inbox: config.realtime_inbox, + proposer_multicall: config.proposer_multicall, + bridge: config.bridge, + signal_service: config.signal_service, + proof_type: config.proof_type, + mock_mode: config.mock_mode, + raiko_client, + }) + } +} diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs new file mode 100644 index 00000000..9f7628a7 --- /dev/null +++ b/realtime/src/l1/execution_layer.rs @@ -0,0 +1,555 @@ +use super::config::EthereumL1Config; +use super::proposal_tx_builder::ProposalTxBuilder; +use super::protocol_config::ProtocolConfig; +use crate::l1::bindings::RealTimeInbox::{self, RealTimeInboxInstance}; +use crate::node::proposal_manager::proposal::Proposal; +use crate::raiko::RaikoClient; +use crate::shared_abi::bindings::{ + Bridge, Bridge::MessageSent, IBridge::Message, 
SignalService::SignalSent, +}; +use crate::{l1::config::ContractAddresses, node::proposal_manager::bridge_handler::UserOp}; +use alloy::{ + eips::{BlockId, BlockNumberOrTag}, + primitives::{Address, B256, Bytes, FixedBytes}, + providers::{DynProvider, ext::DebugApi}, + rpc::types::{ + TransactionRequest, + trace::geth::{ + GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, + GethDebugTracingOptions, + }, + }, + sol_types::SolEvent, +}; +use anyhow::{Error, anyhow}; +use common::{ + l1::{ + traits::{ELTrait, PreconferProvider}, + transaction_error::TransactionError, + }, + metrics::Metrics, + shared::{ + alloy_tools, execution_layer::ExecutionLayer as ExecutionLayerCommon, + transaction_monitor::TransactionMonitor, + }, +}; +use pacaya::l1::{operators_cache::OperatorError, traits::PreconfOperator}; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; +use tracing::info; + +pub struct ExecutionLayer { + common: ExecutionLayerCommon, + provider: DynProvider, + preconfer_address: Address, + pub transaction_monitor: TransactionMonitor, + contract_addresses: ContractAddresses, + realtime_inbox: RealTimeInboxInstance, + #[allow(dead_code)] + raiko_client: RaikoClient, + proof_type: crate::l1::bindings::ProofType, + mock_mode: bool, + extra_gas_percentage: u64, +} + +impl ELTrait for ExecutionLayer { + type Config = EthereumL1Config; + async fn new( + common_config: common::l1::config::EthereumL1Config, + specific_config: Self::Config, + transaction_error_channel: Sender, + metrics: Arc, + ) -> Result { + let provider = alloy_tools::construct_alloy_provider( + &common_config.signer, + common_config + .execution_rpc_urls + .first() + .ok_or_else(|| anyhow!("L1 RPC URL is required"))?, + ) + .await?; + let common = + ExecutionLayerCommon::new(provider.clone(), common_config.signer.get_address()).await?; + + let transaction_monitor = TransactionMonitor::new( + provider.clone(), + &common_config, + transaction_error_channel, + metrics.clone(), + 
common.chain_id(), + ) + .await + .map_err(|e| Error::msg(format!("Failed to create TransactionMonitor: {e}")))?; + + let realtime_inbox = RealTimeInbox::new(specific_config.realtime_inbox, provider.clone()); + + let config = realtime_inbox + .getConfig() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getConfig for RealTimeInbox: {e}"))?; + + tracing::info!( + "RealTimeInbox: {}, proofVerifier: {}, signalService: {}", + specific_config.realtime_inbox, + config.proofVerifier, + config.signalService, + ); + + let contract_addresses = ContractAddresses { + realtime_inbox: specific_config.realtime_inbox, + proposer_multicall: specific_config.proposer_multicall, + bridge: specific_config.bridge, + signal_service: specific_config.signal_service, + }; + + let proof_type = specific_config.proof_type; + let mock_mode = specific_config.mock_mode; + let raiko_client = specific_config.raiko_client; + let extra_gas_percentage = common_config.extra_gas_percentage; + + Ok(Self { + common, + provider, + preconfer_address: common_config.signer.get_address(), + transaction_monitor, + contract_addresses, + realtime_inbox, + raiko_client, + proof_type, + mock_mode, + extra_gas_percentage, + }) + } + + fn common(&self) -> &ExecutionLayerCommon { + &self.common + } +} + +impl PreconferProvider for ExecutionLayer { + async fn get_preconfer_wallet_eth(&self) -> Result { + self.common() + .get_account_balance(self.preconfer_address) + .await + } + + async fn get_preconfer_nonce_pending(&self) -> Result { + self.common() + .get_account_nonce(self.preconfer_address, BlockNumberOrTag::Pending) + .await + } + + async fn get_preconfer_nonce_latest(&self) -> Result { + self.common() + .get_account_nonce(self.preconfer_address, BlockNumberOrTag::Latest) + .await + } + + fn get_preconfer_address(&self) -> Address { + self.preconfer_address + } +} + +impl PreconfOperator for ExecutionLayer { + fn get_preconfer_address(&self) -> Address { + self.preconfer_address + } + + async fn 
get_operators_for_current_and_next_epoch( + &self, + _current_epoch_timestamp: u64, + _current_slot_timestamp: u64, + ) -> Result<(Address, Address), OperatorError> { + // RealTime: anyone can propose, but we still use operator tracking for slot management. + // Return self as both current and next operator. + Ok((self.preconfer_address, self.preconfer_address)) + } + + async fn is_preconf_router_specified_in_taiko_wrapper(&self) -> Result { + Ok(true) + } + + async fn get_l2_height_from_taiko_inbox(&self) -> Result { + Ok(0) + } + + async fn get_handover_window_slots(&self) -> Result { + Err(anyhow::anyhow!( + "Not implemented for RealTime execution layer" + )) + } +} + +impl ExecutionLayer { + #[allow(dead_code)] + pub fn get_raiko_client(&self) -> &RaikoClient { + &self.raiko_client + } + + /// Returns a clone of the configured contract addresses (L1 inbox, + /// bridge, signal service, proposer multicall). Useful for callers that + /// need to reference these during block building. + pub fn contract_addresses(&self) -> ContractAddresses { + self.contract_addresses.clone() + } + + pub async fn send_batch_to_l1( + &self, + batch: Proposal, + tx_hash_notifier: Option>, + tx_result_notifier: Option>, + ) -> Result<(), Error> { + info!( + "📦 Proposing with {} blocks | user_ops: {:?} | signal_slots: {:?} | l1_calls: {:?} | zk_proof: {}", + batch.l2_blocks.len(), + batch.user_ops, + batch.signal_slots, + batch.l1_calls, + batch.zk_proof.is_some(), + ); + + let builder = ProposalTxBuilder::new( + self.provider.clone(), + self.extra_gas_percentage, + self.proof_type, + self.mock_mode, + ); + + let tx = builder + .build_propose_tx( + batch, + self.preconfer_address, + self.contract_addresses.clone(), + ) + .await?; + + let pending_nonce = self.get_preconfer_nonce_pending().await?; + self.transaction_monitor + .monitor_new_transaction(tx, pending_nonce, tx_hash_notifier, tx_result_notifier) + .await + .map_err(|e| Error::msg(format!("Sending batch to L1 failed: {e}")))?; 
+ + Ok(()) + } + + pub async fn is_transaction_in_progress(&self) -> Result { + self.transaction_monitor.is_transaction_in_progress().await + } + + pub async fn fetch_protocol_config(&self) -> Result { + let config = self + .realtime_inbox + .getConfig() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getConfig for RealTimeInbox: {e}"))?; + + info!( + "RealTimeInbox config: basefeeSharingPctg: {}", + config.basefeeSharingPctg, + ); + + Ok(ProtocolConfig::from(&config)) + } + + pub async fn get_last_finalized_block_hash(&self) -> Result { + let result = self + .realtime_inbox + .getLastFinalizedBlockHash() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getLastFinalizedBlockHash: {e}"))?; + + Ok(result) + } +} + +// Surge: L1 EL ops for Bridge Handler + +use alloy::rpc::types::trace::geth::{CallFrame, CallLogFrame}; + +fn collect_logs_recursive(frame: &CallFrame) -> Vec { + let mut logs = frame.logs.clone(); + + for subcall in &frame.calls { + logs.extend(collect_logs_recursive(subcall)); + } + + logs +} + +pub trait L1BridgeHandlerOps { + async fn find_message_and_signal_slot( + &self, + user_op: UserOp, + ) -> Result)>, anyhow::Error>; + + /// Simulate `Bridge.processMessage(msg, proof)` on L1 and inspect the trace + /// for any `MessageSent` event the invoked L1 callback emits. If it does, + /// the return message is an L1→L2 bridge message that the originating L2 + /// block expects to consume as a fast signal — the slot of that return + /// signal is what the inbox's `requiredReturnSignals` list must include. + /// + /// Returns `Some((return_message, return_signal_slot))` if a return is + /// produced, `None` otherwise. Returns an error only for RPC failures; a + /// callback that reverts during simulation yields `None` (no signal). 
+ async fn simulate_l1_callback_return_signal( + &self, + message_from_l2: Message, + signal_slot_proof: Bytes, + bridge_address: Address, + l2_bridge_address: Address, + ) -> Result)>, anyhow::Error>; +} + +impl L1BridgeHandlerOps for ExecutionLayer { + async fn find_message_and_signal_slot( + &self, + user_op_data: UserOp, + ) -> Result)>, anyhow::Error> { + let tx_request = TransactionRequest::default() + .from(self.preconfer_address) + .to(user_op_data.submitter) + .input(user_op_data.calldata.into()); + + let mut tracer_config = serde_json::Map::new(); + tracer_config.insert("withLog".to_string(), serde_json::Value::Bool(true)); + tracer_config.insert("onlyTopCall".to_string(), serde_json::Value::Bool(false)); + + let tracing_options = GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + tracer_config: serde_json::Value::Object(tracer_config).into(), + ..Default::default() + }; + + let call_options = GethDebugTracingCallOptions { + tracing_options, + ..Default::default() + }; + + let trace_result = self + .provider + .debug_trace_call( + tx_request, + BlockId::Number(BlockNumberOrTag::Latest), + call_options, + ) + .await + .map_err(|e| anyhow!("Failed to simulate executeBatch on L1: {e}"))?; + + tracing::debug!("Received trace result: {:?}", trace_result); + + let mut message: Option = None; + let mut slot: Option> = None; + + if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result { + let all_logs = collect_logs_recursive(&call_frame); + tracing::debug!("Collected {} logs from call trace", all_logs.len()); + + for log in all_logs { + if let Some(topics) = &log.topics + && !topics.is_empty() + { + if topics[0] == MessageSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = MessageSent::decode_log_data(&log_data) + .map_err(|e| anyhow!("Failed to 
decode MessageSent event L1: {e}"))?; + + message = Some(decoded.message); + } else if topics[0] == SignalSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = SignalSent::decode_log_data(&log_data) + .map_err(|e| anyhow!("Failed to decode SignalSent event L1: {e}"))?; + + slot = Some(decoded.slot); + } + } + } + } + + tracing::debug!("{:?} {:?}", message, slot); + + if let (Some(message), Some(slot)) = (message, slot) { + return Ok(Some((message, slot))); + } + + Ok(None) + } + + async fn simulate_l1_callback_return_signal( + &self, + message_from_l2: Message, + _signal_slot_proof: Bytes, + bridge_address: Address, + _l2_bridge_address: Address, + ) -> Result)>, anyhow::Error> { + use alloy::primitives::{B256, U256, keccak256}; + use alloy::rpc::types::state::{AccountOverride, StateOverride}; + + // Instead of simulating Bridge.processMessage (which requires L1 + // signal verification we can't bypass), we call the L1 callback's + // onMessageInvocation(data) directly with from=bridge. 
To make + // bridge.context() return the correct values, we state-override the + // bridge's __ctx storage (slots 253-254, see Bridge_Layout.sol): + // slot 253: msgHash (bytes32) + // slot 254: from (address, 20 bytes) | srcChainId (uint64, 8 bytes) + + let bridge = Bridge::new(bridge_address, self.provider.clone()); + let msg_hash: B256 = bridge + .hashMessage(message_from_l2.clone()) + .call() + .await + .map_err(|e| anyhow!("Failed to call Bridge.hashMessage for sim: {e}"))?; + + // Pack slot 254: address `from` (low 20 bytes) + uint64 srcChainId (next 8 bytes) + // Solidity packs struct members right-aligned in the same slot: + // from occupies bytes [0..20), srcChainId occupies bytes [20..28) + let mut slot_254 = [0u8; 32]; + slot_254[12..32].copy_from_slice(message_from_l2.from.as_slice()); + slot_254[4..12].copy_from_slice(&message_from_l2.srcChainId.to_be_bytes()); + let slot_254_value = B256::from(slot_254); + + // message_from_l2.data is already the full ABI-encoded calldata for + // onMessageInvocation(bytes) — exactly what Bridge.processMessage + // would pass to the target. Use it directly. + // Forward message.value as msg.value so payable callbacks receive ETH. + let callback_address = message_from_l2.to; + let tx_request = TransactionRequest::default() + .from(bridge_address) // msg.sender = bridge (passes ONLY_BRIDGE check) + .to(callback_address) + .value(message_from_l2.value) + .input(message_from_l2.data.clone().into()); + + // State-override the bridge's __ctx storage so context() returns + // the correct msgHash, from, and srcChainId. Also give the bridge + // enough ETH balance so the value transfer succeeds in the trace. 
+ let bridge_balance = message_from_l2 + .value + .saturating_add(U256::from(10u64).pow(U256::from(18u64))); + let bridge_ctx_override = AccountOverride::default() + .with_balance(bridge_balance) + .with_state_diff([ + (B256::from(U256::from(253u64)), msg_hash), // __ctx.msgHash + (B256::from(U256::from(254u64)), slot_254_value), // __ctx.from + srcChainId + ]); + let mut state_overrides = StateOverride::default(); + state_overrides.insert(bridge_address, bridge_ctx_override); + + let tracer_config = serde_json::json!({"onlyTopCall": false}); + + let tracing_options = GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + tracer_config: tracer_config.into(), + ..Default::default() + }; + + let call_options = GethDebugTracingCallOptions { + tracing_options, + state_overrides: Some(state_overrides), + ..Default::default() + }; + + let trace_result = match self + .provider + .debug_trace_call( + tx_request, + BlockId::Number(BlockNumberOrTag::Latest), + call_options, + ) + .await + { + Ok(t) => t, + Err(e) => { + return Err(anyhow!("L1 callback simulation RPC failed: {e}")); + } + }; + + // Scan the trace for a sendMessage call to the L1 bridge. 
+ let mut return_msg: Option = None; + + if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result + && let Some((mut msg, caller)) = + find_send_message_in_call_tree(&call_frame, bridge_address) + { + // Patch bridge-assigned fields (from, srcChainId, id) + msg.from = caller; + msg.srcChainId = self.common.chain_id(); + // Query nextMessageId for the id the bridge would assign + let bridge_contract = Bridge::new(bridge_address, self.provider.clone()); + if let Ok(next_id) = bridge_contract.nextMessageId().call().await { + msg.id = next_id; + } + return_msg = Some(msg); + } + + if let Some(m) = return_msg { + // Compute the signal slot: keccak256("SIGNAL", L1_chain_id, L1_bridge, msgHash) + let return_msg_hash: B256 = + bridge.hashMessage(m.clone()).call().await.map_err(|e| { + anyhow!("Failed to call Bridge.hashMessage for return msg: {e}") + })?; + + let l1_chain_id = self.common.chain_id(); + let mut slot_preimage = Vec::with_capacity(6 + 8 + 20 + 32); + slot_preimage.extend_from_slice(b"SIGNAL"); + slot_preimage.extend_from_slice(&l1_chain_id.to_be_bytes()); + slot_preimage.extend_from_slice(bridge_address.as_slice()); + slot_preimage.extend_from_slice(return_msg_hash.as_slice()); + let signal_slot: FixedBytes<32> = keccak256(&slot_preimage); + + tracing::info!( + "L1 callback simulation found return signal: slot={}, destChainId={}", + signal_slot, + m.destChainId + ); + Ok(Some((m, signal_slot))) + } else { + tracing::debug!("L1 callback simulation found no sendMessage call in trace"); + Ok(None) + } + } +} + +/// `Bridge.sendMessage(Message)` selector. +const SEND_MESSAGE_SELECTOR: [u8; 4] = [0x1b, 0xdb, 0x00, 0x37]; + +/// Recursively search call frames for a CALL to `bridge_address` with the +/// `sendMessage` function selector. Returns the decoded `IBridge.Message` +/// and the caller address (msg.sender of the sendMessage call). 
+fn find_send_message_in_call_tree( + frame: &CallFrame, + bridge_address: Address, +) -> Option<(Message, Address)> { + use alloy::sol_types::SolCall; + + if let Some(to_addr) = frame.to + && to_addr == bridge_address + { + let input = frame.input.as_ref(); + if input.len() >= 4 + && input[0..4] == SEND_MESSAGE_SELECTOR + && let Ok(decoded) = Bridge::sendMessageCall::abi_decode_raw(&input[4..]) + { + return Some((decoded._message, frame.from)); + } + } + + for sub in &frame.calls { + if let Some(result) = find_send_message_in_call_tree(sub, bridge_address) { + return Some(result); + } + } + + None +} diff --git a/realtime/src/l1/mod.rs b/realtime/src/l1/mod.rs new file mode 100644 index 00000000..7bcc9c57 --- /dev/null +++ b/realtime/src/l1/mod.rs @@ -0,0 +1,5 @@ +pub mod bindings; +pub mod config; +pub mod execution_layer; +pub mod proposal_tx_builder; +pub mod protocol_config; diff --git a/realtime/src/l1/proposal_tx_builder.rs b/realtime/src/l1/proposal_tx_builder.rs new file mode 100644 index 00000000..dbb01815 --- /dev/null +++ b/realtime/src/l1/proposal_tx_builder.rs @@ -0,0 +1,358 @@ +use crate::l1::{ + bindings::{ + BlobReference, MOCK_ECDSA_BIT_FLAG, Multicall, ProofType, ProposeInput, ProposeInputV2, + RealTimeInbox, SubProof, + }, + config::ContractAddresses, +}; +use crate::node::proposal_manager::{ + bridge_handler::{L1Call, UserOp}, + proposal::Proposal, +}; +use crate::shared_abi::bindings::Bridge; +use alloy::{ + consensus::SidecarBuilder, + eips::eip7594::BlobTransactionSidecarEip7594, + network::TransactionBuilder7594, + primitives::{ + Address, Bytes, U256, + aliases::{U24, U48}, + }, + providers::DynProvider, + rpc::types::TransactionRequest, + sol_types::SolValue, +}; +use anyhow::Error; +use common::l1::fees_per_gas::FeesPerGas; +use taiko_protocol::shasta::{ + BlobCoder, + manifest::{BlockManifest, DerivationSourceManifest}, +}; +use tracing::{info, warn}; + +pub struct ProposalTxBuilder { + provider: DynProvider, + extra_gas_percentage: 
u64, + proof_type: ProofType, + mock_mode: bool, +} + +impl ProposalTxBuilder { + pub fn new( + provider: DynProvider, + extra_gas_percentage: u64, + proof_type: ProofType, + mock_mode: bool, + ) -> Self { + Self { + provider, + extra_gas_percentage, + proof_type, + mock_mode, + } + } + + /// Gas estimation is skipped for blob transactions because `eth_estimateGas` + /// cannot simulate blobs — the `BLOBHASH` opcode returns zero during estimation, + /// causing spurious reverts that mask the real outcome. Instead we use a fixed + /// gas limit and rely on the `TransactionMonitor`'s receipt check: if the on-chain + /// execution reverts, the monitor sends `TransactionError::TransactionReverted` + /// through the error channel, and the node's main loop triggers + /// `recover_from_failed_submission` (reorg back to last finalized head). + const BLOB_TX_GAS_LIMIT: u64 = 3_000_000; + + #[allow(clippy::too_many_arguments)] + pub async fn build_propose_tx( + &self, + batch: Proposal, + from: Address, + contract_addresses: ContractAddresses, + ) -> Result { + let tx_blob = self + .build_propose_blob(batch, from, contract_addresses) + .await?; + + let tx_blob_gas = + Self::BLOB_TX_GAS_LIMIT + Self::BLOB_TX_GAS_LIMIT * self.extra_gas_percentage / 100; + + let fees_per_gas = match FeesPerGas::get_fees_per_gas(&self.provider).await { + Ok(fees_per_gas) => fees_per_gas, + Err(e) => { + warn!("Build proposeBatch: Failed to get fees per gas: {}", e); + return Ok(tx_blob); + } + }; + + let tx_blob = fees_per_gas.update_eip4844(tx_blob, tx_blob_gas); + + Ok(tx_blob) + } + + #[allow(clippy::too_many_arguments)] + pub async fn build_propose_blob( + &self, + batch: Proposal, + from: Address, + contract_addresses: ContractAddresses, + ) -> Result { + // Collect required return signals from all l1_calls that expect an L1→L2 + // return signal to be produced by their invoked target. 
When non-empty, the + // multicall is structured as: + // [tentativePropose, user_ops..., l1_calls..., finalizePropose] + // so that processMessage runs against the tentative state root, its invoked + // L1 callback produces the required return signal via Bridge.sendMessage, + // and finalizePropose verifies those signals at the end. + let required_return_signals: Vec> = batch + .l1_calls + .iter() + .filter_map(|c| c.required_return_signal) + .collect(); + + let use_deferred = !required_return_signals.is_empty(); + + // Build the inbox call(s) + blob sidecar. Returns either a single + // `propose` call (classic flow) or a pair of (tentative, finalize) calls. + let (inbox_calls, blob_sidecar) = self + .build_inbox_calls( + &batch, + contract_addresses.realtime_inbox, + use_deferred, + &required_return_signals, + ) + .await?; + + // If no user ops and no L1 calls and no deferred flow, go direct. + if batch.user_ops.is_empty() && batch.l1_calls.is_empty() && inbox_calls.len() == 1 { + info!("Sending proposal directly to RealTimeInbox (no multicall)"); + let inbox_call = inbox_calls.into_iter().next().ok_or_else(|| { + anyhow::anyhow!("inbox_calls unexpectedly empty after len==1 check") + })?; + let tx = TransactionRequest::default() + .to(contract_addresses.realtime_inbox) + .from(from) + .input(inbox_call.data.into()) + .with_blob_sidecar(blob_sidecar); + return Ok(tx); + } + + let mut multicalls: Vec = vec![]; + + if use_deferred { + // Deferred flow: [user_ops..., tentativePropose, l1_calls..., finalizePropose] + // + // User ops must run before tentativePropose because L1 UserOps are what + // emit the existingSignals that tentativePropose verifies. Ordering them + // after would leave those signals unsent and tentativePropose would revert. + + // 1. 
User ops (emit existingSignals on L1) + for user_op in &batch.user_ops { + let user_op_call = self.build_user_op_call(user_op.clone()); + info!("Added user op to Multicall: {:?}", &user_op_call); + multicalls.push(user_op_call); + } + + // 2. tentativePropose (inbox_calls[0]) — verifies existingSignals now present + info!("Added tentativePropose to Multicall: {:?}", &inbox_calls[0]); + multicalls.push(inbox_calls[0].clone()); + + // 3. L1 calls (processMessage for L2→L1 signals — each triggers its + // target's L1 callback which produces an L1→L2 return signal) + for l1_call in &batch.l1_calls { + let l1_call_call = + self.build_l1_call_call(l1_call.clone(), contract_addresses.bridge); + info!("Added L1 call to Multicall: {:?}", &l1_call_call); + multicalls.push(l1_call_call); + } + + // 4. finalizePropose (inbox_calls[1]) — verifies requiredReturnSignals + info!("Added finalizePropose to Multicall: {:?}", &inbox_calls[1]); + multicalls.push(inbox_calls[1].clone()); + } else { + // Classic flow: [user_ops..., propose, l1_calls...] 
+ for user_op in &batch.user_ops { + let user_op_call = self.build_user_op_call(user_op.clone()); + info!("Added user op to Multicall: {:?}", &user_op_call); + multicalls.push(user_op_call); + } + + info!("Added proposal to Multicall: {:?}", &inbox_calls[0]); + multicalls.push(inbox_calls[0].clone()); + + for l1_call in &batch.l1_calls { + let l1_call_call = + self.build_l1_call_call(l1_call.clone(), contract_addresses.bridge); + info!("Added L1 call to Multicall: {:?}", &l1_call_call); + multicalls.push(l1_call_call); + } + } + + let multicall = Multicall::new(contract_addresses.proposer_multicall, &self.provider); + let call = multicall.multicall(multicalls); + + let tx = TransactionRequest::default() + .to(contract_addresses.proposer_multicall) + .from(from) + .input(call.calldata().clone().into()) + .with_blob_sidecar(blob_sidecar); + + Ok(tx) + } + + fn build_user_op_call(&self, user_op_data: UserOp) -> Multicall::Call { + Multicall::Call { + target: user_op_data.submitter, + value: U256::ZERO, + data: user_op_data.calldata, + } + } + + /// Build the inbox call(s) + blob sidecar. + /// + /// When `use_deferred` is false, returns `[propose_call]` — the classic single + /// atomic propose path. + /// + /// When `use_deferred` is true, returns `[tentativePropose_call, finalizePropose_call]`. + /// `batch.signal_slots` is split into `existing_signals` (signals already on L1 + /// at proposal time, verified by tentativePropose) and `required_return_signals` + /// (signals produced later in the multicall by L1 callbacks, verified by + /// finalizePropose). The ZK proof commits to the union hash. 
+    async fn build_inbox_calls(
+        &self,
+        batch: &Proposal,
+        inbox_address: Address,
+        use_deferred: bool,
+        required_return_signals: &[alloy::primitives::FixedBytes<32>],
+    ) -> Result<(Vec<Multicall::Call>, BlobTransactionSidecarEip7594), anyhow::Error> {
+        // One manifest entry per L2 block in the batch.
+        let mut block_manifests = Vec::<BlockManifest>::with_capacity(batch.l2_blocks.len());
+        for l2_block in &batch.l2_blocks {
+            block_manifests.push(BlockManifest {
+                timestamp: l2_block.timestamp_sec,
+                coinbase: l2_block.coinbase,
+                anchor_block_number: l2_block.anchor_block_number,
+                gas_limit: l2_block.gas_limit_without_anchor,
+                transactions: l2_block
+                    .prebuilt_tx_list
+                    .tx_list
+                    .iter()
+                    .map(|tx| tx.clone().into())
+                    .collect(),
+            });
+        }
+
+        let manifest = DerivationSourceManifest {
+            blocks: block_manifests,
+        };
+
+        let manifest_data = manifest
+            .encode_and_compress()
+            .map_err(|e| Error::msg(format!("Can't encode and compress manifest: {e}")))?;
+
+        let sidecar_builder: SidecarBuilder<BlobCoder> = SidecarBuilder::from_slice(&manifest_data);
+        let sidecar: BlobTransactionSidecarEip7594 = sidecar_builder.build_7594()?;
+
+        let inbox = RealTimeInbox::new(inbox_address, self.provider.clone());
+
+        // Encode the raw proof as SubProof[] for the SurgeVerifier
+        let raw_proof = batch
+            .zk_proof
+            .as_ref()
+            .ok_or_else(|| anyhow::anyhow!("ZK proof not set on proposal"))?
+            .clone();
+
+        let bit_flag = if self.mock_mode {
+            MOCK_ECDSA_BIT_FLAG
+        } else {
+            self.proof_type.proof_bit_flag()
+        };
+        let sub_proofs = vec![SubProof {
+            proofBitFlag: bit_flag,
+            data: Bytes::from(raw_proof),
+        }];
+        let proof = Bytes::from(sub_proofs.abi_encode());
+
+        let blob_reference = BlobReference {
+            blobStartIndex: 0,
+            numBlobs: sidecar.blobs.len().try_into()?,
+            offset: U24::ZERO,
+        };
+
+        // Convert L1 Checkpoint type for the inbox call
+        let checkpoint = crate::l1::bindings::ICheckpointStore::Checkpoint {
+            blockNumber: batch.checkpoint.blockNumber,
+            blockHash: batch.checkpoint.blockHash,
+            stateRoot: batch.checkpoint.stateRoot,
+        };
+
+        if !use_deferred {
+            // Classic propose flow
+            let propose_input = ProposeInput {
+                blobReference: blob_reference,
+                signalSlots: batch.signal_slots.clone(),
+                maxAnchorBlockNumber: U48::from(batch.max_anchor_block_number),
+            };
+            let encoded_input = Bytes::from(propose_input.abi_encode());
+            let call = inbox.propose(encoded_input, checkpoint, proof);
+
+            return Ok((
+                vec![Multicall::Call {
+                    target: inbox_address,
+                    value: U256::ZERO,
+                    data: call.calldata().clone(),
+                }],
+                sidecar,
+            ));
+        }
+
+        // Deferred propose flow — split signal slots.
+        // `batch.signal_slots` should carry the UNION of existing and required-return
+        // slots (the anchor on L2 consumes the union as fast signals). We derive
+        // `existing_signals` by subtracting the required-return list from the union.
+        let required_set: std::collections::HashSet<_> =
+            required_return_signals.iter().copied().collect();
+        let existing_signals: Vec<alloy::primitives::FixedBytes<32>> = batch
+            .signal_slots
+            .iter()
+            .copied()
+            .filter(|s| !required_set.contains(s))
+            .collect();
+
+        let propose_input_v2 = ProposeInputV2 {
+            blobReference: blob_reference,
+            existingSignals: existing_signals,
+            requiredReturnSignals: required_return_signals.to_vec(),
+            maxAnchorBlockNumber: U48::from(batch.max_anchor_block_number),
+        };
+        let encoded_input = Bytes::from(propose_input_v2.abi_encode());
+
+        let tentative_call = inbox.tentativePropose(encoded_input, checkpoint, proof);
+        let finalize_call = inbox.finalizePropose(required_return_signals.to_vec());
+
+        Ok((
+            vec![
+                Multicall::Call {
+                    target: inbox_address,
+                    value: U256::ZERO,
+                    data: tentative_call.calldata().clone(),
+                },
+                Multicall::Call {
+                    target: inbox_address,
+                    value: U256::ZERO,
+                    data: finalize_call.calldata().clone(),
+                },
+            ],
+            sidecar,
+        ))
+    }
+
+    fn build_l1_call_call(&self, l1_call: L1Call, bridge_address: Address) -> Multicall::Call {
+        let bridge = Bridge::new(bridge_address, &self.provider);
+        let call =
+            bridge.processMessage(l1_call.message_from_l2.clone(), l1_call.signal_slot_proof);
+
+        Multicall::Call {
+            target: bridge_address,
+            value: U256::ZERO,
+            data: call.calldata().clone(),
+        }
+    }
+}
diff --git a/realtime/src/l1/protocol_config.rs b/realtime/src/l1/protocol_config.rs
new file mode 100644
index 00000000..7b96307f
--- /dev/null
+++ b/realtime/src/l1/protocol_config.rs
@@ -0,0 +1,33 @@
+use crate::l1::bindings::IRealTimeInbox::Config;
+use alloy::primitives::Address;
+
+#[derive(Clone, Default)]
+pub struct ProtocolConfig {
+    pub basefee_sharing_pctg: u8,
+    #[allow(dead_code)]
+    pub proof_verifier: Address,
+    #[allow(dead_code)]
+    pub signal_service: Address,
+}
+
+impl From<&Config> for ProtocolConfig {
+    fn from(config: &Config) -> Self {
+        Self {
+            basefee_sharing_pctg: config.basefeeSharingPctg,
+            proof_verifier:
config.proofVerifier, + signal_service: config.signalService, + } + } +} + +impl ProtocolConfig { + pub fn get_basefee_sharing_pctg(&self) -> u8 { + self.basefee_sharing_pctg + } + + /// Use the EVM blockhash() 256-block limit as the max anchor height offset. + #[allow(dead_code)] + pub fn get_max_anchor_height_offset(&self) -> u64 { + 256 + } +} diff --git a/realtime/src/l2/abi/Anchor.json b/realtime/src/l2/abi/Anchor.json new file mode 100644 index 00000000..b1ecd4b7 --- /dev/null +++ b/realtime/src/l2/abi/Anchor.json @@ -0,0 +1 @@ +{"abi":[{"type":"constructor","inputs":[{"name":"_checkpointStore","type":"address","internalType":"contract ICheckpointStore"},{"name":"_l1ChainId","type":"uint64","internalType":"uint64"}],"stateMutability":"nonpayable"},{"type":"function","name":"ANCHOR_GAS_LIMIT","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"GOLDEN_TOUCH_ADDRESS","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"acceptOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV4","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV4WithSignalSlots","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct 
ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]},{"name":"_signalSlots","type":"bytes32[]","internalType":"bytes32[]"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV5","inputs":[{"name":"_proposalParams","type":"tuple","internalType":"struct Anchor.ProposalParams","components":[{"name":"submissionWindowEnd","type":"uint48","internalType":"uint48"}]},{"name":"_blockParams","type":"tuple","internalType":"struct Anchor.BlockParams","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"anchorBlockHash","type":"bytes32","internalType":"bytes32"},{"name":"anchorStateRoot","type":"bytes32","internalType":"bytes32"},{"name":"rawTxListHash","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"blockHashes","inputs":[{"name":"blockNumber","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"blockHash","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"checkpointStore","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract ICheckpointStore"}],"stateMutability":"view"},{"type":"function","name":"getBlockState","inputs":[],"outputs":[{"name":"","type":"tuple","internalType":"struct Anchor.BlockState","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"ancestorsHash","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"getPreconfMetadata","inputs":[{"name":"_blockNumber","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"tuple","internalType":"struct 
Anchor.PreconfMetadata","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"submissionWindowEnd","type":"uint48","internalType":"uint48"},{"name":"parentSubmissionWindowEnd","type":"uint48","internalType":"uint48"},{"name":"rawTxListHash","type":"bytes32","internalType":"bytes32"},{"name":"parentRawTxListHash","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"impl","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"inNonReentrant","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"init","inputs":[{"name":"_owner","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"l1ChainId","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pause","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"paused","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"pendingOwner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"proxiableUUID","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"renounceOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"resolver","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"transferOwnership","inputs":[{"name":"newOwner","type":"address","internalType":"addr
ess"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"unpause","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"upgradeTo","inputs":[{"name":"newImplementation","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"upgradeToAndCall","inputs":[{"name":"newImplementation","type":"address","internalType":"address"},{"name":"data","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"withdraw","inputs":[{"name":"_token","type":"address","internalType":"address"},{"name":"_to","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"AdminChanged","inputs":[{"name":"previousAdmin","type":"address","indexed":false,"internalType":"address"},{"name":"newAdmin","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Anchored","inputs":[{"name":"prevAnchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"anchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"ancestorsHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"BeaconUpgraded","inputs":[{"name":"beacon","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Initialized","inputs":[{"name":"version","type":"uint8","indexed":false,"internalType":"uint8"}],"anonymous":false},{"type":"event","name":"OwnershipTransferStarted","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"OwnershipTransferred","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed"
:true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Paused","inputs":[{"name":"account","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Unpaused","inputs":[{"name":"account","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Upgraded","inputs":[{"name":"implementation","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Withdrawn","inputs":[{"name":"token","type":"address","indexed":false,"internalType":"address"},{"name":"to","type":"address","indexed":false,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"error","name":"ACCESS_DENIED","inputs":[]},{"type":"error","name":"AncestorsHashMismatch","inputs":[]},{"type":"error","name":"ETH_TRANSFER_FAILED","inputs":[]},{"type":"error","name":"FUNC_NOT_IMPLEMENTED","inputs":[]},{"type":"error","name":"INVALID_PAUSE_STATUS","inputs":[]},{"type":"error","name":"InvalidAddress","inputs":[]},{"type":"error","name":"InvalidBlockNumber","inputs":[]},{"type":"error","name":"InvalidL1ChainId","inputs":[]},{"type":"error","name":"InvalidL2ChainId","inputs":[]},{"type":"error","name":"InvalidSender","inputs":[]},{"type":"error","name":"REENTRANT_CALL","inputs":[]},{"type":"error","name":"ZERO_ADDRESS","inputs":[]},{"type":"error","name":"ZERO_VALUE","inputs":[]}]} \ No newline at end of file diff --git a/realtime/src/l2/bindings.rs b/realtime/src/l2/bindings.rs new file mode 100644 index 00000000..690f8934 --- /dev/null +++ b/realtime/src/l2/bindings.rs @@ -0,0 +1,10 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + Anchor, + "src/l2/abi/Anchor.json" +); diff --git a/realtime/src/l2/execution_layer.rs b/realtime/src/l2/execution_layer.rs new file mode 100644 index 00000000..1f25f24d --- /dev/null +++ 
b/realtime/src/l2/execution_layer.rs @@ -0,0 +1,556 @@ +use crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; +use crate::shared_abi::bindings::{ + Bridge::{self, MessageSent}, + HopProof, + IBridge::Message, + SignalService::SignalSent, +}; +use alloy::{ + consensus::{ + SignableTransaction, Transaction as AnchorTransaction, TxEnvelope, transaction::Recovered, + }, + eips::{BlockId, BlockNumberOrTag}, + primitives::{Address, B256, Bytes, FixedBytes}, + providers::{DynProvider, Provider, ext::DebugApi}, + rpc::types::{ + Transaction, TransactionRequest, + trace::geth::{ + CallFrame, GethDebugBuiltInTracerType, GethDebugTracerType, + GethDebugTracingCallOptions, GethDebugTracingOptions, + }, + }, + signers::{Signature, Signer as AlloySigner}, + sol_types::SolEvent, +}; +use anyhow::Error; +use common::shared::{ + alloy_tools, execution_layer::ExecutionLayer as ExecutionLayerCommon, + l2_slot_info_v2::L2SlotInfoV2, +}; +use common::{ + crypto::{GOLDEN_TOUCH_ADDRESS, GOLDEN_TOUCH_PRIVATE_KEY}, + signer::Signer, +}; +use pacaya::l2::config::TaikoConfig; +use std::sync::Arc; +use tracing::{debug, info, warn}; + +pub struct L2ExecutionLayer { + common: ExecutionLayerCommon, + pub provider: DynProvider, + anchor: Anchor::AnchorInstance, + pub bridge: Bridge::BridgeInstance, + pub signal_service: Address, + pub chain_id: u64, + #[allow(dead_code)] + pub config: TaikoConfig, + l2_call_signer: Arc, +} + +impl L2ExecutionLayer { + pub async fn new( + taiko_config: TaikoConfig, + bridge_address: Address, + signal_service: Address, + ) -> Result { + let provider = + alloy_tools::create_alloy_provider_without_wallet(&taiko_config.taiko_geth_url).await?; + + let chain_id = provider + .get_chain_id() + .await + .map_err(|e| anyhow::anyhow!("Failed to get chain ID: {}", e))?; + info!("L2 Chain ID: {}", chain_id); + + let anchor = Anchor::new(taiko_config.taiko_anchor_address, provider.clone()); + let bridge = Bridge::new(bridge_address, provider.clone()); + + let common 
= + ExecutionLayerCommon::new(provider.clone(), taiko_config.signer.get_address()).await?; + let l2_call_signer = taiko_config.signer.clone(); + + Ok(Self { + common, + provider, + anchor, + bridge, + signal_service, + chain_id, + l2_call_signer, + config: taiko_config, + }) + } + + pub fn common(&self) -> &ExecutionLayerCommon { + &self.common + } + + pub async fn construct_anchor_tx( + &self, + l2_slot_info: &L2SlotInfoV2, + anchor_block_params: (Checkpoint, Vec>), + ) -> Result { + debug!( + "Constructing anchor transaction for block number: {}", + l2_slot_info.parent_id() + 1 + ); + let nonce = self + .provider + .get_transaction_count(GOLDEN_TOUCH_ADDRESS) + .block_id((*l2_slot_info.parent_hash()).into()) + .await + .map_err(|e| { + self.common + .chain_error("Failed to get transaction count", Some(&e.to_string())) + })?; + + let call_builder = self + .anchor + .anchorV4WithSignalSlots(anchor_block_params.0, anchor_block_params.1) + .gas(1_000_000) + .max_fee_per_gas(u128::from(l2_slot_info.base_fee())) + .max_priority_fee_per_gas(0) + .nonce(nonce) + .chain_id(self.chain_id); + + let typed_tx = call_builder + .into_transaction_request() + .build_typed_tx() + .map_err(|_| anyhow::anyhow!("AnchorTX: Failed to build typed transaction"))?; + + let tx_eip1559 = typed_tx + .eip1559() + .ok_or_else(|| anyhow::anyhow!("AnchorTX: Failed to extract EIP-1559 transaction"))?; + + let signature = self.sign_hash_deterministic(tx_eip1559.signature_hash())?; + let sig_tx = tx_eip1559.clone().into_signed(signature); + + let tx_envelope = TxEnvelope::from(sig_tx); + + debug!("AnchorTX transaction hash: {}", tx_envelope.tx_hash()); + + let tx = Transaction { + inner: Recovered::new_unchecked(tx_envelope, GOLDEN_TOUCH_ADDRESS), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + Ok(tx) + } + + fn sign_hash_deterministic(&self, hash: B256) -> Result { + 
common::crypto::fixed_k_signer::sign_hash_deterministic(GOLDEN_TOUCH_PRIVATE_KEY, hash) + } + + pub async fn transfer_eth_from_l2_to_l1( + &self, + _amount: u128, + _dest_chain_id: u64, + _preconfer_address: Address, + _bridge_relayer_fee: u64, + ) -> Result<(), Error> { + warn!("Implement bridge transfer logic here"); + Ok(()) + } + + pub async fn get_last_synced_anchor_block_id_from_geth(&self) -> Result { + self.get_latest_anchor_transaction_input() + .await + .map_err(|e| anyhow::anyhow!("get_last_synced_anchor_block_id_from_geth: {e}")) + .and_then(|input| Self::decode_anchor_id_from_tx_data(&input)) + } + + async fn get_latest_anchor_transaction_input(&self) -> Result, Error> { + let block = self.common.get_latest_block_with_txs().await?; + let anchor_tx = match block.transactions.as_transactions() { + Some(txs) => txs.first().ok_or_else(|| { + anyhow::anyhow!( + "get_latest_anchor_transaction_input: Cannot get anchor transaction from block {}", + block.number() + ) + })?, + None => { + return Err(anyhow::anyhow!( + "No transactions in L2 block {}", + block.number() + )); + } + }; + + Ok(anchor_tx.input().to_vec()) + } + + pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode anchor id from tx data: {}", e))?; + Ok(tx_data._checkpoint.blockNumber.to::()) + } + + pub fn get_anchor_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode anchor tx data: {}", e))?; + Ok(tx_data) + } + + #[allow(dead_code)] + pub async fn get_head_l1_origin(&self) -> Result { + let response = self + .provider + .raw_request::<_, serde_json::Value>( + std::borrow::Cow::Borrowed("taiko_headL1Origin"), + (), + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to fetch taiko_headL1Origin: {}", e))?; + + let hex_str = response + .get("blockID") + .or_else(|| response.get("blockId")) + 
.and_then(serde_json::Value::as_str) + .ok_or_else(|| { + anyhow::anyhow!("Missing or invalid block id in taiko_headL1Origin response") + })?; + + u64::from_str_radix(hex_str.trim_start_matches("0x"), 16) + .map_err(|e| anyhow::anyhow!("Failed to parse 'blockID' as u64: {}", e)) + } + + #[allow(dead_code)] + pub async fn get_last_synced_block_params_from_geth(&self) -> Result { + self.get_latest_anchor_transaction_input() + .await + .map_err(|e| anyhow::anyhow!("get_last_synced_block_params_from_geth: {e}")) + .and_then(|input| Self::decode_block_params_from_tx_data(&input)) + } + + #[allow(dead_code)] + pub fn decode_block_params_from_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode block params from tx data: {}", e))?; + Ok(tx_data._checkpoint) + } +} + +// Surge: L2 EL ops for Bridge Handler + +pub trait L2BridgeHandlerOps { + async fn construct_l2_call_tx(&self, message: Message) -> Result; + async fn find_message_and_signal_slot( + &self, + block_id: u64, + ) -> Result)>, anyhow::Error>; + async fn get_hop_proof( + &self, + slot: FixedBytes<32>, + block_id: u64, + state_root: B256, + ) -> Result; +} + +impl L2BridgeHandlerOps for L2ExecutionLayer { + async fn construct_l2_call_tx(&self, message: Message) -> Result { + use alloy::signers::local::PrivateKeySigner; + use std::str::FromStr; + + debug!("Constructing bridge call transaction for L2 call"); + + let signer_address = self.l2_call_signer.get_address(); + + let nonce = self + .provider + .get_transaction_count(signer_address) + .await + .map_err(|e| anyhow::anyhow!("Failed to get nonce for bridge call: {}", e))?; + + let call_builder = self + .bridge + .processMessage(message, Bytes::new()) + .gas(3_000_000) + .max_fee_per_gas(1_000_000_000) + .max_priority_fee_per_gas(0) + .nonce(nonce) + .chain_id(self.chain_id); + + let typed_tx = call_builder + .into_transaction_request() + .build_typed_tx() + .map_err(|_| 
anyhow::anyhow!("L2 Call Tx: Failed to build typed transaction"))?; + + let tx_eip1559 = typed_tx + .eip1559() + .ok_or_else(|| anyhow::anyhow!("L2 Call Tx: Failed to extract EIP-1559 transaction"))? + .clone(); + + let signature = match self.l2_call_signer.as_ref() { + Signer::Web3signer(web3signer, address) => { + let signature_bytes = web3signer.sign_transaction(&tx_eip1559, *address).await?; + Signature::try_from(signature_bytes.as_slice()) + .map_err(|e| anyhow::anyhow!("Failed to parse signature: {}", e))? + } + Signer::PrivateKey(private_key, _) => { + let signer = PrivateKeySigner::from_str(private_key.as_str())?; + AlloySigner::sign_hash(&signer, &tx_eip1559.signature_hash()).await? + } + }; + + let sig_tx = tx_eip1559.into_signed(signature); + let tx_envelope = TxEnvelope::from(sig_tx); + + debug!("L2 Call transaction hash: {}", tx_envelope.tx_hash()); + + let tx = Transaction { + inner: Recovered::new_unchecked(tx_envelope, signer_address), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + Ok(tx) + } + + async fn find_message_and_signal_slot( + &self, + block_id: u64, + ) -> Result)>, anyhow::Error> { + use alloy::rpc::types::Filter; + + let bridge_address = *self.bridge.address(); + let signal_service_address = self.signal_service; + + let filter = Filter::new().from_block(block_id).to_block(block_id); + + let bridge_filter = filter + .clone() + .address(bridge_address) + .event_signature(MessageSent::SIGNATURE_HASH); + + let bridge_logs = self + .provider + .get_logs(&bridge_filter) + .await + .map_err(|e| anyhow::anyhow!("Failed to get MessageSent logs from bridge: {e}"))?; + + let signal_filter = filter + .address(signal_service_address) + .event_signature(SignalSent::SIGNATURE_HASH); + + let signal_logs = self.provider.get_logs(&signal_filter).await.map_err(|e| { + anyhow::anyhow!("Failed to get SignalSent logs from signal service: {e}") + })?; + + if bridge_logs.is_empty() || 
signal_logs.is_empty() { + return Ok(None); + } + + let message = { + let log = bridge_logs + .first() + .ok_or_else(|| anyhow::anyhow!("No bridge logs"))?; + let log_data = alloy::primitives::LogData::new_unchecked( + log.topics().to_vec(), + log.data().data.clone(), + ); + MessageSent::decode_log_data(&log_data) + .map_err(|e| anyhow::anyhow!("Failed to decode MessageSent event: {e}"))? + .message + }; + + let slot = { + let log = signal_logs + .first() + .ok_or_else(|| anyhow::anyhow!("No signal logs"))?; + let log_data = alloy::primitives::LogData::new_unchecked( + log.topics().to_vec(), + log.data().data.clone(), + ); + SignalSent::decode_log_data(&log_data) + .map_err(|e| anyhow::anyhow!("Failed to decode SignalSent event: {e}"))? + .slot + }; + + Ok(Some((message, slot))) + } + + async fn get_hop_proof( + &self, + slot: FixedBytes<32>, + block_id: u64, + state_root: B256, + ) -> Result { + use alloy::sol_types::SolValue; + + let proof = self + .provider + .get_proof(self.signal_service, vec![slot]) + .block_id(block_id.into()) + .await + .map_err(|e| anyhow::anyhow!("eth_getProof failed for signal slot: {e}"))?; + + let storage_proof = proof + .storage_proof + .first() + .ok_or_else(|| anyhow::anyhow!("No storage proof returned for signal slot"))?; + + let hop_proof = HopProof { + chainId: self.chain_id, + blockId: block_id, + rootHash: state_root, + cacheOption: 0, + accountProof: proof.account_proof.clone(), + storageProof: storage_proof.proof.clone(), + }; + + info!( + "Built HopProof: chainId={}, blockId={}, rootHash={}, accountProof_len={}, storageProof_len={}", + hop_proof.chainId, + hop_proof.blockId, + hop_proof.rootHash, + hop_proof.accountProof.len(), + hop_proof.storageProof.len(), + ); + + Ok(Bytes::from(vec![hop_proof].abi_encode_params())) + } +} + +// Surge: L2 mempool tx scanning and simulation + +/// `Bridge.sendMessage(Message)` selector — used for call-based detection +/// in the trace tree because the L2 bridge is behind a DELEGATECALL 
proxy +/// and the Nethermind callTracer doesn't surface event logs from proxied calls. +const SEND_MESSAGE_SELECTOR: [u8; 4] = [0x1b, 0xdb, 0x00, 0x37]; + +impl L2ExecutionLayer { + /// Trace a transaction to detect any `Bridge.sendMessage` call it makes. + /// Instead of relying on `MessageSent` event logs (which the L2 Nethermind + /// callTracer doesn't emit through DELEGATECALL proxies), we scan the call + /// tree for CALL frames targeting the L2 bridge with the `sendMessage` + /// selector, and decode the Message from the call input. + pub async fn trace_tx_for_outbound_message( + &self, + from: Address, + to: Address, + input: &[u8], + value: Option, + ) -> Result, anyhow::Error> { + let mut tx_request = TransactionRequest::default() + .from(from) + .to(to) + .input(input.to_vec().into()); + + if let Some(v) = value { + tx_request = tx_request.value(v); + } + + let tracer_config = serde_json::json!({ + "onlyTopCall": false + }); + + let tracing_options = GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + tracer_config: tracer_config.into(), + ..Default::default() + }; + + let call_options = GethDebugTracingCallOptions { + tracing_options, + ..Default::default() + }; + + let trace_result = match self + .provider + .debug_trace_call( + tx_request, + BlockId::Number(BlockNumberOrTag::Latest), + call_options, + ) + .await + { + Ok(t) => t, + Err(e) => { + return Err(anyhow::anyhow!("L2 tx trace RPC failed: {e}")); + } + }; + + let bridge_address = *self.bridge.address(); + let mut message: Option = None; + let mut send_message_caller: Option
= None; + + if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result { + // Walk the call tree looking for CALL frames to the bridge with + // the sendMessage selector. The Message struct is ABI-encoded as + // the first (and only) parameter after the 4-byte selector. + if let Some((msg, caller)) = find_send_message_in_calls(&call_frame, bridge_address) { + message = Some(msg); + send_message_caller = Some(caller); + } + } + + if let Some(ref mut m) = message { + // The bridge fills `from`, `srcChainId`, and `id` during sendMessage + // execution, but the call-based detection reads the INPUT before + // those are set. Patch them with what the bridge would assign. + m.from = send_message_caller.unwrap_or(from); + m.srcChainId = self.chain_id; + // For `id`, query the bridge's nextMessageId (this is what it would assign) + if let Ok(next_id) = self.bridge.nextMessageId().call().await { + m.id = next_id; + } + + debug!( + "L2 trace found outbound sendMessage: destChainId={}, to={}, from={}", + m.destChainId, m.to, m.from + ); + } else { + debug!("L2 trace found no outbound sendMessage"); + } + + Ok(message) + } +} + +/// Recursively search call frames for a CALL to `bridge_address` with the +/// `sendMessage` function selector. Returns the decoded Message and the +/// caller address (msg.sender of the sendMessage call). +fn find_send_message_in_calls( + frame: &CallFrame, + bridge_address: Address, +) -> Option<(Message, Address)> { + use crate::shared_abi::bindings::Bridge; + use alloy::sol_types::SolCall; + + // Check this frame: is it a CALL to the bridge with sendMessage selector? 
+ if let Some(to_addr) = frame.to + && to_addr == bridge_address + { + let input = frame.input.as_ref(); + if input.len() >= 4 + && input[0..4] == SEND_MESSAGE_SELECTOR + && let Ok(decoded) = Bridge::sendMessageCall::abi_decode_raw(&input[4..]) + { + // `frame.from` is the msg.sender of this call + let caller = frame.from; + return Some((decoded._message, caller)); + } + } + + for sub in &frame.calls { + if let Some(result) = find_send_message_in_calls(sub, bridge_address) { + return Some(result); + } + } + + None +} diff --git a/realtime/src/l2/mod.rs b/realtime/src/l2/mod.rs new file mode 100644 index 00000000..b0e580e0 --- /dev/null +++ b/realtime/src/l2/mod.rs @@ -0,0 +1,3 @@ +pub mod bindings; +pub mod execution_layer; +pub mod taiko; diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs new file mode 100644 index 00000000..e2c9ed6f --- /dev/null +++ b/realtime/src/l2/taiko.rs @@ -0,0 +1,368 @@ +#![allow(dead_code)] + +use super::execution_layer::L2ExecutionLayer; +use crate::l1::protocol_config::ProtocolConfig; +use crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; +use crate::node::proposal_manager::l2_block_payload::L2BlockV2Payload; +use alloy::primitives::FixedBytes; +use alloy::{ + consensus::BlockHeader, + eips::BlockNumberOrTag, + primitives::{Address, B256}, + rpc::types::Block, +}; +use anyhow::Error; +use common::shared::l2_slot_info_v2::L2SlotContext; +use common::{ + l1::slot_clock::SlotClock, + l2::{ + engine::L2Engine, + taiko_driver::{ + OperationType, TaikoDriver, TaikoDriverConfig, + models::{BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, ExecutableData}, + }, + traits::Bridgeable, + }, + metrics::Metrics, + shared::{ + l2_slot_info_v2::L2SlotInfoV2, + l2_tx_lists::{self, PreBuiltTxList}, + }, +}; +use pacaya::l2::config::TaikoConfig; +use std::sync::Arc; +use taiko_alethia_reth::validation::ANCHOR_V3_V4_GAS_LIMIT; +use tracing::{debug, trace}; + +pub struct Taiko { + protocol_config: ProtocolConfig, + 
l2_execution_layer: Arc, + driver: Arc, + slot_clock: Arc, + coinbase: String, + l2_engine: L2Engine, +} + +impl Taiko { + pub async fn new( + slot_clock: Arc, + protocol_config: ProtocolConfig, + metrics: Arc, + taiko_config: TaikoConfig, + l2_engine: L2Engine, + l2_bridge_address: Address, + l2_signal_service_address: Address, + ) -> Result { + let driver_config: TaikoDriverConfig = TaikoDriverConfig { + driver_url: taiko_config.driver_url.clone(), + rpc_driver_preconf_timeout: taiko_config.rpc_driver_preconf_timeout, + rpc_driver_status_timeout: taiko_config.rpc_driver_status_timeout, + rpc_driver_retry_timeout: taiko_config.rpc_driver_retry_timeout, + jwt_secret_bytes: taiko_config.jwt_secret_bytes, + }; + Ok(Self { + protocol_config, + l2_execution_layer: Arc::new( + L2ExecutionLayer::new( + taiko_config.clone(), + l2_bridge_address, + l2_signal_service_address, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create L2ExecutionLayer: {}", e))?, + ), + driver: Arc::new(TaikoDriver::new(&driver_config, metrics).await?), + slot_clock, + coinbase: format!("0x{}", hex::encode(taiko_config.signer.get_address())), + l2_engine, + }) + } + + pub fn get_driver(&self) -> Arc { + self.driver.clone() + } + + pub fn l2_execution_layer(&self) -> Arc { + self.l2_execution_layer.clone() + } + + pub async fn get_pending_l2_tx_list_from_l2_engine( + &self, + base_fee: u64, + batches_ready_to_send: u64, + gas_limit: u64, + ) -> Result, Error> { + self.l2_engine + .get_pending_l2_tx_list(base_fee, batches_ready_to_send, gas_limit) + .await + } + + pub fn get_protocol_config(&self) -> &ProtocolConfig { + &self.protocol_config + } + + pub async fn get_latest_l2_block_id(&self) -> Result { + self.l2_execution_layer.common().get_latest_block_id().await + } + + pub async fn get_l2_block_by_number( + &self, + number: u64, + full_txs: bool, + ) -> Result { + self.l2_execution_layer + .common() + .get_block_by_number(number, full_txs) + .await + } + + pub async fn 
fetch_l2_blocks_until_latest( + &self, + start_block: u64, + full_txs: bool, + ) -> Result, Error> { + let start_time = std::time::Instant::now(); + let end_block = self.get_latest_l2_block_id().await?; + let mut blocks = Vec::with_capacity(usize::try_from(end_block - start_block + 1)?); + for block_number in start_block..=end_block { + let block = self.get_l2_block_by_number(block_number, full_txs).await?; + blocks.push(block); + } + debug!( + "Fetched L2 blocks from {} to {} in {} ms", + start_block, + end_block, + start_time.elapsed().as_millis() + ); + Ok(blocks) + } + + pub async fn get_transaction_by_hash( + &self, + hash: B256, + ) -> Result { + self.l2_execution_layer + .common() + .get_transaction_by_hash(hash) + .await + } + + pub async fn get_l2_block_hash(&self, number: u64) -> Result { + self.l2_execution_layer + .common() + .get_block_hash(number) + .await + } + + /// Scan backward from L2 head to find the block number matching a given hash. + /// Used during recovery to resolve `lastFinalizedBlockHash` from L1 to an L2 block number. 
+ pub async fn find_l2_block_number_by_hash(&self, block_hash: B256) -> Result { + let head = self.get_latest_l2_block_id().await?; + for n in (0..=head).rev() { + let hash = self.get_l2_block_hash(n).await?; + if hash == block_hash { + return Ok(n); + } + } + Err(anyhow::anyhow!( + "L2 block with hash {} not found on Geth (scanned {} blocks)", + block_hash, + head + 1 + )) + } + + pub async fn get_l2_slot_info(&self) -> Result { + self.get_l2_slot_info_by_parent_block(BlockNumberOrTag::Latest) + .await + } + + pub async fn get_l2_slot_info_by_parent_block( + &self, + parent: BlockNumberOrTag, + ) -> Result { + let l2_slot_timestamp = self.slot_clock.get_l2_slot_begin_timestamp()?; + let parent_block = self + .l2_execution_layer + .common() + .get_block_header(parent) + .await?; + let parent_id = parent_block.header.number(); + let parent_hash = parent_block.header.hash; + let parent_gas_limit = parent_block.header.gas_limit(); + let parent_timestamp = parent_block.header.timestamp(); + + let parent_gas_limit_without_anchor = if parent_id != 0 { + parent_gas_limit + .checked_sub(ANCHOR_V3_V4_GAS_LIMIT) + .ok_or_else(|| { + anyhow::anyhow!( + "parent_gas_limit {} is less than ANCHOR_V3_V4_GAS_LIMIT {}", + parent_gas_limit, + ANCHOR_V3_V4_GAS_LIMIT + ) + })? 
+ } else { + parent_gas_limit + }; + + let base_fee: u64 = self.get_base_fee(parent_block).await?; + + trace!( + timestamp = %l2_slot_timestamp, + parent_hash = %parent_hash, + parent_gas_limit_without_anchor = %parent_gas_limit_without_anchor, + parent_timestamp = %parent_timestamp, + base_fee = %base_fee, + "L2 slot info" + ); + + Ok(L2SlotInfoV2::new( + base_fee, + l2_slot_timestamp, + parent_id, + parent_hash, + parent_gas_limit_without_anchor, + parent_timestamp, + )) + } + + async fn get_base_fee(&self, parent_block: Block) -> Result { + if parent_block.header.number() == 0 { + return Ok(taiko_alethia_reth::eip4396::SHASTA_INITIAL_BASE_FEE); + } + + let grandparent_number = parent_block.header.number() - 1; + let grandparent_timestamp = self + .l2_execution_layer + .common() + .get_block_header(BlockNumberOrTag::Number(grandparent_number)) + .await? + .header + .timestamp(); + + let timestamp_diff = parent_block + .header + .timestamp() + .checked_sub(grandparent_timestamp) + .ok_or_else(|| anyhow::anyhow!("Timestamp underflow occurred"))?; + + let parent_base_fee_per_gas = + parent_block.header.inner.base_fee_per_gas.ok_or_else(|| { + anyhow::anyhow!( + "get_base_fee: Parent block missing base fee per gas for block {}", + parent_block.header.number() + ) + })?; + let base_fee = taiko_alethia_reth::eip4396::calculate_next_block_eip4396_base_fee( + &parent_block.header.inner, + timestamp_diff, + parent_base_fee_per_gas, + taiko_protocol::shasta::constants::min_base_fee_for_chain( + self.l2_execution_layer.common().chain_id(), + ), + ); + + Ok(base_fee) + } + + #[allow(clippy::too_many_arguments)] + pub async fn advance_head_to_new_l2_block( + &self, + l2_block_payload: L2BlockV2Payload, + l2_slot_context: &L2SlotContext, + anchor_signal_slots: Vec>, + operation_type: OperationType, + ) -> Result { + tracing::debug!( + "Submitting new L2 block to the Taiko driver with {} txs", + l2_block_payload.tx_list.len() + ); + + let anchor_block_params = ( + Checkpoint { 
+ blockNumber: l2_block_payload.anchor_block_id.try_into()?, + blockHash: l2_block_payload.anchor_block_hash, + stateRoot: l2_block_payload.anchor_state_root, + }, + anchor_signal_slots, + ); + + let anchor_tx = self + .l2_execution_layer + .construct_anchor_tx(&l2_slot_context.info, anchor_block_params) + .await + .map_err(|e| { + anyhow::anyhow!( + "advance_head_to_new_l2_block: Failed to construct anchor tx: {}", + e + ) + })?; + let tx_list = std::iter::once(anchor_tx) + .chain(l2_block_payload.tx_list.into_iter()) + .collect::>(); + + let tx_list_bytes = l2_tx_lists::encode_and_compress(&tx_list)?; + + let sharing_pctg = self.protocol_config.get_basefee_sharing_pctg(); + + // RealTime: 7 bytes — basefee_sharing_pctg + 6 zero bytes (no proposal_id) + let extra_data = format!("0x{:02x}000000000000", sharing_pctg); + + let executable_data = ExecutableData { + base_fee_per_gas: l2_slot_context.info.base_fee(), + block_number: l2_slot_context.info.parent_id() + 1, + extra_data, + fee_recipient: l2_block_payload.coinbase.to_string(), + gas_limit: l2_block_payload.gas_limit_without_anchor + ANCHOR_V3_V4_GAS_LIMIT, + parent_hash: format!("0x{}", hex::encode(l2_slot_context.info.parent_hash())), + timestamp: l2_block_payload.timestamp_sec, + transactions: format!("0x{}", hex::encode(tx_list_bytes)), + }; + + let request_body = BuildPreconfBlockRequestBody { + executable_data, + end_of_sequencing: l2_slot_context.end_of_sequencing, + is_forced_inclusion: false, + }; + + self.driver + .preconf_blocks(request_body, operation_type) + .await + } + + pub async fn reorg_stale_block( + &self, + new_head_block_number: u64, + ) -> Result { + self.driver.reorg_stale_block(new_head_block_number).await + } + + pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { + L2ExecutionLayer::decode_anchor_id_from_tx_data(data) + } + + pub fn get_anchor_tx_data(data: &[u8]) -> Result { + L2ExecutionLayer::get_anchor_tx_data(data) + } +} + +impl Bridgeable for Taiko { + async fn 
get_balance(&self, address: Address) -> Result { + self.l2_execution_layer + .common() + .get_account_balance(address) + .await + } + + async fn transfer_eth_from_l2_to_l1( + &self, + amount: u128, + dest_chain_id: u64, + address: Address, + bridge_relayer_fee: u64, + ) -> Result<(), Error> { + self.l2_execution_layer + .transfer_eth_from_l2_to_l1(amount, dest_chain_id, address, bridge_relayer_fee) + .await + } +} diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs new file mode 100644 index 00000000..e10697a6 --- /dev/null +++ b/realtime/src/lib.rs @@ -0,0 +1,171 @@ +mod chain_monitor; +mod l1; +mod l2; +mod node; +pub mod raiko; +mod shared_abi; +mod utils; + +use crate::utils::config::RealtimeConfig; +use anyhow::Error; +use common::{ + batch_builder::BatchBuilderConfig, + config::Config, + config::ConfigTrait, + fork_info::ForkInfo, + l1::{self as common_l1, traits::PreconferProvider}, + l2::engine::{L2Engine, L2EngineConfig}, + metrics, + utils::cancellation_token::CancellationToken, +}; +use l1::execution_layer::ExecutionLayer; +use node::Node; +use std::sync::Arc; +use tokio::sync::mpsc; +use tracing::info; + +pub async fn create_realtime_node( + config: Config, + metrics: Arc, + cancel_token: CancellationToken, + fork_info: ForkInfo, +) -> Result<(), Error> { + info!("Creating RealTime node"); + + let realtime_config = RealtimeConfig::read_env_variables() + .map_err(|e| anyhow::anyhow!("Failed to read RealTime configuration: {}", e))?; + info!("RealTime config: {}", realtime_config); + + let (transaction_error_sender, transaction_error_receiver) = mpsc::channel(100); + let ethereum_l1 = common_l1::ethereum_l1::EthereumL1::::new( + common_l1::config::EthereumL1Config::new(&config).await?, + l1::config::EthereumL1Config::try_from(realtime_config.clone())?, + transaction_error_sender, + metrics.clone(), + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create EthereumL1: {}", e))?; + + let ethereum_l1 = Arc::new(ethereum_l1); + + let taiko_config = 
pacaya::l2::config::TaikoConfig::new(&config) + .await + .map_err(|e| anyhow::anyhow!("Failed to create TaikoConfig: {}", e))?; + + let l2_engine = L2Engine::new(L2EngineConfig::new( + &config, + taiko_config.signer.get_address(), + )?) + .map_err(|e| anyhow::anyhow!("Failed to create L2Engine: {}", e))?; + let protocol_config = ethereum_l1.execution_layer.fetch_protocol_config().await?; + + let taiko = crate::l2::taiko::Taiko::new( + ethereum_l1.slot_clock.clone(), + protocol_config.clone(), + metrics.clone(), + taiko_config, + l2_engine, + config.taiko_bridge_address, + realtime_config.l2_signal_service, + ) + .await?; + let taiko = Arc::new(taiko); + + let node_config = node::config::NodeConfig { + preconf_heartbeat_ms: config.preconf_heartbeat_ms, + handover_window_slots: 8, + handover_start_buffer_ms: 500, + l1_height_lag: 8, + simulate_not_submitting_at_the_end_of_epoch: false, + }; + + let max_blocks_per_batch = if config.max_blocks_per_batch == 0 { + taiko_protocol::shasta::constants::DERIVATION_SOURCE_MAX_BLOCKS.try_into()? 
+ } else { + config.max_blocks_per_batch + }; + + // Use 256-block limit for anchor offset + let max_anchor_height_offset = 256u64; + + let batch_builder_config = BatchBuilderConfig { + max_bytes_size_of_batch: config.max_bytes_size_of_batch, + max_blocks_per_batch, + l1_slot_duration_sec: config.l1_slot_duration_sec, + max_time_shift_between_blocks_sec: config.max_time_shift_between_blocks_sec, + max_anchor_height_offset: max_anchor_height_offset + - config.max_anchor_height_offset_reduction, + default_coinbase: ethereum_l1.execution_layer.get_preconfer_address(), + preconf_min_txs: config.preconf_min_txs, + preconf_max_skipped_l2_slots: config.preconf_max_skipped_l2_slots, + proposal_max_time_sec: config.proposal_max_time_sec, + }; + + // Initialize chain monitor for ProposedAndProved events + let chain_monitor = Arc::new( + chain_monitor::RealtimeChainMonitor::new( + config + .l1_rpc_urls + .first() + .ok_or_else(|| anyhow::anyhow!("L1 RPC URL is required"))? + .clone(), + config.taiko_geth_rpc_url.clone(), + realtime_config.realtime_inbox, + cancel_token.clone(), + "ProposedAndProved", + chain_monitor::print_proposed_and_proved_info, + metrics.clone(), + ) + .map_err(|e| anyhow::anyhow!("Failed to create RealtimeChainMonitor: {}", e))?, + ); + chain_monitor + .start() + .await + .map_err(|e| anyhow::anyhow!("Failed to start RealtimeChainMonitor: {}", e))?; + + // Read the last finalized block hash from L1 + let last_finalized_block_hash = ethereum_l1 + .execution_layer + .get_last_finalized_block_hash() + .await?; + info!( + "Initial lastFinalizedBlockHash: {}", + last_finalized_block_hash + ); + + let preconf_only = realtime_config.preconf_only; + let proof_request_bypass = realtime_config.proof_request_bypass; + let bridge_rpc_addr = realtime_config.bridge_rpc_addr.clone(); + let raiko_client = raiko::RaikoClient::new(&realtime_config); + + let l1_chain_id = { + use common::l1::traits::ELTrait; + ethereum_l1.execution_layer.common().chain_id() + }; + + let 
node = Node::new( + node_config, + cancel_token.clone(), + ethereum_l1.clone(), + taiko.clone(), + metrics.clone(), + batch_builder_config, + transaction_error_receiver, + fork_info, + last_finalized_block_hash, + raiko_client, + protocol_config.basefee_sharing_pctg, + preconf_only, + proof_request_bypass, + bridge_rpc_addr, + l1_chain_id, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create Node: {}", e))?; + + node.entrypoint() + .await + .map_err(|e| anyhow::anyhow!("Failed to start Node: {}", e))?; + + Ok(()) +} diff --git a/realtime/src/node/config.rs b/realtime/src/node/config.rs new file mode 100644 index 00000000..c984b59e --- /dev/null +++ b/realtime/src/node/config.rs @@ -0,0 +1,8 @@ +#[derive(Debug, Clone)] +pub struct NodeConfig { + pub preconf_heartbeat_ms: u64, + pub handover_window_slots: u64, + pub handover_start_buffer_ms: u64, + pub l1_height_lag: u64, + pub simulate_not_submitting_at_the_end_of_epoch: bool, +} diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs new file mode 100644 index 00000000..35030696 --- /dev/null +++ b/realtime/src/node/mod.rs @@ -0,0 +1,525 @@ +pub mod config; +pub mod proposal_manager; +use crate::node::config::NodeConfig; +use anyhow::Error; +use common::{ + fork_info::ForkInfo, + l1::{ethereum_l1::EthereumL1, transaction_error::TransactionError}, + l2::taiko_driver::{TaikoDriver, models::BuildPreconfBlockResponse}, + metrics::Metrics, + shared::{l2_slot_info_v2::L2SlotContext, l2_tx_lists::PreBuiltTxList}, + utils::{self as common_utils, cancellation_token::CancellationToken}, +}; +use pacaya::node::operator::{Operator, Status as OperatorStatus}; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +use crate::l1::execution_layer::ExecutionLayer; +use crate::l2::taiko::Taiko; +use common::batch_builder::BatchBuilderConfig; +use common::l1::traits::PreconferProvider; +use common::shared::head_verifier::HeadVerifier; +use common::shared::l2_slot_info_v2::L2SlotInfoV2; +use 
proposal_manager::BatchManager; + +use tokio::{ + sync::mpsc::{Receiver, error::TryRecvError}, + time::{Duration, sleep}, +}; + +pub struct Node { + config: NodeConfig, + cancel_token: CancellationToken, + ethereum_l1: Arc>, + taiko: Arc, + watchdog: common_utils::watchdog::Watchdog, + operator: Operator, + #[allow(dead_code)] + metrics: Arc, + proposal_manager: BatchManager, + head_verifier: HeadVerifier, + transaction_error_channel: Receiver, + preconf_only: bool, +} + +impl Node { + #[allow(clippy::too_many_arguments)] + pub async fn new( + config: NodeConfig, + cancel_token: CancellationToken, + ethereum_l1: Arc>, + taiko: Arc, + metrics: Arc, + batch_builder_config: BatchBuilderConfig, + transaction_error_channel: Receiver, + fork_info: ForkInfo, + last_finalized_block_hash: alloy::primitives::B256, + raiko_client: crate::raiko::RaikoClient, + basefee_sharing_pctg: u8, + preconf_only: bool, + proof_request_bypass: bool, + bridge_rpc_addr: String, + l1_chain_id: u64, + ) -> Result { + let operator = Operator::new( + ethereum_l1.execution_layer.clone(), + ethereum_l1.slot_clock.clone(), + taiko.get_driver(), + config.handover_window_slots, + config.handover_start_buffer_ms, + config.simulate_not_submitting_at_the_end_of_epoch, + cancel_token.clone(), + fork_info.clone(), + 0, + ) + .map_err(|e| anyhow::anyhow!("Failed to create Operator: {}", e))?; + let watchdog = common_utils::watchdog::Watchdog::new( + cancel_token.clone(), + ethereum_l1.slot_clock.get_l2_slots_per_epoch() / 2, + ); + let head_verifier = HeadVerifier::default(); + + let proposal_manager = BatchManager::new( + config.l1_height_lag, + batch_builder_config, + ethereum_l1.clone(), + taiko.clone(), + metrics.clone(), + cancel_token.clone(), + last_finalized_block_hash, + raiko_client, + basefee_sharing_pctg, + proof_request_bypass, + bridge_rpc_addr, + l1_chain_id, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create BatchManager: {}", e))?; + + let start = std::time::Instant::now(); + 
common::blob::build_default_kzg_settings(); + info!( + "Setup build_default_kzg_settings in {} milliseconds", + start.elapsed().as_millis() + ); + + Ok(Self { + config, + cancel_token, + ethereum_l1, + taiko, + watchdog, + operator, + metrics, + proposal_manager, + head_verifier, + transaction_error_channel, + preconf_only, + }) + } + + pub async fn entrypoint(mut self) -> Result<(), Error> { + info!("Starting RealTime node"); + + if let Err(err) = self.warmup().await { + error!("Failed to warm up node: {}. Shutting down.", err); + self.cancel_token.cancel_on_critical_error(); + return Err(anyhow::anyhow!(err)); + } + + info!("Node warmup successful"); + + tokio::spawn(async move { + self.preconfirmation_loop().await; + }); + + Ok(()) + } + + async fn preconfirmation_loop(&mut self) { + debug!("Main preconfirmation loop started"); + common_utils::synchronization::synchronize_with_l1_slot_start(&self.ethereum_l1).await; + + let mut interval = + tokio::time::interval(Duration::from_millis(self.config.preconf_heartbeat_ms)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + interval.tick().await; + + if self.cancel_token.is_cancelled() { + info!("Shutdown signal received, exiting main loop..."); + return; + } + + if let Err(err) = self.main_block_preconfirmation_step().await { + error!("Failed to execute main block preconfirmation step: {}", err); + self.watchdog.increment(); + } else { + self.watchdog.reset(); + } + } + } + + async fn main_block_preconfirmation_step(&mut self) -> Result<(), Error> { + let (l2_slot_info, current_status, pending_tx_list) = + self.get_slot_info_and_status().await?; + + if !self.preconf_only { + // Poll for completed async submissions (non-blocking) + if let Some(result) = self.proposal_manager.poll_submission_result() { + match result { + Ok(()) => info!("Async submission completed successfully"), + Err(e) => { + if let Some(transaction_error) = e.downcast_ref::() { + self.handle_transaction_error( + 
transaction_error, + ¤t_status, + &l2_slot_info, + ) + .await?; + } else { + warn!( + "Async submission failed: {}. Reorging preconfirmed L2 blocks.", + e + ); + self.recover_from_failed_submission().await?; + } + // Return early — l2_slot_info is stale after reorg recovery. + // The next heartbeat will pick up fresh state. + return Ok(()); + } + } + } + + // Check for transaction errors (reverts detected after mining) + match self.transaction_error_channel.try_recv() { + Ok(error) => { + self.handle_transaction_error(&error, ¤t_status, &l2_slot_info) + .await?; + // Return early — l2_slot_info is stale after reorg recovery. + return Ok(()); + } + Err(err) => match err { + TryRecvError::Empty => {} + TryRecvError::Disconnected => { + self.cancel_token.cancel_on_critical_error(); + return Err(anyhow::anyhow!("Transaction error channel disconnected")); + } + }, + } + } + + if current_status.is_preconfirmation_start_slot() { + self.head_verifier + .set(l2_slot_info.parent_id(), *l2_slot_info.parent_hash()) + .await; + } + + // Preconfirmation phase — skip if a proof request or submission is already in progress + if current_status.is_preconfer() + && current_status.is_driver_synced() + && !self.proposal_manager.is_submission_in_progress() + { + if !self + .head_verifier + .verify(l2_slot_info.parent_id(), l2_slot_info.parent_hash()) + .await + { + self.head_verifier.log_error().await; + warn!("Unexpected L2 head detected. 
Attempting recovery via reorg."); + self.recover_from_failed_submission().await?; + return Ok(()); + } + + let l2_slot_context = L2SlotContext { + info: l2_slot_info.clone(), + end_of_sequencing: current_status.is_end_of_sequencing(), + }; + + if self + .proposal_manager + .should_new_block_be_created(&pending_tx_list, &l2_slot_context) + && (pending_tx_list + .as_ref() + .is_some_and(|pre_built_list| !pre_built_list.tx_list.is_empty()) + || self.proposal_manager.has_pending_user_ops().await) + { + let preconfed_block = self + .proposal_manager + .preconfirm_block(pending_tx_list, &l2_slot_context) + .await?; + + self.verify_preconfed_block(preconfed_block).await?; + } + } + + // Submission phase + if self.preconf_only { + // PRECONF_ONLY mode: drop finalized batches without proving/proposing + self.proposal_manager.drain_finalized_batches(); + } else if current_status.is_submitter() + && !self.proposal_manager.is_submission_in_progress() + && let Err(err) = self + .proposal_manager + .try_start_submission(current_status.is_preconfer()) + .await + { + if let Some(transaction_error) = err.downcast_ref::() { + self.handle_transaction_error(transaction_error, ¤t_status, &l2_slot_info) + .await?; + } else { + return Err(err); + } + } + + // Cleanup + if !current_status.is_submitter() + && !current_status.is_preconfer() + && self.proposal_manager.has_batches() + { + error!( + "Resetting batch builder. has batches: {}", + self.proposal_manager.has_batches(), + ); + self.proposal_manager.reset_builder().await?; + } + + Ok(()) + } + + async fn recover_from_failed_submission(&mut self) -> Result<(), Error> { + self.proposal_manager.reorg_unproposed_blocks().await?; + self.proposal_manager.reset_builder().await?; + + let l2_slot_info = self.taiko.get_l2_slot_info().await?; + self.head_verifier + .set(l2_slot_info.parent_id(), *l2_slot_info.parent_hash()) + .await; + + info!("Recovery complete. 
Resuming preconfirmation loop."); + Ok(()) + } + + async fn handle_transaction_error( + &mut self, + error: &TransactionError, + _current_status: &OperatorStatus, + _l2_slot_info: &L2SlotInfoV2, + ) -> Result<(), Error> { + match error { + TransactionError::ReanchorRequired => { + warn!("Unexpected ReanchorRequired error received"); + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "ReanchorRequired error received unexpectedly, exiting" + )) + } + TransactionError::NotConfirmed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "Transaction not confirmed for a long time, exiting" + )) + } + TransactionError::UnsupportedTransactionType => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Unsupported transaction type")) + } + TransactionError::GetBlockNumberFailed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Failed to get block number from L1")) + } + TransactionError::EstimationTooEarly => { + warn!("Transaction estimation too early"); + Ok(()) + } + TransactionError::InsufficientFunds => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "Transaction reverted with InsufficientFunds error" + )) + } + TransactionError::EstimationFailed => { + warn!("L1 transaction estimation failed. Reorging preconfirmed L2 blocks."); + self.recover_from_failed_submission().await + } + TransactionError::TransactionReverted => { + warn!("L1 transaction reverted. 
Reorging preconfirmed L2 blocks."); + self.recover_from_failed_submission().await + } + TransactionError::OldestForcedInclusionDue => { + // No forced inclusions in RealTime, but handle gracefully + warn!("OldestForcedInclusionDue received in RealTime mode, ignoring"); + Ok(()) + } + TransactionError::NotTheOperatorInCurrentEpoch => { + warn!("Propose batch transaction executed too late."); + Ok(()) + } + TransactionError::BuildFailed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Transaction build failed, exiting")) + } + } + } + + async fn get_slot_info_and_status( + &mut self, + ) -> Result<(L2SlotInfoV2, OperatorStatus, Option), Error> { + let l2_slot_info = self.taiko.get_l2_slot_info().await; + let current_status = match &l2_slot_info { + Ok(info) => self.operator.get_status(info).await, + Err(_) => Err(anyhow::anyhow!("Failed to get L2 slot info")), + }; + + let gas_limit_without_anchor = match &l2_slot_info { + Ok(info) => info.parent_gas_limit_without_anchor(), + Err(_) => { + error!("Failed to get L2 slot info set gas_limit_without_anchor to 0"); + 0u64 + } + }; + + let pending_tx_list = if gas_limit_without_anchor != 0 { + let batches_ready_to_send = 0; + match &l2_slot_info { + Ok(info) => { + self.taiko + .get_pending_l2_tx_list_from_l2_engine( + info.base_fee(), + batches_ready_to_send, + gas_limit_without_anchor, + ) + .await + } + Err(_) => Err(anyhow::anyhow!("Failed to get L2 slot info")), + } + } else { + Ok(None) + }; + + self.print_current_slots_info( + ¤t_status, + &pending_tx_list, + &l2_slot_info, + self.proposal_manager.get_number_of_batches(), + )?; + + Ok((l2_slot_info?, current_status?, pending_tx_list?)) + } + + async fn verify_preconfed_block( + &self, + l2_block: BuildPreconfBlockResponse, + ) -> Result<(), Error> { + if !self + .head_verifier + .verify_next_and_set(l2_block.number, l2_block.hash, l2_block.parent_hash) + .await + { + self.head_verifier.log_error().await; + 
self.cancel_token.cancel_on_critical_error(); + return Err(anyhow::anyhow!( + "Unexpected L2 head after preconfirmation. Restarting node..." + )); + } + Ok(()) + } + + fn print_current_slots_info( + &self, + current_status: &Result, + pending_tx_list: &Result, Error>, + l2_slot_info: &Result, + batches_number: u64, + ) -> Result<(), Error> { + let l1_slot = self.ethereum_l1.slot_clock.get_current_slot()?; + info!(target: "heartbeat", + "| Epoch: {:<6} | Slot: {:<2} | L2 Slot: {:<2} | {}{} Batches: {batches_number} | {} |", + self.ethereum_l1.slot_clock.get_epoch_from_slot(l1_slot), + self.ethereum_l1.slot_clock.slot_of_epoch(l1_slot), + self.ethereum_l1 + .slot_clock + .get_current_l2_slot_within_l1_slot()?, + if let Ok(pending_tx_list) = pending_tx_list { + format!( + "Txs: {:<4} |", + pending_tx_list + .as_ref() + .map_or(0, |tx_list| tx_list.tx_list.len()) + ) + } else { + "Txs: unknown |".to_string() + }, + if let Ok(l2_slot_info) = l2_slot_info { + format!( + " Fee: {:<7} | L2: {:<6} | Time: {:<10} | Hash: {} |", + l2_slot_info.base_fee(), + l2_slot_info.parent_id(), + l2_slot_info.slot_timestamp(), + &l2_slot_info.parent_hash().to_string()[..8] + ) + } else { + " L2 slot info unknown |".to_string() + }, + if let Ok(status) = current_status { + status.to_string() + } else { + "Unknown".to_string() + }, + ); + Ok(()) + } + + async fn warmup(&mut self) -> Result<(), Error> { + info!("Warmup RealTime node"); + + // Wait for RealTimeInbox activation (lastFinalizedBlockHash != 0) + loop { + let hash = self + .ethereum_l1 + .execution_layer + .get_last_finalized_block_hash() + .await?; + if hash != alloy::primitives::B256::ZERO { + info!("RealTimeInbox is active, lastFinalizedBlockHash: {}", hash); + break; + } + warn!("RealTimeInbox not yet activated. 
Waiting..."); + sleep(Duration::from_secs(12)).await; + } + + // Wait for the last sent transaction to be executed + self.wait_for_sent_transactions().await?; + + // Reorg any preconfirmed-but-unproposed L2 blocks back to the last proposed block + if !self.preconf_only { + self.proposal_manager.reorg_unproposed_blocks().await?; + } + + Ok(()) + } + + async fn wait_for_sent_transactions(&self) -> Result<(), Error> { + loop { + let nonce_latest: u64 = self + .ethereum_l1 + .execution_layer + .get_preconfer_nonce_latest() + .await?; + let nonce_pending: u64 = self + .ethereum_l1 + .execution_layer + .get_preconfer_nonce_pending() + .await?; + if nonce_pending == nonce_latest { + break; + } + debug!( + "Waiting for sent transactions to be executed. Nonce Latest: {nonce_latest}, Nonce Pending: {nonce_pending}" + ); + sleep(Duration::from_secs(6)).await; + } + + Ok(()) + } +} diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs new file mode 100644 index 00000000..9f4c88c8 --- /dev/null +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -0,0 +1,499 @@ +use crate::l1::execution_layer::ExecutionLayer; +use crate::node::proposal_manager::bridge_handler::{UserOpStatus, UserOpStatusStore}; +use crate::node::proposal_manager::proposal::Proposal; +use crate::raiko::{ + RaikoBlobSlice, RaikoCheckpoint, RaikoClient, RaikoDerivationSource, RaikoProofRequest, +}; +use alloy::consensus::SidecarBuilder; +use alloy::primitives::B256; +use anyhow::Error; +use common::l1::ethereum_l1::EthereumL1; +use std::sync::Arc; +use taiko_protocol::shasta::BlobCoder; +use taiko_protocol::shasta::manifest::{BlockManifest, DerivationSourceManifest}; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use tracing::info; + +pub struct SubmissionResult { + pub new_last_finalized_block_hash: B256, + pub new_last_finalized_block_number: u64, +} + +struct InFlightSubmission { + result_rx: oneshot::Receiver>, + handle: 
JoinHandle<()>, +} + +pub struct AsyncSubmitter { + in_flight: Option, + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, + proof_request_bypass: bool, +} + +impl AsyncSubmitter { + pub fn new( + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, + proof_request_bypass: bool, + ) -> Self { + Self { + in_flight: None, + raiko_client, + basefee_sharing_pctg, + ethereum_l1, + proof_request_bypass, + } + } + + pub fn is_busy(&self) -> bool { + self.in_flight.is_some() + } + + /// Non-blocking check for completed submission. Returns None if idle or still in progress. + pub fn try_recv_result(&mut self) -> Option> { + let in_flight = self.in_flight.as_mut()?; + match in_flight.result_rx.try_recv() { + Ok(result) => { + self.in_flight = None; + Some(result) + } + Err(oneshot::error::TryRecvError::Empty) => None, + Err(oneshot::error::TryRecvError::Closed) => { + self.in_flight = None; + Some(Err(anyhow::anyhow!( + "Submission task panicked or was dropped" + ))) + } + } + } + + /// Submit a proposal asynchronously. Spawns a background task that fetches the ZK proof + /// from Raiko and then sends the L1 transaction. Results are retrieved via `try_recv_result`. + pub fn submit(&mut self, proposal: Proposal, status_store: Option) { + assert!( + !self.is_busy(), + "Cannot submit while another submission is in flight" + ); + + let (result_tx, result_rx) = oneshot::channel(); + let raiko_client = self.raiko_client.clone(); + let basefee_sharing_pctg = self.basefee_sharing_pctg; + let ethereum_l1 = self.ethereum_l1.clone(); + let proof_request_bypass = self.proof_request_bypass; + + // Collect user-op IDs before moving `proposal` so the catch-all below can + // mark them as Rejected if `submission_task` returns an error before the + // status is updated (e.g. blob encoding / sidecar building failures). 
+ let all_user_op_ids: Vec = proposal + .user_ops + .iter() + .map(|op| op.id) + .chain(proposal.l2_user_op_ids.iter().copied()) + .collect(); + let fallback_store = status_store.clone(); + + let handle = tokio::spawn(async move { + let result = submission_task( + proposal, + &raiko_client, + basefee_sharing_pctg, + ethereum_l1, + status_store, + proof_request_bypass, + ) + .await; + + // Catch-all: if submission_task errored, ensure every user op is marked + // Rejected. The task itself handles Raiko and L1-send errors, but + // pre-proof failures (manifest encoding, sidecar building) bail via `?` + // before any status update — leaving ops stuck at Pending forever. + if let Err(ref e) = result + && let Some(ref store) = fallback_store + { + let reason = format!("Submission failed: {}", e); + for id in &all_user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + } + + let _ = result_tx.send(result); + }); + + self.in_flight = Some(InFlightSubmission { result_rx, handle }); + } + + pub fn abort(&mut self) { + if let Some(in_flight) = self.in_flight.take() { + in_flight.handle.abort(); + } + } +} + +async fn submission_task( + mut proposal: Proposal, + raiko_client: &RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, + status_store: Option, + proof_request_bypass: bool, +) -> Result { + // Step 1: Fetch ZK proof from Raiko (or bypass) + if proposal.zk_proof.is_none() { + let l2_block_numbers: Vec = + (proposal.checkpoint.blockNumber.to::() - u64::try_from(proposal.l2_blocks.len())? 
+ + 1..=proposal.checkpoint.blockNumber.to::()) + .collect(); + + // Build the blob sidecar (same as proposal_tx_builder) to get blob hashes and raw data + let mut block_manifests = Vec::with_capacity(proposal.l2_blocks.len()); + for l2_block in &proposal.l2_blocks { + block_manifests.push(BlockManifest { + timestamp: l2_block.timestamp_sec, + coinbase: l2_block.coinbase, + anchor_block_number: l2_block.anchor_block_number, + gas_limit: l2_block.gas_limit_without_anchor, + transactions: l2_block + .prebuilt_tx_list + .tx_list + .iter() + .map(|tx| tx.clone().into()) + .collect(), + }); + } + let manifest = DerivationSourceManifest { + blocks: block_manifests, + }; + let manifest_data = manifest.encode_and_compress()?; + let sidecar_builder: SidecarBuilder = SidecarBuilder::from_slice(&manifest_data); + let sidecar: alloy::eips::eip7594::BlobTransactionSidecarEip7594 = + sidecar_builder.build_7594()?; + + // Extract versioned blob hashes + let blob_hashes: Vec = sidecar + .versioned_hashes() + .map(|h| format!("0x{}", hex::encode(h))) + .collect(); + + // Extract raw blob data (each blob is 131072 bytes, hex-encoded with 0x prefix) + let blobs: Vec = sidecar + .blobs + .iter() + .map(|blob| format!("0x{}", hex::encode::<&[u8]>(blob.as_ref()))) + .collect(); + + // Build sources array with a single DerivationSource entry + let sources = vec![RaikoDerivationSource { + is_forced_inclusion: false, + blob_slice: RaikoBlobSlice { + blob_hashes, + offset: 0, + timestamp: 0, + }, + }]; + + let request = RaikoProofRequest { + l2_block_numbers, + proof_type: raiko_client.proof_type.raiko_proof_type().to_string(), + max_anchor_block_number: proposal.max_anchor_block_number, + last_finalized_block_hash: format!( + "0x{}", + hex::encode(proposal.last_finalized_block_hash) + ), + basefee_sharing_pctg, + network: None, + l1_network: None, + prover: None, + signal_slots: proposal + .signal_slots + .iter() + .map(|s| format!("0x{}", hex::encode(s))) + .collect(), + sources, + blobs, 
+ checkpoint: Some(RaikoCheckpoint { + block_number: proposal.checkpoint.blockNumber.to::(), + block_hash: format!("0x{}", hex::encode(proposal.checkpoint.blockHash)), + state_root: format!("0x{}", hex::encode(proposal.checkpoint.stateRoot)), + }), + blob_proof_type: "proof_of_equivalence".to_string(), + }; + + if proof_request_bypass { + let json = serde_json::to_string_pretty(&request)?; + let raiko_url = format!("{}/v3/proof/batch/realtime", raiko_client.base_url); + + std::fs::write("/tmp/raiko_request.json", &json)?; + + let api_key_header = raiko_client + .api_key + .as_ref() + .map(|k| format!(" -H 'X-API-KEY: {}' \\\n", k)) + .unwrap_or_default(); + let curl_script = format!( + "#!/bin/bash\n\ + # Generated by Catalyst — send this to your Raiko instance\n\ + # Usage: RAIKO_URL=http://your-raiko:8080 bash /tmp/raiko_curl.sh\n\n\ + RAIKO_URL=\"${{RAIKO_URL:-{raiko_url}}}\"\n\n\ + curl -X POST \"$RAIKO_URL\" \\\n\ + {api_key_header}\ + \x20 -H 'Content-Type: application/json' \\\n\ + \x20 -d @/tmp/raiko_request.json\n" + ); + std::fs::write("/tmp/raiko_curl.sh", &curl_script)?; + + info!( + "PROOF_REQUEST_BYPASS: Raiko request dumped.\n\ + Request JSON: /tmp/raiko_request.json\n\ + Curl script: /tmp/raiko_curl.sh\n\ + Raiko URL: {}\n\ + Skipping Raiko call and L1 submission.", + raiko_url + ); + + return Ok(SubmissionResult { + new_last_finalized_block_hash: proposal.checkpoint.blockHash, + new_last_finalized_block_number: proposal.checkpoint.blockNumber.to::(), + }); + } + + // Set user op status to ProvingBlock before requesting proof from Raiko + if let Some(ref store) = status_store { + for op in &proposal.user_ops { + store.set( + op.id, + &UserOpStatus::ProvingBlock { + block_id: proposal.checkpoint.blockNumber.to::(), + }, + ); + } + // Also track L2 direct UserOps + for id in &proposal.l2_user_op_ids { + store.set( + *id, + &UserOpStatus::ProvingBlock { + block_id: proposal.checkpoint.blockNumber.to::(), + }, + ); + } + // L2→L1→L2 mempool-picked txs 
tracked by L2 tx hash + for tx_hash in &proposal.l2_mempool_tx_hashes { + store.set_by_hash( + *tx_hash, + &UserOpStatus::ProvingBlock { + block_id: proposal.checkpoint.blockNumber.to::(), + }, + ); + } + } + + let proof = match raiko_client.get_proof(&request).await { + Ok(proof) => proof, + Err(e) => { + if let Some(ref store) = status_store { + let reason = format!("Proof generation failed: {}", e); + for op in &proposal.user_ops { + store.set( + op.id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + for id in &proposal.l2_user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + for tx_hash in &proposal.l2_mempool_tx_hashes { + store.set_by_hash( + *tx_hash, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + } + return Err(e); + } + }; + proposal.zk_proof = Some(proof); + } + + // Step 2: Send L1 transaction + let mut user_op_ids: Vec = proposal.user_ops.iter().map(|op| op.id).collect(); + user_op_ids.extend(&proposal.l2_user_op_ids); + let l2_mempool_tx_hashes: Vec = proposal.l2_mempool_tx_hashes.clone(); + let has_tracked_entries = + (!user_op_ids.is_empty() || !l2_mempool_tx_hashes.is_empty()) && status_store.is_some(); + + let (tx_hash_sender, tx_hash_receiver) = if has_tracked_entries { + let (s, r) = tokio::sync::oneshot::channel(); + (Some(s), Some(r)) + } else { + (None, None) + }; + let (tx_result_sender, tx_result_receiver) = if has_tracked_entries { + let (s, r) = tokio::sync::oneshot::channel(); + (Some(s), Some(r)) + } else { + (None, None) + }; + + if let Err(err) = ethereum_l1 + .execution_layer + .send_batch_to_l1(proposal.clone(), tx_hash_sender, tx_result_sender) + .await + { + // Mark all tracked entries (L1/L2 UserOps and mempool-picked L2 txs) as rejected + if let Some(ref store) = status_store { + let reason = format!("L1 multicall failed: {}", err); + for op in &proposal.user_ops { + store.set( + op.id, + &UserOpStatus::Rejected { + reason: 
reason.clone(), + }, + ); + } + for id in &proposal.l2_user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + for tx_hash in &proposal.l2_mempool_tx_hashes { + store.set_by_hash( + *tx_hash, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + } + return Err(err); + } + + // Step 3: After successful submission, the new lastFinalizedBlockHash is the checkpoint's blockHash + let new_last_finalized_block_hash = proposal.checkpoint.blockHash; + let new_last_finalized_block_number = proposal.checkpoint.blockNumber.to::(); + + // Step 4: Spawn user-op status tracker + if let (Some(hash_rx), Some(result_rx), Some(store)) = + (tx_hash_receiver, tx_result_receiver, status_store) + { + tokio::spawn(async move { + let tx_hash = match hash_rx.await { + Ok(tx_hash) => { + for id in &user_op_ids { + store.set(*id, &UserOpStatus::Processing { tx_hash }); + } + for l2_tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash(*l2_tx_hash, &UserOpStatus::Processing { tx_hash }); + } + Some(tx_hash) + } + Err(_) => { + for id in &user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: "Transaction failed to send".to_string(), + }, + ); + } + for l2_tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash( + *l2_tx_hash, + &UserOpStatus::Rejected { + reason: "Transaction failed to send".to_string(), + }, + ); + } + None + } + }; + + if tx_hash.is_some() { + match result_rx.await { + Ok(true) => { + for id in &user_op_ids { + store.set(*id, &UserOpStatus::Executed); + } + for l2_tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash(*l2_tx_hash, &UserOpStatus::Executed); + } + } + Ok(false) => { + for id in &user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: "L1 multicall reverted".to_string(), + }, + ); + } + for l2_tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash( + *l2_tx_hash, + &UserOpStatus::Rejected { + reason: "L1 multicall reverted".to_string(), + }, + ); + } + } + Err(_) => 
{ + for id in &user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: "Transaction monitor dropped".to_string(), + }, + ); + } + for l2_tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash( + *l2_tx_hash, + &UserOpStatus::Rejected { + reason: "Transaction monitor dropped".to_string(), + }, + ); + } + } + } + } + + // Clean up status entries after 60s (client should have polled by then) + let cleanup_store = store.clone(); + let cleanup_ids = user_op_ids.clone(); + let cleanup_hashes = l2_mempool_tx_hashes.clone(); + tokio::spawn(async move { + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + for id in &cleanup_ids { + cleanup_store.remove(*id); + } + for tx_hash in &cleanup_hashes { + cleanup_store.remove_by_hash(*tx_hash); + } + }); + }); + } + + Ok(SubmissionResult { + new_last_finalized_block_hash, + new_last_finalized_block_number, + }) +} diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs new file mode 100644 index 00000000..851eb7eb --- /dev/null +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -0,0 +1,360 @@ +use crate::l1::bindings::ICheckpointStore::Checkpoint; +use crate::node::proposal_manager::{ + bridge_handler::{L1Call, UserOp}, + l2_block_payload::L2BlockV2Payload, + proposal::Proposal, +}; +use alloy::primitives::{B256, FixedBytes}; +use anyhow::Error; +use common::metrics::Metrics; +use common::{ + batch_builder::BatchBuilderConfig, + shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}, +}; +use common::{l1::slot_clock::SlotClock, shared::anchor_block_info::AnchorBlockInfo}; +use std::{collections::VecDeque, sync::Arc}; +use tracing::{debug, info, trace, warn}; + +pub struct BatchBuilder { + config: BatchBuilderConfig, + proposals_to_send: VecDeque, + current_proposal: Option, + slot_clock: Arc, + #[allow(dead_code)] + metrics: Arc, +} + +impl BatchBuilder { + pub fn new( + config: BatchBuilderConfig, + slot_clock: Arc, + metrics: Arc, + 
) -> Self { + Self { + config, + proposals_to_send: VecDeque::new(), + current_proposal: None, + slot_clock, + metrics, + } + } + + pub fn get_config(&self) -> &BatchBuilderConfig { + &self.config + } + + pub fn can_consume_l2_block(&mut self, l2_draft_block: &L2BlockV2Draft) -> bool { + let is_time_shift_expired = self.is_time_shift_expired(l2_draft_block.timestamp_sec); + self.current_proposal.as_mut().is_some_and(|batch| { + let new_block_count = match u16::try_from(batch.l2_blocks.len() + 1) { + Ok(n) => n, + Err(_) => return false, + }; + + let mut new_total_bytes = + batch.total_bytes + l2_draft_block.prebuilt_tx_list.bytes_length; + + if !self.config.is_within_bytes_limit(new_total_bytes) { + batch.compress(); + new_total_bytes = batch.total_bytes + l2_draft_block.prebuilt_tx_list.bytes_length; + if !self.config.is_within_bytes_limit(new_total_bytes) { + let start = std::time::Instant::now(); + let mut batch_clone = batch.clone(); + batch_clone.add_l2_draft_block(l2_draft_block.clone()); + batch_clone.compress(); + new_total_bytes = batch_clone.total_bytes; + debug!( + "can_consume_l2_block: Second compression took {} ms, new total bytes: {}", + start.elapsed().as_millis(), + new_total_bytes + ); + } + } + + self.config.is_within_bytes_limit(new_total_bytes) + && self.config.is_within_block_limit(new_block_count) + && !is_time_shift_expired + }) + } + + pub fn create_new_batch( + &mut self, + anchor_block: AnchorBlockInfo, + last_finalized_block_hash: B256, + ) { + self.finalize_current_batch(); + + self.current_proposal = Some(Proposal { + l2_blocks: vec![], + total_bytes: 0, + coinbase: self.config.default_coinbase, + max_anchor_block_number: anchor_block.id(), + max_anchor_block_hash: anchor_block.hash(), + max_anchor_state_root: anchor_block.state_root(), + checkpoint: Checkpoint::default(), + last_finalized_block_hash, + user_ops: vec![], + l2_user_op_ids: vec![], + l2_mempool_tx_hashes: vec![], + signal_slots: vec![], + l1_calls: vec![], + zk_proof: 
None, + }); + } + + pub fn add_l2_draft_block( + &mut self, + l2_draft_block: L2BlockV2Draft, + ) -> Result { + if let Some(current_proposal) = self.current_proposal.as_mut() { + let payload = current_proposal.add_l2_draft_block(l2_draft_block); + + debug!( + "Added L2 draft block to batch: l2 blocks: {}, total bytes: {}", + current_proposal.l2_blocks.len(), + current_proposal.total_bytes + ); + Ok(payload) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + /// Add a pre-built L2BlockV2 directly to the current proposal. + /// Used during recovery to bypass the draft/payload flow. + #[allow(dead_code)] + pub fn add_recovered_l2_block(&mut self, l2_block: L2BlockV2) -> Result<(), Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.total_bytes += l2_block.prebuilt_tx_list.bytes_length; + current_proposal.l2_blocks.push(l2_block); + Ok(()) + } else { + Err(anyhow::anyhow!("No current batch for recovered block")) + } + } + + pub fn add_user_op(&mut self, user_op_data: UserOp) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.user_ops.push(user_op_data.clone()); + info!("Added user op: {:?}", user_op_data); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + #[allow(dead_code)] + pub fn add_l2_user_op_id(&mut self, id: u64) -> Result<(), Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.l2_user_op_ids.push(id); + Ok(()) + } else { + Err(anyhow::anyhow!("No current batch for L2 user op id")) + } + } + + pub fn add_l2_mempool_tx_hash(&mut self, tx_hash: B256) -> Result<(), Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.l2_mempool_tx_hashes.push(tx_hash); + Ok(()) + } else { + Err(anyhow::anyhow!("No current batch for L2 mempool tx hash")) + } + } + + pub fn add_signal_slot(&mut self, signal_slot: FixedBytes<32>) -> 
Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.signal_slots.push(signal_slot); + info!("Added signal slot: {:?}", signal_slot); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn add_l1_call(&mut self, l1_call: L1Call) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.l1_calls.push(l1_call.clone()); + info!("Added L1 call: {:?}", l1_call); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn set_proposal_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.checkpoint = checkpoint.clone(); + debug!("Update proposal checkpoint: {:?}", checkpoint); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn get_current_proposal_last_block_timestamp(&self) -> Option { + self.current_proposal + .as_ref() + .and_then(|p| p.l2_blocks.last().map(|b| b.timestamp_sec)) + } + + pub fn remove_last_l2_block(&mut self) { + if let Some(current_proposal) = self.current_proposal.as_mut() { + let removed_block = current_proposal.l2_blocks.pop(); + if let Some(removed_block) = removed_block { + current_proposal.total_bytes -= removed_block.prebuilt_tx_list.bytes_length; + if current_proposal.l2_blocks.is_empty() { + self.current_proposal = None; + } + debug!( + "Removed L2 block from batch: {} txs, {} bytes", + removed_block.prebuilt_tx_list.tx_list.len(), + removed_block.prebuilt_tx_list.bytes_length + ); + } + } + } + + pub fn is_empty(&self) -> bool { + trace!( + "batch_builder::is_empty: current_proposal is none: {}, proposals_to_send len: {}", + self.current_proposal.is_none(), + self.proposals_to_send.len() + ); + self.current_proposal.is_none() && self.proposals_to_send.is_empty() + } + + /// Finalize the current batch 
if appropriate for submission. + pub fn finalize_if_needed(&mut self, submit_only_full_batches: bool) { + if self.current_proposal.is_some() + && (!submit_only_full_batches + || !self.config.is_within_block_limit( + u16::try_from( + self.current_proposal + .as_ref() + .map(|b| b.l2_blocks.len()) + .unwrap_or(0), + ) + .unwrap_or(u16::MAX) + + 1, + )) + { + self.finalize_current_batch(); + } + } + + /// Pop the oldest finalized batch, stamping it with the current last_finalized_block_hash. + pub fn pop_oldest_batch(&mut self, last_finalized_block_hash: B256) -> Option { + if let Some(mut batch) = self.proposals_to_send.pop_front() { + batch.last_finalized_block_hash = last_finalized_block_hash; + Some(batch) + } else { + None + } + } + + /// Re-queue a batch at the front (e.g., when submission couldn't start). + pub fn push_front_batch(&mut self, batch: Proposal) { + self.proposals_to_send.push_front(batch); + } + + pub fn is_time_shift_expired(&self, current_l2_slot_timestamp: u64) -> bool { + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + return current_l2_slot_timestamp - last_block.timestamp_sec + > self.config.max_time_shift_between_blocks_sec; + } + false + } + + pub fn is_time_shift_between_blocks_expiring(&self, current_l2_slot_timestamp: u64) -> bool { + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + if current_l2_slot_timestamp < last_block.timestamp_sec { + warn!("Preconfirmation timestamp is before the last block timestamp"); + return false; + } + return self.is_the_last_l1_slot_to_add_an_empty_l2_block( + current_l2_slot_timestamp, + last_block.timestamp_sec, + ); + } + false + } + + fn is_the_last_l1_slot_to_add_an_empty_l2_block( + &self, + current_l2_slot_timestamp: u64, + last_block_timestamp: u64, + ) -> bool { + current_l2_slot_timestamp - last_block_timestamp + >= 
self.config.max_time_shift_between_blocks_sec - self.config.l1_slot_duration_sec + } + + pub fn is_greater_than_max_anchor_height_offset(&self) -> Result { + if let Some(current_proposal) = self.current_proposal.as_ref() { + let current_l1_block = self.slot_clock.get_current_slot()?; + if current_l1_block > current_proposal.max_anchor_block_number { + let offset = current_l1_block - current_proposal.max_anchor_block_number; + return Ok(offset > self.config.max_anchor_height_offset); + } + } + Ok(false) + } + + fn is_empty_block_required(&self, preconfirmation_timestamp: u64) -> bool { + self.is_time_shift_between_blocks_expiring(preconfirmation_timestamp) + } + + pub fn get_number_of_batches(&self) -> u64 { + self.proposals_to_send.len() as u64 + + if self.current_proposal.is_some() { + 1 + } else { + 0 + } + } + + pub fn finalize_current_batch(&mut self) { + if let Some(batch) = self.current_proposal.take() + && !batch.l2_blocks.is_empty() + { + self.proposals_to_send.push_back(batch); + } + } + + pub fn should_new_block_be_created( + &self, + pending_tx_list: &Option, + current_l2_slot_timestamp: u64, + end_of_sequencing: bool, + ) -> bool { + let number_of_pending_txs = pending_tx_list + .as_ref() + .map(|tx_list| tx_list.tx_list.len()) + .unwrap_or(0) as u64; + + if self.is_empty_block_required(current_l2_slot_timestamp) || end_of_sequencing { + return true; + } + + if number_of_pending_txs >= self.config.preconf_min_txs { + return true; + } + + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + let number_of_l2_slots = + (current_l2_slot_timestamp.saturating_sub(last_block.timestamp_sec)) * 1000 + / self.slot_clock.get_preconf_heartbeat_ms(); + return number_of_l2_slots > self.config.preconf_max_skipped_l2_slots; + } + + true + } +} + +use common::shared::l2_tx_lists::PreBuiltTxList; diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs 
b/realtime/src/node/proposal_manager/bridge_handler.rs new file mode 100644 index 00000000..aa7bcfe5 --- /dev/null +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -0,0 +1,519 @@ +use crate::l2::taiko::Taiko; +use crate::shared_abi::bindings::IBridge::Message; +use crate::{ + l1::execution_layer::{ExecutionLayer, L1BridgeHandlerOps}, + l2::execution_layer::L2BridgeHandlerOps, +}; +use alloy::primitives::{Address, B256, Bytes, FixedBytes}; +use anyhow::Result; +use common::{l1::ethereum_l1::EthereumL1, utils::cancellation_token::CancellationToken}; +use jsonrpsee::server::{RpcModule, ServerBuilder}; +use serde::{Deserialize, Serialize}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::mpsc::{self, Receiver}; +use tracing::{debug, error, info, warn}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "status")] +pub enum UserOpStatus { + Pending, + Processing { tx_hash: FixedBytes<32> }, + ProvingBlock { block_id: u64 }, + Rejected { reason: String }, + Executed, +} + +/// Disk-backed user op status store using sled. +/// +/// Two keyspaces live in this store: +/// - default tree: keyed by `u64` UserOp id (L1→L2→L1 path). +/// - `by_hash` tree: keyed by L2 tx hash `B256` (L2→L1→L2 mempool-picked txs). 
+#[derive(Clone)] +pub struct UserOpStatusStore { + db: sled::Db, + by_hash: sled::Tree, +} + +impl UserOpStatusStore { + pub fn open(path: &str) -> Result { + let db = sled::open(path) + .map_err(|e| anyhow::anyhow!("Failed to open user op status store: {}", e))?; + let by_hash = db + .open_tree("by_hash") + .map_err(|e| anyhow::anyhow!("Failed to open by_hash tree: {}", e))?; + Ok(Self { db, by_hash }) + } + + pub fn set(&self, id: u64, status: &UserOpStatus) { + if let Ok(value) = serde_json::to_vec(status) + && let Err(e) = self.db.insert(id.to_be_bytes(), value) + { + error!("Failed to write user op status: {}", e); + } + } + + pub fn get(&self, id: u64) -> Option { + self.db + .get(id.to_be_bytes()) + .ok() + .flatten() + .and_then(|v| serde_json::from_slice(&v).ok()) + } + + pub fn remove(&self, id: u64) { + let _ = self.db.remove(id.to_be_bytes()); + } + + pub fn set_by_hash(&self, hash: B256, status: &UserOpStatus) { + if let Ok(value) = serde_json::to_vec(status) + && let Err(e) = self.by_hash.insert(hash.as_slice(), value) + { + error!("Failed to write tx status by hash: {}", e); + } + } + + pub fn get_by_hash(&self, hash: B256) -> Option { + self.by_hash + .get(hash.as_slice()) + .ok() + .flatten() + .and_then(|v| serde_json::from_slice(&v).ok()) + } + + pub fn remove_by_hash(&self, hash: B256) { + let _ = self.by_hash.remove(hash.as_slice()); + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct UserOp { + #[serde(default)] + pub id: u64, + pub submitter: Address, + pub calldata: Bytes, +} + +// Data required to build the L1 call transaction initiated by an L2 contract via the bridge +#[derive(Clone, Debug)] +pub struct L1Call { + pub message_from_l2: Message, + pub signal_slot_proof: Bytes, + /// Optional: if the L1 callback triggered by `processMessage` produces an + /// L1→L2 return signal that the same L2 block consumes as a fast signal, + /// this is that signal slot. 
When present, the inbox must defer finalization + /// of the proposal until this slot is populated on L1 — triggering the + /// tentativePropose + finalizePropose multicall shape. + pub required_return_signal: Option>, +} + +// Data required to build the L2 call transaction initiated by an L1 contract via the bridge +#[derive(Clone, Debug)] +pub struct L2Call { + pub message_from_l1: Message, + pub signal_slot_on_l2: FixedBytes<32>, +} + +/// Routed L1→L2 UserOp: triggers an L2 bridge call via processMessage. +pub struct RoutedUserOp { + pub user_op: UserOp, + pub l2_call: L2Call, +} + +#[derive(Debug, Deserialize)] +struct TxStatusRequest { + #[serde(default, rename = "userOpId")] + user_op_id: Option, + #[serde(default, rename = "txHash")] + tx_hash: Option, +} + +#[derive(Clone)] +struct BridgeRpcContext { + tx: mpsc::Sender, + status_store: UserOpStatusStore, + next_id: Arc, + ethereum_l1: Arc>, + taiko: Arc, + last_finalized_block_number: Arc, +} + +pub struct BridgeHandler { + ethereum_l1: Arc>, + taiko: Arc, + rx: Receiver, + status_store: UserOpStatusStore, + #[allow(dead_code)] + l1_chain_id: u64, +} + +impl BridgeHandler { + pub async fn new( + addr: SocketAddr, + ethereum_l1: Arc>, + taiko: Arc, + cancellation_token: CancellationToken, + l1_chain_id: u64, + last_finalized_block_number: Arc, + ) -> Result { + let (tx, rx) = mpsc::channel::(1024); + let status_store = UserOpStatusStore::open("data/user_op_status")?; + + let rpc_context = BridgeRpcContext { + tx, + status_store: status_store.clone(), + next_id: Arc::new(AtomicU64::new(1)), + ethereum_l1: ethereum_l1.clone(), + taiko: taiko.clone(), + last_finalized_block_number, + }; + + let server = ServerBuilder::default() + .build(addr) + .await + .map_err(|e| anyhow::anyhow!("Failed to build RPC server: {}", e))?; + + let mut module = RpcModule::new(rpc_context); + + module.register_async_method("surge_sendUserOp", |params, ctx, _| async move { + let mut user_op: UserOp = params.parse()?; + let id = 
ctx.next_id.fetch_add(1, Ordering::Relaxed); + user_op.id = id; + + info!( + "Received UserOp: id={}, submitter={:?}, calldata_len={}", + id, + user_op.submitter, + user_op.calldata.len() + ); + + ctx.status_store.set(id, &UserOpStatus::Pending); + + ctx.tx.send(user_op).await.map_err(|e| { + error!("Failed to send UserOp to queue: {}", e); + ctx.status_store.remove(id); + jsonrpsee::types::ErrorObjectOwned::owned( + -32000, + "Failed to queue user operation", + Some(format!("{}", e)), + ) + })?; + + Ok::(id) + })?; + + module.register_async_method("surge_userOpStatus", |params, ctx, _| async move { + let id: u64 = params.one()?; + + match ctx.status_store.get(id) { + Some(status) => Ok::( + serde_json::to_value(status).map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32603, + "Serialization error", + Some(format!("{}", e)), + ) + })?, + ), + None => Err(jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "UserOp not found", + Some(format!("No user operation with id {}", id)), + )), + } + })?; + + module.register_async_method("surge_txStatus", |params, ctx, _| async move { + let request: TxStatusRequest = params.parse()?; + + match (request.user_op_id, request.tx_hash) { + (Some(id), None) => { + // Existing userOpId lookup via status store + match ctx.status_store.get(id) { + Some(status) => serde_json::to_value(status).map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32603, + "Serialization error", + Some(format!("{}", e)), + ) + }), + None => Err(jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "UserOp not found", + Some(format!("No user operation with id {}", id)), + )), + } + } + (None, Some(hash)) => { + // Prefer the explicit status store for mempool-picked L2→L1→L2 txs — + // it carries the full `sequencing → proving → proposing → complete` + // lifecycle that async_submitter writes. 
+ if let Some(status) = ctx.status_store.get_by_hash(hash) { + return serde_json::to_value(status).map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32603, + "Serialization error", + Some(format!("{}", e)), + ) + }); + } + + // Fallback: derive from on-chain state (used for L1→L2→L1 UserOp + // polling by hash, where no store entry exists). + let tx = ctx.taiko.get_transaction_by_hash(hash).await.map_err(|e| { + debug!("Transaction {} not found on L2: {}", hash, e); + jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "Transaction not found", + Some(format!("L2 transaction {} not found: {}", hash, e)), + ) + })?; + + let block_number = tx.block_number.ok_or_else(|| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "Transaction pending", + Some("Transaction has not been included in a block yet".to_string()), + ) + })?; + + let finalized = ctx.last_finalized_block_number.load(Ordering::Relaxed); + + let status = if block_number <= finalized { + UserOpStatus::Executed + } else { + UserOpStatus::ProvingBlock { + block_id: block_number, + } + }; + + serde_json::to_value(status).map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32603, + "Serialization error", + Some(format!("{}", e)), + ) + }) + } + _ => Err(jsonrpsee::types::ErrorObjectOwned::owned( + -32602, + "Invalid params", + Some("Provide exactly one of 'userOpId' or 'txHash'".to_string()), + )), + } + })?; + + // surge_simulateReturnMessage: given a raw L2 tx (from, to, data), + // trace it for an L2→L1 outbound, simulate the L1 callback, and return + // the IBridge.Message that the L1 callback would produce. Users call this + // before submitting to the L2 mempool so they can embed the correct + // returnMessage in their calldata. 
+ module.register_async_method( + "surge_simulateReturnMessage", + |params, ctx, _| async move { + use crate::l1::execution_layer::L1BridgeHandlerOps; + + #[derive(serde::Deserialize)] + struct SimRequest { + from: Address, + to: Address, + data: Bytes, + /// ETH value to attach to the traced tx (required for payable + /// L2 entry points like swapETHForTokenViaL1). + #[serde(default)] + value: Option, + } + + let req: SimRequest = params.one()?; + info!( + "surge_simulateReturnMessage: from={}, to={}, data_len={}, value={:?}", + req.from, + req.to, + req.data.len(), + req.value, + ); + + let l2_el = ctx.taiko.l2_execution_layer(); + + // Step 1: trace the L2 tx for outbound Bridge.sendMessage + let outbound = l2_el + .trace_tx_for_outbound_message(req.from, req.to, &req.data, req.value) + .await + .map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32000, + "L2 trace failed", + Some(format!("{e}")), + ) + })? + .ok_or_else(|| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "No outbound Bridge.sendMessage found in trace", + None::, + ) + })?; + + // Step 2: simulate the L1 callback + let l1_el = &ctx.ethereum_l1.execution_layer; + let bridge_addr = l1_el.contract_addresses().bridge; + let l2_bridge_addr = *l2_el.bridge.address(); + + let (return_msg, return_slot) = l1_el + .simulate_l1_callback_return_signal( + outbound, + Bytes::new(), + bridge_addr, + l2_bridge_addr, + ) + .await + .map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32000, + "L1 callback simulation failed", + Some(format!("{e}")), + ) + })? 
+ .ok_or_else(|| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32002, + "L1 callback produced no return message", + None::, + ) + })?; + + // Return the Message struct fields + signal slot as JSON + Ok::(serde_json::json!({ + "message": { + "id": return_msg.id, + "fee": return_msg.fee, + "gasLimit": return_msg.gasLimit, + "from": format!("{}", return_msg.from), + "srcChainId": return_msg.srcChainId, + "srcOwner": format!("{}", return_msg.srcOwner), + "destChainId": return_msg.destChainId, + "destOwner": format!("{}", return_msg.destOwner), + "to": format!("{}", return_msg.to), + "value": format!("{}", return_msg.value), + "data": format!("0x{}", hex::encode(&return_msg.data)), + }, + "signalSlot": format!("{}", return_slot), + })) + }, + )?; + + info!("Bridge handler RPC server starting on {}", addr); + let handle = server.start(module); + + tokio::spawn(async move { + cancellation_token.cancelled().await; + info!("Cancellation token triggered, stopping bridge handler RPC server"); + handle.stop().ok(); + }); + + Ok(Self { + ethereum_l1, + taiko, + rx, + status_store, + l1_chain_id, + }) + } + + pub fn status_store(&self) -> UserOpStatusStore { + self.status_store.clone() + } + + /// Dequeue the next UserOp, simulate on L1 to extract the bridge message + /// (L1→L2 deposit). UserOps always target L1. + pub async fn next_user_op(&mut self) -> Result, anyhow::Error> { + let Ok(user_op) = self.rx.try_recv() else { + return Ok(None); + }; + + // L1 UserOp — simulate on L1 to extract bridge message + if let Some((message_from_l1, signal_slot_on_l2)) = self + .ethereum_l1 + .execution_layer + .find_message_and_signal_slot(user_op.clone()) + .await? 
+ { + return Ok(Some(RoutedUserOp { + user_op, + l2_call: L2Call { + message_from_l1, + signal_slot_on_l2, + }, + })); + } + + warn!( + "UserOp id={} targets L1 but no bridge message found", + user_op.id + ); + self.status_store.set( + user_op.id, + &UserOpStatus::Rejected { + reason: "L1 UserOp with no bridge message".to_string(), + }, + ); + Ok(None) + } + + /// Build an L1Call for a Bridge.sendMessage emitted in the just-preconfirmed + /// L2 block. The mempool scan is the single source of truth for the return + /// signal: if it found one, its slot was injected into the L2 anchor's fast + /// signals and must be carried here as the inbox's `requiredReturnSignal`. + /// We do not re-simulate — any drift between the two simulations would make + /// the anchor slot disagree with the inbox's verified slot, which reverts + /// `_verifySignalSlots` (classic) or `finalizePropose` (deferred). + pub async fn find_l1_call( + &mut self, + block_id: u64, + state_root: B256, + required_return_signal: Option>, + ) -> Result, anyhow::Error> { + let l2_el = self.taiko.l2_execution_layer(); + + // Retry briefly: the L2 RPC may lag indexing the just-preconfirmed + // block's logs. Without this, `find_message_and_signal_slot` returns + // None on the hot path and we skip the L1 call — causing classic + // propose to revert with `SignalSlotNotSent` if the mempool scan + // already injected a slot into the anchor. + let mut attempt = 0u32; + let message_and_slot = loop { + if let Some(pair) = l2_el.find_message_and_signal_slot(block_id).await? 
{ + break Some(pair); + } + attempt += 1; + if attempt >= 5 { + break None; + } + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + }; + + if let Some((message_from_l2, signal_slot)) = message_and_slot { + let signal_slot_proof = l2_el + .get_hop_proof(signal_slot, block_id, state_root) + .await?; + + if required_return_signal.is_some() { + info!( + "Adding L1 call with pre-simulated required return signal — will use deferred finalize" + ); + } + + return Ok(Some(L1Call { + message_from_l2, + signal_slot_proof, + required_return_signal, + })); + } + + Ok(None) + } + + pub fn has_pending_user_ops(&self) -> bool { + !self.rx.is_empty() + } +} diff --git a/realtime/src/node/proposal_manager/l2_block_payload.rs b/realtime/src/node/proposal_manager/l2_block_payload.rs new file mode 100644 index 00000000..6cb8e07a --- /dev/null +++ b/realtime/src/node/proposal_manager/l2_block_payload.rs @@ -0,0 +1,12 @@ +use alloy::primitives::B256; +use alloy::rpc::types::Transaction; + +pub struct L2BlockV2Payload { + pub coinbase: alloy::primitives::Address, + pub tx_list: Vec, + pub timestamp_sec: u64, + pub gas_limit_without_anchor: u64, + pub anchor_block_id: u64, + pub anchor_block_hash: B256, + pub anchor_state_root: B256, +} diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs new file mode 100644 index 00000000..84b3e029 --- /dev/null +++ b/realtime/src/node/proposal_manager/mod.rs @@ -0,0 +1,646 @@ +mod async_submitter; +mod batch_builder; +pub mod bridge_handler; +pub mod l2_block_payload; +pub mod proposal; + +use crate::l1::bindings::ICheckpointStore::Checkpoint; +use crate::l1::execution_layer::L1BridgeHandlerOps; +use crate::l2::execution_layer::L2BridgeHandlerOps; +use crate::node::proposal_manager::bridge_handler::UserOp; +use crate::raiko::RaikoClient; +use crate::{l1::execution_layer::ExecutionLayer, l2::taiko::Taiko}; +use alloy::consensus::Transaction as _; +use alloy::primitives::aliases::U48; +use 
alloy::primitives::{B256, FixedBytes}; +use anyhow::Error; +use async_submitter::AsyncSubmitter; +use batch_builder::BatchBuilder; +use bridge_handler::BridgeHandler; +use common::metrics::Metrics; +use common::{batch_builder::BatchBuilderConfig, shared::l2_slot_info_v2::L2SlotContext}; +use common::{ + l1::{ethereum_l1::EthereumL1, traits::ELTrait}, + l2::taiko_driver::{OperationType, models::BuildPreconfBlockResponse}, + shared::{ + anchor_block_info::AnchorBlockInfo, l2_block_v2::L2BlockV2Draft, + l2_tx_lists::PreBuiltTxList, + }, + utils::cancellation_token::CancellationToken, +}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::Mutex; +use tracing::{debug, error, info, warn}; + +use crate::node::L2SlotInfoV2; + +const MIN_ANCHOR_OFFSET: u64 = 2; + +pub struct BatchManager { + batch_builder: BatchBuilder, + async_submitter: AsyncSubmitter, + bridge_handler: Arc>, + ethereum_l1: Arc>, + pub taiko: Arc, + l1_height_lag: u64, + #[allow(dead_code)] + metrics: Arc, + #[allow(dead_code)] + cancel_token: CancellationToken, + last_finalized_block_hash: B256, + last_finalized_block_number: Arc, + /// L1→L2 return signal slot discovered during Pass 2 (L2Direct pre-sim). + /// Pushed into the L2 block's anchor fast signals before real execution + /// so that `bridge.processMessage(returnMsg, "")` in the UserOp succeeds. + /// Cleared after each block build. + pending_return_signal: Option>, + /// L2 mempool tx hash paired with `pending_return_signal` — the tx that + /// triggered the L2→L1→L2 path. Recorded so the UI can poll `surge_txStatus` + /// by hash and see the full proposal lifecycle (sequencing → proving → + /// proposing → complete). Cleared after each block build. 
+ pending_mempool_tx_hash: Option, +} + +impl BatchManager { + #[allow(clippy::too_many_arguments)] + pub async fn new( + l1_height_lag: u64, + config: BatchBuilderConfig, + ethereum_l1: Arc>, + taiko: Arc, + metrics: Arc, + cancel_token: CancellationToken, + last_finalized_block_hash: B256, + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + proof_request_bypass: bool, + bridge_rpc_addr: String, + l1_chain_id: u64, + ) -> Result { + info!( + "Batch builder config:\n\ + max_bytes_size_of_batch: {}\n\ + max_blocks_per_batch: {}\n\ + l1_slot_duration_sec: {}\n\ + max_time_shift_between_blocks_sec: {}\n\ + max_anchor_height_offset: {}", + config.max_bytes_size_of_batch, + config.max_blocks_per_batch, + config.l1_slot_duration_sec, + config.max_time_shift_between_blocks_sec, + config.max_anchor_height_offset, + ); + + let bridge_addr: SocketAddr = bridge_rpc_addr.parse().map_err(|e| { + anyhow::anyhow!( + "Failed to parse BRIDGE_RPC_ADDR '{}': {}", + bridge_rpc_addr, + e + ) + })?; + + let last_finalized_block_number = Arc::new(AtomicU64::new(0)); + + let bridge_handler = Arc::new(Mutex::new( + BridgeHandler::new( + bridge_addr, + ethereum_l1.clone(), + taiko.clone(), + cancel_token.clone(), + l1_chain_id, + last_finalized_block_number.clone(), + ) + .await?, + )); + + let async_submitter = AsyncSubmitter::new( + raiko_client, + basefee_sharing_pctg, + ethereum_l1.clone(), + proof_request_bypass, + ); + + Ok(Self { + batch_builder: BatchBuilder::new( + config, + ethereum_l1.slot_clock.clone(), + metrics.clone(), + ), + async_submitter, + bridge_handler, + ethereum_l1, + taiko, + l1_height_lag, + metrics, + cancel_token, + last_finalized_block_hash, + last_finalized_block_number, + pending_return_signal: None, + pending_mempool_tx_hash: None, + }) + } + + /// Non-blocking poll: check if the in-flight submission has completed. + /// On success, updates `last_finalized_block_hash`. Returns None if idle or still in progress. 
+ pub fn poll_submission_result(&mut self) -> Option> { + match self.async_submitter.try_recv_result() { + Some(Ok(result)) => { + info!( + "Submission completed. New last finalized block: number={}, hash={}", + result.new_last_finalized_block_number, result.new_last_finalized_block_hash, + ); + self.last_finalized_block_hash = result.new_last_finalized_block_hash; + self.last_finalized_block_number + .store(result.new_last_finalized_block_number, Ordering::Relaxed); + Some(Ok(())) + } + Some(Err(e)) => Some(Err(e)), + None => None, + } + } + + /// Kick off an async submission if there's a finalized batch ready and the submitter is idle. + pub async fn try_start_submission( + &mut self, + submit_only_full_batches: bool, + ) -> Result<(), Error> { + if self.async_submitter.is_busy() { + return Ok(()); + } + + self.batch_builder + .finalize_if_needed(submit_only_full_batches); + + let Some(batch) = self + .batch_builder + .pop_oldest_batch(self.last_finalized_block_hash) + else { + return Ok(()); + }; + + // Check no L1 tx already in progress + if self + .ethereum_l1 + .execution_layer + .is_transaction_in_progress() + .await? + { + debug!("Cannot submit batch, L1 transaction already in progress. Re-queuing."); + self.batch_builder.push_front_batch(batch); + return Ok(()); + } + + let status_store = self.bridge_handler.lock().await.status_store(); + + info!( + "Starting async submission: {} blocks, last_finalized_block_hash: {}", + batch.l2_blocks.len(), + batch.last_finalized_block_hash, + ); + + self.async_submitter.submit(batch, Some(status_store)); + Ok(()) + } + + pub fn is_submission_in_progress(&self) -> bool { + self.async_submitter.is_busy() + } + + /// Drop all finalized batches without submitting. Used in PRECONF_ONLY mode. 
+ pub fn drain_finalized_batches(&mut self) { + self.batch_builder.finalize_if_needed(false); + while let Some(batch) = self + .batch_builder + .pop_oldest_batch(self.last_finalized_block_hash) + { + info!( + "PRECONF_ONLY: dropping batch with {} blocks", + batch.l2_blocks.len(), + ); + } + } + + pub fn should_new_block_be_created( + &self, + pending_tx_list: &Option, + l2_slot_context: &L2SlotContext, + ) -> bool { + self.batch_builder.should_new_block_be_created( + pending_tx_list, + l2_slot_context.info.slot_timestamp(), + l2_slot_context.end_of_sequencing, + ) + } + + pub async fn preconfirm_block( + &mut self, + pending_tx_list: Option, + l2_slot_context: &L2SlotContext, + ) -> Result { + let result = self + .add_new_l2_block( + pending_tx_list.unwrap_or_else(PreBuiltTxList::empty), + l2_slot_context, + OperationType::Preconfirm, + ) + .await?; + if self + .batch_builder + .is_greater_than_max_anchor_height_offset()? + { + info!("Maximum allowed anchor height offset exceeded, finalizing current batch."); + self.batch_builder.finalize_current_batch(); + } + + Ok(result) + } + + async fn add_new_l2_block( + &mut self, + prebuilt_tx_list: PreBuiltTxList, + l2_slot_context: &L2SlotContext, + operation_type: OperationType, + ) -> Result { + let timestamp = l2_slot_context.info.slot_timestamp(); + if let Some(last_block_timestamp) = self + .batch_builder + .get_current_proposal_last_block_timestamp() + && timestamp == last_block_timestamp + { + return Err(anyhow::anyhow!( + "Cannot add another block with the same timestamp as the last block, timestamp: {timestamp}, last block timestamp: {last_block_timestamp}" + )); + } + + info!( + "Adding new L2 block id: {}, timestamp: {}", + l2_slot_context.info.parent_id() + 1, + timestamp, + ); + + let l2_draft_block = L2BlockV2Draft { + prebuilt_tx_list: prebuilt_tx_list.clone(), + timestamp_sec: timestamp, + gas_limit_without_anchor: l2_slot_context.info.parent_gas_limit_without_anchor(), + }; + + if 
!self.batch_builder.can_consume_l2_block(&l2_draft_block) { + let _ = self.create_new_batch().await?; + } + + let preconfed_block = self + .add_draft_block_to_proposal(l2_draft_block, l2_slot_context, operation_type) + .await?; + + Ok(preconfed_block) + } + + pub async fn has_pending_user_ops(&self) -> bool { + self.bridge_handler.lock().await.has_pending_user_ops() + } + + /// Process pending L1 UserOps: simulate on L1 to extract bridge message, + /// then insert processMessage tx into the L2 block. + async fn add_pending_user_ops_to_draft_block( + &mut self, + l2_draft_block: &mut L2BlockV2Draft, + ) -> Result)>, anyhow::Error> { + let routed = { + let mut handler = self.bridge_handler.lock().await; + handler.next_user_op().await? + }; + + let Some(routed) = routed else { + return Ok(None); + }; + + info!("Processing L1→L2 deposit: UserOp id={}", routed.user_op.id); + + let l2_call_bridge_tx = self + .taiko + .l2_execution_layer() + .construct_l2_call_tx(routed.l2_call.message_from_l1) + .await?; + + info!("Inserting processMessage tx into L2 block"); + l2_draft_block + .prebuilt_tx_list + .tx_list + .push(l2_call_bridge_tx); + + Ok(Some((routed.user_op, routed.l2_call.signal_slot_on_l2))) + } + + /// Scan mempool transactions for any that emit `Bridge.sendMessage` (L2→L1 + /// outbound). For each such tx, simulate the L1 callback to discover an + /// L1→L2 return signal. If found, inject the return signal into the anchor's + /// fast signals so the tx's `bridge.processMessage(returnMsg)` call succeeds + /// on L2, and record the slot for the deferred-finalize multicall. 
+ async fn scan_mempool_for_outbound_signals( + &mut self, + pending_tx_list: &mut common::shared::l2_tx_lists::PreBuiltTxList, + ) { + use alloy::primitives::Bytes; + + let l2_el = self.taiko.l2_execution_layer(); + let l1_el = &self.ethereum_l1.execution_layer; + + for tx in &pending_tx_list.tx_list { + let from = tx.inner.signer(); + let Some(to) = tx.inner.to() else { + continue; // skip contract creation txs + }; + let input = tx.inner.input(); + + // Trace the tx to check for outbound bridge.sendMessage. + // Forward the tx value so payable entry points (swapETHForTokenViaL1) + // don't revert with ZERO_AMOUNT during the trace. + let tx_value = tx.inner.value(); + let outbound = match l2_el + .trace_tx_for_outbound_message(from, to, input, Some(tx_value)) + .await + { + Ok(Some(msg)) => msg, + Ok(None) => continue, + Err(e) => { + debug!("Mempool tx trace failed: {e}"); + continue; + } + }; + + info!( + "Mempool tx from={} emits L2→L1 outbound to destChainId={}", + from, outbound.destChainId + ); + + // Simulate the L1 callback to find the return signal + let bridge_addr = l1_el.contract_addresses().bridge; + let l2_bridge_addr = *l2_el.bridge.address(); + match l1_el + .simulate_l1_callback_return_signal( + outbound, + Bytes::new(), + bridge_addr, + l2_bridge_addr, + ) + .await + { + Ok(Some((_return_msg, return_slot))) => { + let tx_hash = *tx.inner.tx_hash(); + info!( + "L1 callback simulation found return signal slot={} for L2 tx {} — injecting into anchor", + return_slot, tx_hash, + ); + self.pending_return_signal = Some(return_slot); + self.pending_mempool_tx_hash = Some(tx_hash); + // Only handle one L2→L1→L2 tx per block for now + break; + } + Ok(None) => { + debug!("L1 callback produced no return signal"); + } + Err(e) => { + warn!("L1 callback simulation failed: {e}"); + } + } + } + } + + async fn add_draft_block_to_proposal( + &mut self, + mut l2_draft_block: L2BlockV2Draft, + l2_slot_context: &L2SlotContext, + operation_type: OperationType, + ) -> 
Result { + let mut anchor_signal_slots: Vec> = vec![]; + + // Process L1→L2 UserOps (via surge_sendUserOp RPC) + debug!("Checking for pending UserOps (L1→L2 deposits)"); + if let Some((user_op_data, signal_slot)) = self + .add_pending_user_ops_to_draft_block(&mut l2_draft_block) + .await? + { + self.batch_builder.add_user_op(user_op_data)?; + self.batch_builder.add_signal_slot(signal_slot)?; + anchor_signal_slots.push(signal_slot); + } else { + debug!("No L1→L2 UserOps pending"); + } + + // Scan mempool txs for L2→L1→L2 outbound signals (e.g. flash loans). + // If found, the L1 callback is simulated and the return signal is + // injected into the anchor so the tx succeeds on L2. + self.scan_mempool_for_outbound_signals(&mut l2_draft_block.prebuilt_tx_list) + .await; + + // Copy rather than take — the pre-simulated slot is passed as a hint + // to `find_l1_call` after preconf so the L1Call's requiredReturnSignal + // matches the slot we inject into the anchor. Cleared below. + let pending_return_slot_hint = self.pending_return_signal; + if let Some(return_slot) = self.pending_return_signal.take() { + info!( + "Injecting L2→L1→L2 return signal into anchor fast signals: slot={}", + return_slot + ); + self.batch_builder.add_signal_slot(return_slot)?; + anchor_signal_slots.push(return_slot); + } + + if let Some(tx_hash) = self.pending_mempool_tx_hash.take() { + self.batch_builder.add_l2_mempool_tx_hash(tx_hash)?; + let status_store = self.bridge_handler.lock().await.status_store(); + status_store.set_by_hash( + tx_hash, + &crate::node::proposal_manager::bridge_handler::UserOpStatus::Pending, + ); + info!( + "Tracking L2→L1→L2 mempool tx {} under status store (Pending)", + tx_hash + ); + } + + let payload = self.batch_builder.add_l2_draft_block(l2_draft_block)?; + + match self + .taiko + .advance_head_to_new_l2_block( + payload, + l2_slot_context, + anchor_signal_slots, + operation_type, + ) + .await + { + Ok(preconfed_block) => { + 
self.batch_builder.set_proposal_checkpoint(Checkpoint { + blockNumber: U48::from(preconfed_block.number), + stateRoot: preconfed_block.state_root, + blockHash: preconfed_block.hash, + })?; + + debug!("Checking for initiated L1 calls"); + if let Some(l1_call) = self + .bridge_handler + .lock() + .await + .find_l1_call( + preconfed_block.number, + preconfed_block.state_root, + pending_return_slot_hint, + ) + .await? + { + self.batch_builder.add_l1_call(l1_call)?; + } else { + debug!("No L1 calls initiated"); + } + + Ok(preconfed_block) + } + Err(err) => { + error!("Failed to advance head to new L2 block: {}", err); + self.remove_last_l2_block(); + Err(anyhow::anyhow!( + "Failed to advance head to new L2 block: {}", + err + )) + } + } + } + + async fn create_new_batch(&mut self) -> Result { + let last_anchor_id = self + .taiko + .l2_execution_layer() + .get_last_synced_anchor_block_id_from_geth() + .await + .unwrap_or_else(|e| { + warn!("Failed to get last synced anchor block ID from Taiko Geth: {e}"); + 0 + }); + let anchor_block_info = AnchorBlockInfo::from_chain_state( + self.ethereum_l1.execution_layer.common(), + self.l1_height_lag, + last_anchor_id, + MIN_ANCHOR_OFFSET, + ) + .await?; + + let anchor_block_id = anchor_block_info.id(); + // Use B256::ZERO as placeholder -- real last_finalized_block_hash is stamped at submission time + self.batch_builder + .create_new_batch(anchor_block_info, B256::ZERO); + + Ok(anchor_block_id) + } + + fn remove_last_l2_block(&mut self) { + self.batch_builder.remove_last_l2_block(); + } + + pub async fn reset_builder(&mut self) -> Result<(), Error> { + warn!("Resetting batch builder"); + + self.async_submitter.abort(); + + self.batch_builder = batch_builder::BatchBuilder::new( + self.batch_builder.get_config().clone(), + self.ethereum_l1.slot_clock.clone(), + self.metrics.clone(), + ); + + Ok(()) + } + + pub fn has_batches(&self) -> bool { + !self.batch_builder.is_empty() + } + + pub fn get_number_of_batches(&self) -> u64 { + 
self.batch_builder.get_number_of_batches() + } + + /// Reorg all unproposed L2 blocks back to the last proposed block. + /// Called on startup to clean up any preconfirmed-but-unproposed blocks. + pub async fn reorg_unproposed_blocks(&mut self) -> Result<(), Error> { + let last_finalized_hash = self + .ethereum_l1 + .execution_layer + .get_last_finalized_block_hash() + .await?; + + if last_finalized_hash == B256::ZERO { + info!("No finalized block hash on L1 (genesis). Nothing to reorg."); + return Ok(()); + } + + let last_proposed_block_number = match self + .taiko + .find_l2_block_number_by_hash(last_finalized_hash) + .await + { + Ok(n) => n, + Err(_) => { + info!( + "lastFinalizedBlockHash {} not found on L2 — treating as no blocks proposed yet", + last_finalized_hash + ); + 0 + } + }; + + let l2_head = self.taiko.get_latest_l2_block_id().await?; + + // Always update the shared finalized block number for RPC status queries + self.last_finalized_block_number + .store(last_proposed_block_number, Ordering::Relaxed); + + if l2_head <= last_proposed_block_number { + info!( + "No unproposed blocks: L2 head {} <= last proposed {}", + l2_head, last_proposed_block_number + ); + return Ok(()); + } + + let gap = l2_head - last_proposed_block_number; + warn!( + "Detected {} unproposed L2 blocks ({} to {}). 
Reorging to last proposed block {}.", + gap, + last_proposed_block_number + 1, + l2_head, + last_proposed_block_number + ); + + let reorg_result = self + .taiko + .reorg_stale_block(last_proposed_block_number) + .await?; + info!( + "Reorg complete: new head hash={}, blocks removed={}", + reorg_result.new_head_block_hash, reorg_result.blocks_removed + ); + + self.last_finalized_block_hash = last_finalized_hash; + Ok(()) + } + + #[allow(dead_code)] + pub async fn reanchor_block( + &mut self, + pending_tx_list: PreBuiltTxList, + l2_slot_info: L2SlotInfoV2, + ) -> Result { + let l2_slot_context = L2SlotContext { + info: l2_slot_info, + end_of_sequencing: false, + }; + + let block = self + .add_new_l2_block(pending_tx_list, &l2_slot_context, OperationType::Reanchor) + .await?; + + Ok(block) + } +} diff --git a/realtime/src/node/proposal_manager/proposal.rs b/realtime/src/node/proposal_manager/proposal.rs new file mode 100644 index 00000000..438e43b4 --- /dev/null +++ b/realtime/src/node/proposal_manager/proposal.rs @@ -0,0 +1,117 @@ +use crate::l1::bindings::ICheckpointStore::Checkpoint; +use crate::node::proposal_manager::{ + bridge_handler::{L1Call, UserOp}, + l2_block_payload::L2BlockV2Payload, +}; +use alloy::primitives::{Address, B256, FixedBytes}; +use common::shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}; +use std::collections::VecDeque; +use std::time::Instant; +use taiko_protocol::shasta::manifest::{BlockManifest, DerivationSourceManifest}; +use tracing::{debug, warn}; + +#[allow(dead_code)] +pub type Proposals = VecDeque; + +#[derive(Default, Clone)] +pub struct Proposal { + pub l2_blocks: Vec, + pub total_bytes: u64, + pub coinbase: Address, + + // RealTime: maxAnchor instead of anchor + pub max_anchor_block_number: u64, + pub max_anchor_block_hash: B256, + pub max_anchor_state_root: B256, + + // Proof fields + pub checkpoint: Checkpoint, + pub last_finalized_block_hash: B256, + + // Surge POC fields (carried over) + pub user_ops: Vec, + pub 
l2_user_op_ids: Vec, + /// L2 tx hashes for mempool-picked outbound txs (L2→L1→L2 path). Status + /// transitions for these are written to `UserOpStatusStore::set_by_hash` + /// so the UI can poll `surge_txStatus` by tx hash and see the same + /// sequencing → proving → proposing → complete lifecycle as UserOps. + pub l2_mempool_tx_hashes: Vec, + pub signal_slots: Vec>, + pub l1_calls: Vec, + + // ZK proof (populated after Raiko call) + pub zk_proof: Option>, +} + +impl Proposal { + pub fn compress(&mut self) { + let start = Instant::now(); + + let mut block_manifests = >::with_capacity(self.l2_blocks.len()); + for l2_block in &self.l2_blocks { + block_manifests.push(BlockManifest { + timestamp: l2_block.timestamp_sec, + coinbase: l2_block.coinbase, + anchor_block_number: l2_block.anchor_block_number, + gas_limit: l2_block.gas_limit_without_anchor, + transactions: l2_block + .prebuilt_tx_list + .tx_list + .iter() + .map(|tx| tx.clone().into()) + .collect(), + }); + } + + let manifest = DerivationSourceManifest { + blocks: block_manifests, + }; + + let manifest_data = match manifest.encode_and_compress() { + Ok(data) => data, + Err(err) => { + warn!("Failed to compress proposal manifest: {err}"); + return; + } + }; + + debug!( + "Proposal compression completed in {} ms. Total bytes before: {}. 
Total bytes after: {}.", + start.elapsed().as_millis(), + self.total_bytes, + manifest_data.len() + ); + + self.total_bytes = manifest_data.len() as u64; + } + + fn create_block_from_draft(&mut self, l2_draft_block: L2BlockV2Draft) -> L2BlockV2 { + L2BlockV2::new_from( + l2_draft_block.prebuilt_tx_list, + l2_draft_block.timestamp_sec, + self.coinbase, + self.max_anchor_block_number, + l2_draft_block.gas_limit_without_anchor, + ) + } + + pub fn add_l2_block(&mut self, l2_block: L2BlockV2) -> L2BlockV2Payload { + let l2_payload = L2BlockV2Payload { + coinbase: self.coinbase, + tx_list: l2_block.prebuilt_tx_list.tx_list.clone(), + timestamp_sec: l2_block.timestamp_sec, + gas_limit_without_anchor: l2_block.gas_limit_without_anchor, + anchor_block_id: self.max_anchor_block_number, + anchor_block_hash: self.max_anchor_block_hash, + anchor_state_root: self.max_anchor_state_root, + }; + self.total_bytes += l2_block.prebuilt_tx_list.bytes_length; + self.l2_blocks.push(l2_block); + l2_payload + } + + pub fn add_l2_draft_block(&mut self, l2_draft_block: L2BlockV2Draft) -> L2BlockV2Payload { + let l2_block = self.create_block_from_draft(l2_draft_block); + self.add_l2_block(l2_block) + } +} diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs new file mode 100644 index 00000000..1b4f7ede --- /dev/null +++ b/realtime/src/raiko/mod.rs @@ -0,0 +1,184 @@ +use crate::l1::bindings::ProofType; +use crate::utils::config::RealtimeConfig; +use anyhow::Error; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use std::time::Duration; +use tracing::{debug, info, warn}; + +#[derive(Clone)] +pub struct RaikoClient { + client: Client, + pub base_url: String, + pub api_key: Option, + pub proof_type: ProofType, + #[allow(dead_code)] + l2_network: String, + #[allow(dead_code)] + l1_network: String, + poll_interval: Duration, + max_retries: u32, +} + +#[derive(Serialize)] +pub struct RaikoProofRequest { + pub l2_block_numbers: Vec, + pub proof_type: String, + pub 
max_anchor_block_number: u64, + pub last_finalized_block_hash: String, + pub basefee_sharing_pctg: u8, + #[serde(skip_serializing_if = "Option::is_none")] + pub network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub l1_network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub prover: Option, + pub signal_slots: Vec, + pub sources: Vec, + pub blobs: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub checkpoint: Option, + pub blob_proof_type: String, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RaikoDerivationSource { + pub is_forced_inclusion: bool, + pub blob_slice: RaikoBlobSlice, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RaikoBlobSlice { + pub blob_hashes: Vec, + pub offset: u32, + pub timestamp: u64, +} + +#[derive(Serialize, Deserialize)] +pub struct RaikoCheckpoint { + pub block_number: u64, + pub block_hash: String, + pub state_root: String, +} + +#[derive(Deserialize)] +pub struct RaikoResponse { + pub status: String, + #[serde(default)] + pub data: Option, + #[serde(default)] + pub proof_type: Option, + #[serde(default)] + pub batch_id: Option, + #[serde(default)] + pub error: Option, + #[serde(default)] + pub message: Option, +} + +#[derive(Deserialize)] +#[serde(untagged)] +pub enum RaikoData { + Proof { proof: RaikoProof }, + Status { status: String }, +} + +#[derive(Deserialize)] +pub struct RaikoProof { + pub proof: Option, + #[serde(default)] + pub input: Option, + #[serde(default)] + pub quote: Option, + #[serde(default)] + pub uuid: Option, + #[serde(default)] + pub kzg_proof: Option, +} + +impl RaikoClient { + pub fn new(config: &RealtimeConfig) -> Self { + Self { + client: Client::new(), + base_url: config.raiko_url.clone(), + api_key: config.raiko_api_key.clone(), + proof_type: config.proof_type, + l2_network: config.raiko_network.clone(), + l1_network: config.raiko_l1_network.clone(), + poll_interval: 
Duration::from_millis(config.raiko_poll_interval_ms), + max_retries: config.raiko_max_retries, + } + } + + /// Request a proof and poll until ready. + /// Returns the raw proof bytes. + pub async fn get_proof(&self, request: &RaikoProofRequest) -> Result, Error> { + let url = format!("{}/v3/proof/batch/realtime", self.base_url); + + for attempt in 0..self.max_retries { + let mut req = self.client.post(&url).json(request); + + if let Some(ref key) = self.api_key { + req = req.header("X-API-KEY", key); + } + + let resp = req.send().await?; + let http_status = resp.status(); + let raw_body = resp.text().await?; + debug!( + "Raiko response (attempt {}): HTTP {} | body: {}", + attempt + 1, + http_status, + raw_body + ); + let body: RaikoResponse = serde_json::from_str(&raw_body).map_err(|e| { + anyhow::anyhow!( + "Failed to parse Raiko response (HTTP {}): {} | body: {}", + http_status, + e, + raw_body + ) + })?; + + if body.status == "error" { + return Err(anyhow::anyhow!( + "Raiko proof failed: {}", + body.message.unwrap_or_default() + )); + } + + match body.data { + Some(RaikoData::Proof { proof: proof_obj }) => { + let proof_hex = proof_obj.proof.ok_or_else(|| { + anyhow::anyhow!("Raiko returned proof object with null proof field") + })?; + info!("ZK proof received (attempt {})", attempt + 1); + let proof_bytes = hex::decode(proof_hex.trim_start_matches("0x"))?; + return Ok(proof_bytes); + } + Some(RaikoData::Status { ref status }) if status == "ZKAnyNotDrawn" => { + warn!("Raiko: ZK prover not drawn for this request"); + return Err(anyhow::anyhow!("ZK prover not drawn")); + } + Some(RaikoData::Status { ref status }) => { + debug!( + "Raiko status: {}, polling... 
(attempt {})", + status, + attempt + 1 + ); + tokio::time::sleep(self.poll_interval).await; + } + None => { + return Err(anyhow::anyhow!("Raiko: unexpected empty response")); + } + } + } + + Err(anyhow::anyhow!( + "Raiko: proof not ready after {} attempts", + self.max_retries + )) + } +} diff --git a/realtime/src/shared_abi/Bridge.json b/realtime/src/shared_abi/Bridge.json new file mode 100644 index 00000000..8f768573 --- /dev/null +++ b/realtime/src/shared_abi/Bridge.json @@ -0,0 +1,738 @@ +{ + "abi": [ + { + "type": "function", + "name": "context", + "inputs": [], + "outputs": [ + { + "name": "ctx_", + "type": "tuple", + "internalType": "struct IBridge.Context", + "components": [ + { + "name": "msgHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "failMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], 
+ "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "hashMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "isMessageSent", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + 
}, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "nextMessageId", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "processMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "", + "type": "uint8", + "internalType": "enum IBridge.Status" + }, + { + "name": "", + "type": "uint8", + "internalType": "enum IBridge.StatusReason" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "recallMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + 
"internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "retryMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + 
"name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_isLastAttempt", + "type": "bool", + "internalType": "bool" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "sendMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "msgHash_", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "message_", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": 
"destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "stateMutability": "payable" + }, + { + "type": "event", + "name": "MessageSent", + "inputs": [ + { + "name": "msgHash", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "message", + "type": "tuple", + "indexed": false, + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "MessageStatusChanged", + "inputs": [ + { + "name": "msgHash", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "status", + "type": "uint8", + "indexed": false, + "internalType": "enum IBridge.Status" + } + ], + "anonymous": false + } + ] +} \ No newline at end of file diff --git a/realtime/src/shared_abi/SignalService.json b/realtime/src/shared_abi/SignalService.json new file mode 100644 index 00000000..05a35bb2 --- 
/dev/null +++ b/realtime/src/shared_abi/SignalService.json @@ -0,0 +1 @@ +{"abi":[{"type":"function","name":"getCheckpoint","inputs":[{"name":"_blockNumber","type":"uint48","internalType":"uint48"}],"outputs":[{"name":"","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"isSignalSent","inputs":[{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"isSignalSent","inputs":[{"name":"_signalSlot","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"proveSignalReceived","inputs":[{"name":"_chainId","type":"uint64","internalType":"uint64"},{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"numCacheOps_","type":"uint256","internalType":"uint256"}],"stateMutability":"nonpayable"},{"type":"function","name":"saveCheckpoint","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct 
ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"sendSignal","inputs":[{"name":"_signal","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"slot_","type":"bytes32","internalType":"bytes32"}],"stateMutability":"nonpayable"},{"type":"function","name":"setSignalsReceived","inputs":[{"name":"_signalSlots","type":"bytes32[]","internalType":"bytes32[]"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"verifySignalReceived","inputs":[{"name":"_chainId","type":"uint64","internalType":"uint64"},{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"view"},{"type":"event","name":"CheckpointSaved","inputs":[{"name":"blockNumber","type":"uint48","indexed":true,"internalType":"uint48"},{"name":"blockHash","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"SignalSent","inputs":[{"name":"app","type":"address","indexed":false,"internalType":"address"},{"name":"signal","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"slot","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"value","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false}]} diff --git a/realtime/src/shared_abi/bindings.rs b/realtime/src/shared_abi/bindings.rs new file mode 100644 index 00000000..56bc1268 --- /dev/null +++ b/realtime/src/shared_abi/bindings.rs @@ -0,0 +1,33 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + 
#[derive(Debug)] + Bridge, + "src/shared_abi/Bridge.json" +); + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + SignalService, + "src/shared_abi/SignalService.json" +); + +// HopProof encoding struct for cross-chain signal verification via storage proofs. +// Not part of the SignalService ABI directly — it is the encoding format for the +// `_proof` bytes parameter in proveSignalReceived / verifySignalReceived. +sol! { + struct HopProof { + uint64 chainId; + uint64 blockId; + bytes32 rootHash; + uint8 cacheOption; + bytes[] accountProof; + bytes[] storageProof; + } +} diff --git a/realtime/src/shared_abi/mod.rs b/realtime/src/shared_abi/mod.rs new file mode 100644 index 00000000..90c70dcc --- /dev/null +++ b/realtime/src/shared_abi/mod.rs @@ -0,0 +1 @@ +pub mod bindings; diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs new file mode 100644 index 00000000..90ca566d --- /dev/null +++ b/realtime/src/utils/config.rs @@ -0,0 +1,121 @@ +use crate::l1::bindings::ProofType; +use alloy::primitives::Address; +use anyhow::Error; +use common::config::{ConfigTrait, address_parse_error}; +use std::str::FromStr; + +#[derive(Debug, Clone)] +pub struct RealtimeConfig { + pub realtime_inbox: Address, + pub proposer_multicall: Address, + pub bridge: Address, + /// L1 SignalService — needed for L1 callback simulation + /// (state_override on `_receivedSignals` to pass fast-signal check). + pub signal_service: Address, + /// L2 SignalService address — used on the L2 side for signal operations. + pub l2_signal_service: Address, + pub raiko_url: String, + pub raiko_api_key: Option, + pub proof_type: ProofType, + pub raiko_network: String, + pub raiko_l1_network: String, + pub raiko_poll_interval_ms: u64, + pub raiko_max_retries: u32, + pub bridge_rpc_addr: String, + pub preconf_only: bool, + pub proof_request_bypass: bool, + /// When true, overrides the SubProof bit flag to MOCK_ECDSA (0b00000001) + /// regardless of `proof_type`. 
Allows using a real Raiko proof type string + /// while routing on-chain to the DummyProofVerifier. + pub mock_mode: bool, +} + +impl ConfigTrait for RealtimeConfig { + fn read_env_variables() -> Result { + let read_contract_address = |env_var: &str| -> Result { + let address_str = std::env::var(env_var) + .map_err(|e| anyhow::anyhow!("Failed to read {}: {}", env_var, e))?; + Address::from_str(&address_str) + .map_err(|e| address_parse_error(env_var, e, &address_str)) + }; + + let realtime_inbox = read_contract_address("REALTIME_INBOX_ADDRESS")?; + let proposer_multicall = read_contract_address("PROPOSER_MULTICALL_ADDRESS")?; + let bridge = read_contract_address("L1_BRIDGE_ADDRESS")?; + let signal_service = read_contract_address("L1_SIGNAL_SERVICE_ADDRESS")?; + let l2_signal_service = read_contract_address("L2_SIGNAL_SERVICE_ADDRESS")?; + + let raiko_url = + std::env::var("RAIKO_URL").unwrap_or_else(|_| "http://localhost:8080".to_string()); + let raiko_api_key = std::env::var("RAIKO_API_KEY").ok(); + let proof_type: ProofType = std::env::var("PROOF_TYPE") + .unwrap_or_else(|_| "sp1".to_string()) + .parse()?; + let raiko_network = + std::env::var("RAIKO_L2_NETWORK").unwrap_or_else(|_| "taiko_mainnet".to_string()); + let raiko_l1_network = + std::env::var("RAIKO_L1_NETWORK").unwrap_or_else(|_| "ethereum".to_string()); + + let raiko_poll_interval_ms: u64 = std::env::var("RAIKO_POLL_INTERVAL_MS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(2000); + + let raiko_max_retries: u32 = std::env::var("RAIKO_MAX_RETRIES") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(60); + + let bridge_rpc_addr = + std::env::var("BRIDGE_RPC_ADDR").unwrap_or_else(|_| "0.0.0.0:4545".to_string()); + + let preconf_only = std::env::var("PRECONF_ONLY") + .map(|v| v.to_lowercase() != "false" && v != "0") + .unwrap_or(true); + + let proof_request_bypass = std::env::var("PROOF_REQUEST_BYPASS") + .map(|v| v.to_lowercase() != "false" && v != "0") + .unwrap_or(false); + + let mock_mode = 
std::env::var("MOCK_MODE") + .map(|v| v.to_lowercase() != "false" && v != "0") + .unwrap_or(false); + + Ok(RealtimeConfig { + realtime_inbox, + proposer_multicall, + bridge, + signal_service, + l2_signal_service, + raiko_url, + raiko_api_key, + proof_type, + raiko_network, + raiko_l1_network, + raiko_poll_interval_ms, + raiko_max_retries, + bridge_rpc_addr, + preconf_only, + proof_request_bypass, + mock_mode, + }) + } +} + +use std::fmt; +impl fmt::Display for RealtimeConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "RealTime inbox: {:#?}", self.realtime_inbox)?; + writeln!(f, "Proposer multicall: {:#?}", self.proposer_multicall)?; + writeln!(f, "Raiko URL: {}", self.raiko_url)?; + writeln!( + f, + "Proof type: {} (bit flag: {})", + self.proof_type, + self.proof_type.proof_bit_flag() + )?; + writeln!(f, "Preconf only: {}", self.preconf_only)?; + writeln!(f, "Proof request bypass: {}", self.proof_request_bypass)?; + Ok(()) + } +} diff --git a/realtime/src/utils/mod.rs b/realtime/src/utils/mod.rs new file mode 100644 index 00000000..ef68c369 --- /dev/null +++ b/realtime/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod config; From 5a0719887f114d99241c281a39f2aa83b6607016 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Tue, 28 Apr 2026 11:13:08 +0530 Subject: [PATCH 02/13] refac(realtime): activate fork via REALTIME_TIMESTAMP_SEC instead of FORK env var Drops the bespoke `FORK` env-var override + `Fork::FromStr` impl in favour of the existing per-fork timestamp pattern. Adds `config.realtime_timestamp_sec` (default 99999999999) which is then threaded through `ForkInfoConfig`, matching how Shasta and Permissionless are activated. To run in realtime mode, set REALTIME_TIMESTAMP_SEC=0 (or any past timestamp) at startup. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- common/src/config/mod.rs | 8 ++++++++ common/src/fork_info/config.rs | 2 +- common/src/fork_info/fork.rs | 14 -------------- common/src/fork_info/mod.rs | 9 --------- 4 files changed, 9 insertions(+), 24 deletions(-) diff --git a/common/src/config/mod.rs b/common/src/config/mod.rs index 424ce554..07a5c4bf 100644 --- a/common/src/config/mod.rs +++ b/common/src/config/mod.rs @@ -72,6 +72,7 @@ pub struct Config { pub fork_switch_transition_period_sec: u64, pub shasta_timestamp_sec: u64, pub permissionless_timestamp_sec: u64, + pub realtime_timestamp_sec: u64, // Whitelist monitor pub whitelist_monitor_interval_sec: u64, // Watchdog @@ -432,6 +433,10 @@ impl Config { .unwrap_or("99999999999".to_string()) .parse::() .map_err(|e| anyhow::anyhow!("PERMISSIONLESS_TIMESTAMP_SEC must be a number: {}", e))?; + let realtime_timestamp_sec = std::env::var("REALTIME_TIMESTAMP_SEC") + .unwrap_or("99999999999".to_string()) + .parse::() + .map_err(|e| anyhow::anyhow!("REALTIME_TIMESTAMP_SEC must be a number: {}", e))?; let whitelist_monitor_interval_sec = std::env::var("WHITELIST_MONITOR_INTERVAL_SEC") .unwrap_or("60".to_string()) @@ -513,6 +518,7 @@ impl Config { fork_switch_transition_period_sec, shasta_timestamp_sec, permissionless_timestamp_sec, + realtime_timestamp_sec, whitelist_monitor_interval_sec, watchdog_max_counter, internal_server_ip, @@ -567,6 +573,7 @@ bridge transaction fee: {}wei fork switch transition time: {}s shasta timestamp: {}s permissionless timestamp: {}s +realtime timestamp: {}s whitelist monitor interval: {}s watchdog max counter: {} internal server IP: {} @@ -629,6 +636,7 @@ internal server port: {} config.fork_switch_transition_period_sec, config.shasta_timestamp_sec, config.permissionless_timestamp_sec, + config.realtime_timestamp_sec, config.whitelist_monitor_interval_sec, config.watchdog_max_counter, std::net::Ipv4Addr::from(config.internal_server_ip), diff --git a/common/src/fork_info/config.rs 
b/common/src/fork_info/config.rs index 75dad7b4..ff33d11b 100644 --- a/common/src/fork_info/config.rs +++ b/common/src/fork_info/config.rs @@ -28,7 +28,7 @@ impl From<&Config> for ForkInfoConfig { .map(|f| match f { Fork::Shasta => Duration::from_secs(config.shasta_timestamp_sec), Fork::Permissionless => Duration::from_secs(config.permissionless_timestamp_sec), - Fork::Realtime => Duration::from_secs(99999999999), // Only activated via FORK=realtime + Fork::Realtime => Duration::from_secs(config.realtime_timestamp_sec), }) .collect(); Self { diff --git a/common/src/fork_info/fork.rs b/common/src/fork_info/fork.rs index 4e07bd43..38e5c30b 100644 --- a/common/src/fork_info/fork.rs +++ b/common/src/fork_info/fork.rs @@ -1,5 +1,4 @@ use std::fmt::{Display, Formatter, Result as FmtResult}; -use std::str::FromStr; use strum::{EnumIter, IntoEnumIterator}; #[derive(Clone, Debug, PartialEq, Eq, EnumIter)] @@ -9,19 +8,6 @@ pub enum Fork { Realtime, } -impl FromStr for Fork { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "shasta" => Ok(Fork::Shasta), - "permissionless" => Ok(Fork::Permissionless), - "realtime" => Ok(Fork::Realtime), - _ => Err(anyhow::anyhow!("Unknown fork: {}", s)), - } - } -} - impl Fork { pub fn next(&self) -> Option { Fork::iter().skip_while(|f| f != self).nth(1) diff --git a/common/src/fork_info/mod.rs b/common/src/fork_info/mod.rs index a80729a7..d6603846 100644 --- a/common/src/fork_info/mod.rs +++ b/common/src/fork_info/mod.rs @@ -3,10 +3,8 @@ pub mod fork; use anyhow::Error; use config::ForkInfoConfig; pub use fork::Fork; -use std::str::FromStr; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use strum::IntoEnumIterator; -use tracing::info; #[derive(Debug, Clone)] pub struct ForkInfo { @@ -25,13 +23,6 @@ impl Default for ForkInfo { impl ForkInfo { pub fn from_config(config: ForkInfoConfig) -> Result { - // FORK env var overrides timestamp-based fork detection - if let Ok(fork_override) = 
std::env::var("FORK") { - let fork = Fork::from_str(&fork_override)?; - info!("FORK env var set, overriding fork detection to: {}", fork); - return Ok(Self { fork, config }); - } - let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?; let fork = Self::choose_current_fork(&config, current_timestamp.as_secs())?; Ok(Self { fork, config }) From ad6ed5c621c045117cecf7d56fbdc4455146ce7d Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Tue, 28 Apr 2026 11:16:06 +0530 Subject: [PATCH 03/13] chore: drop stale RUSTSEC-2026-0002 advisory ignore Cargo.lock resolution after the realtime crate's deps no longer pulls in the affected `lru` version, so cargo-deny's `advisory-not-detected` warning fails the audit. Removing the now unused ignore entry. Co-Authored-By: Claude Opus 4.7 (1M context) --- deny.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/deny.toml b/deny.toml index 881ead72..fea636f9 100644 --- a/deny.toml +++ b/deny.toml @@ -19,7 +19,6 @@ ignore = [ "RUSTSEC-2024-0436", # paste - no longer maintained "RUSTSEC-2023-0071", # https://github.com/NethermindEth/Catalyst/issues/735 "RUSTSEC-2025-0141", # unmaintained advisory detected, used by alethia-reth - "RUSTSEC-2026-0002", # lru IterMut soundness; transitive (discv5/kona), upgrade when deps allow ] [licenses] From 7359158a4e82a847f1c80b6b804c88e661df4405 Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Tue, 28 Apr 2026 16:16:15 +0200 Subject: [PATCH 04/13] to_string instead of raiko_proof_type --- realtime/src/l1/bindings.rs | 16 ++++++---------- .../src/node/proposal_manager/async_submitter.rs | 2 +- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/realtime/src/l1/bindings.rs b/realtime/src/l1/bindings.rs index 85d0be4c..a792c60b 100644 --- a/realtime/src/l1/bindings.rs +++ b/realtime/src/l1/bindings.rs @@ -73,15 +73,6 @@ impl ProofType { ProofType::Zisk => 1 << 3, } } - - /// Returns the proof type string expected by Raiko.
- pub fn raiko_proof_type(&self) -> &'static str { - match self { - ProofType::Risc0 => "risc0", - ProofType::Sp1 => "sp1", - ProofType::Zisk => "zisk", - } - } } /// SurgeVerifier MOCK_ECDSA bit flag — used when `MOCK_MODE=true`. @@ -105,6 +96,11 @@ impl std::str::FromStr for ProofType { impl std::fmt::Display for ProofType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(self.raiko_proof_type()) + let s = match self { + ProofType::Risc0 => "risc0", + ProofType::Sp1 => "sp1", + ProofType::Zisk => "zisk", + }; + f.write_str(s) } } diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 9f4c88c8..61c9683c 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -202,7 +202,7 @@ async fn submission_task( let request = RaikoProofRequest { l2_block_numbers, - proof_type: raiko_client.proof_type.raiko_proof_type().to_string(), + proof_type: raiko_client.proof_type.to_string(), max_anchor_block_number: proposal.max_anchor_block_number, last_finalized_block_hash: format!( "0x{}", From 3102d45cfe614859c8897639c8e3cf52deeda88b Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Tue, 28 Apr 2026 16:38:11 +0200 Subject: [PATCH 05/13] Safer iterative version of logs collecting from the CallFrame --- realtime/src/l1/execution_layer.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 9f7628a7..4cb85e3a 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -269,11 +269,13 @@ impl ExecutionLayer { use alloy::rpc::types::trace::geth::{CallFrame, CallLogFrame}; -fn collect_logs_recursive(frame: &CallFrame) -> Vec { - let mut logs = frame.logs.clone(); +fn collect_all_logs(frame: &CallFrame) -> Vec { + let mut logs = Vec::new(); + let mut stack = 
vec![frame]; - for subcall in &frame.calls { - logs.extend(collect_logs_recursive(subcall)); + while let Some(f) = stack.pop() { + logs.extend(f.logs.iter().cloned()); + stack.extend(f.calls.iter()); } logs @@ -346,7 +348,7 @@ impl L1BridgeHandlerOps for ExecutionLayer { let mut slot: Option> = None; if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result { - let all_logs = collect_logs_recursive(&call_frame); + let all_logs = collect_all_logs(&call_frame); tracing::debug!("Collected {} logs from call trace", all_logs.len()); for log in all_logs { From 945988d1a428028b23f3fe29cd91b6607c06d513 Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Tue, 28 Apr 2026 16:51:51 +0200 Subject: [PATCH 06/13] message zip with slot for find_message_and_signal_slot --- realtime/src/l1/execution_layer.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 4cb85e3a..67eb60f7 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -380,11 +380,7 @@ impl L1BridgeHandlerOps for ExecutionLayer { tracing::debug!("{:?} {:?}", message, slot); - if let (Some(message), Some(slot)) = (message, slot) { - return Ok(Some((message, slot))); - } - - Ok(None) + Ok(message.zip(slot)) } async fn simulate_l1_callback_return_signal( From ac0b91d987618db8dbed3adb7a856a0954fc7ad1 Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Tue, 28 Apr 2026 17:04:08 +0200 Subject: [PATCH 07/13] simplified simulate_l1_callback_return_signal --- realtime/src/l1/execution_layer.rs | 85 ++++++++++++++---------------- 1 file changed, 39 insertions(+), 46 deletions(-) diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 67eb60f7..c9516a32 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -458,7 +458,7 @@ impl L1BridgeHandlerOps for ExecutionLayer { 
..Default::default() }; - let trace_result = match self + let trace_result = self .provider .debug_trace_call( tx_request, @@ -466,56 +466,49 @@ impl L1BridgeHandlerOps for ExecutionLayer { call_options, ) .await - { - Ok(t) => t, - Err(e) => { - return Err(anyhow!("L1 callback simulation RPC failed: {e}")); - } + .map_err(|e| anyhow!("L1 callback simulation RPC failed: {e}"))?; + + let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result else { + tracing::debug!("L1 callback simulation found no sendMessage call in trace"); + return Ok(None); }; - // Scan the trace for a sendMessage call to the L1 bridge. - let mut return_msg: Option = None; + let Some((mut msg, caller)) = find_send_message_in_call_tree(&call_frame, bridge_address) + else { + tracing::debug!("L1 callback simulation found no sendMessage call in trace"); + return Ok(None); + }; - if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result - && let Some((mut msg, caller)) = - find_send_message_in_call_tree(&call_frame, bridge_address) - { - // Patch bridge-assigned fields (from, srcChainId, id) - msg.from = caller; - msg.srcChainId = self.common.chain_id(); - // Query nextMessageId for the id the bridge would assign - let bridge_contract = Bridge::new(bridge_address, self.provider.clone()); - if let Ok(next_id) = bridge_contract.nextMessageId().call().await { - msg.id = next_id; - } - return_msg = Some(msg); + // Patch bridge-assigned fields (from, srcChainId, id) + msg.from = caller; + msg.srcChainId = self.common.chain_id(); + // Query nextMessageId for the id the bridge would assign + let bridge_contract = Bridge::new(bridge_address, self.provider.clone()); + if let Ok(next_id) = bridge_contract.nextMessageId().call().await { + msg.id = next_id; } - if let Some(m) = return_msg { - // Compute the signal slot: keccak256("SIGNAL", L1_chain_id, L1_bridge, msgHash) - let return_msg_hash: B256 = - 
bridge.hashMessage(m.clone()).call().await.map_err(|e| { - anyhow!("Failed to call Bridge.hashMessage for return msg: {e}") - })?; - - let l1_chain_id = self.common.chain_id(); - let mut slot_preimage = Vec::with_capacity(6 + 8 + 20 + 32); - slot_preimage.extend_from_slice(b"SIGNAL"); - slot_preimage.extend_from_slice(&l1_chain_id.to_be_bytes()); - slot_preimage.extend_from_slice(bridge_address.as_slice()); - slot_preimage.extend_from_slice(return_msg_hash.as_slice()); - let signal_slot: FixedBytes<32> = keccak256(&slot_preimage); - - tracing::info!( - "L1 callback simulation found return signal: slot={}, destChainId={}", - signal_slot, - m.destChainId - ); - Ok(Some((m, signal_slot))) - } else { - tracing::debug!("L1 callback simulation found no sendMessage call in trace"); - Ok(None) - } + // Compute the signal slot: keccak256("SIGNAL", L1_chain_id, L1_bridge, msgHash) + let return_msg_hash: B256 = bridge + .hashMessage(msg.clone()) + .call() + .await + .map_err(|e| anyhow!("Failed to call Bridge.hashMessage for return msg: {e}"))?; + + let l1_chain_id = self.common.chain_id(); + let mut slot_preimage = Vec::with_capacity(6 + 8 + 20 + 32); + slot_preimage.extend_from_slice(b"SIGNAL"); + slot_preimage.extend_from_slice(&l1_chain_id.to_be_bytes()); + slot_preimage.extend_from_slice(bridge_address.as_slice()); + slot_preimage.extend_from_slice(return_msg_hash.as_slice()); + let signal_slot: FixedBytes<32> = keccak256(&slot_preimage); + + tracing::info!( + "L1 callback simulation found return signal: slot={}, destChainId={}", + signal_slot, + msg.destChainId + ); + Ok(Some((msg, signal_slot))) } } From 65651ce36f39d8991007a32ab4cdbc278a0e9b50 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Wed, 29 Apr 2026 10:51:58 +0530 Subject: [PATCH 08/13] refac(realtime): remove dead code instead of suppressing warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removes all `#[allow(dead_code)]` markers in realtime/ and the items 
they were guarding, rather than silencing warnings. Touches: - ProtocolConfig: drop unused proof_verifier, signal_service fields and get_max_anchor_height_offset getter. - RaikoClient: drop l2_network/l1_network fields and the corresponding RAIKO_L2_NETWORK / RAIKO_L1_NETWORK env reads — they were stored but never sent in proof requests (request body uses None). - ContractAddresses + EthereumL1Config: drop signal_service field, L1_SIGNAL_SERVICE_ADDRESS env read, and the dead raiko_client chain (RaikoClient is constructed at lib.rs and routed through AsyncSubmitter, not ExecutionLayer). - BridgeHandler: drop the l1_chain_id field and the unused parameter chain back to Node::new (lib.rs). - BatchBuilder: drop unused `metrics` field/param + add_recovered_l2_block, add_l2_user_op_id methods. - BatchManager: drop unused metrics, cancel_token fields and reanchor_block method. - Node: drop unused metrics field/param. - L2ExecutionLayer: drop unused TaikoConfig field, get_head_l1_origin, get_last_synced_block_params_from_geth, decode_block_params_from_tx_data, get_anchor_tx_data methods. - Taiko: drop unused coinbase field, get_protocol_config, get_l2_block_by_number, fetch_l2_blocks_until_latest, decode_anchor_id_from_tx_data, get_anchor_tx_data wrappers. - proposal::Proposals type alias. Net: -249 / +5 lines, no new warnings, all 122 tests still pass. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- realtime/src/l1/config.rs | 8 --- realtime/src/l1/execution_layer.rs | 11 ---- realtime/src/l1/protocol_config.rs | 13 ----- realtime/src/l2/execution_layer.rs | 53 ------------------- realtime/src/l2/taiko.rs | 52 +----------------- realtime/src/lib.rs | 7 --- realtime/src/node/mod.rs | 8 --- .../node/proposal_manager/batch_builder.rs | 38 +------------ .../node/proposal_manager/bridge_handler.rs | 4 -- realtime/src/node/proposal_manager/mod.rs | 37 +------------ .../src/node/proposal_manager/proposal.rs | 4 -- realtime/src/raiko/mod.rs | 6 --- realtime/src/utils/config.rs | 13 ----- 13 files changed, 5 insertions(+), 249 deletions(-) diff --git a/realtime/src/l1/config.rs b/realtime/src/l1/config.rs index ad6db1d2..c03374ba 100644 --- a/realtime/src/l1/config.rs +++ b/realtime/src/l1/config.rs @@ -1,5 +1,4 @@ use crate::l1::bindings::ProofType; -use crate::raiko::RaikoClient; use crate::utils::config::RealtimeConfig; use alloy::primitives::Address; @@ -8,33 +7,26 @@ pub struct ContractAddresses { pub realtime_inbox: Address, pub proposer_multicall: Address, pub bridge: Address, - #[allow(dead_code)] - pub signal_service: Address, } pub struct EthereumL1Config { pub realtime_inbox: Address, pub proposer_multicall: Address, pub bridge: Address, - pub signal_service: Address, pub proof_type: ProofType, pub mock_mode: bool, - pub raiko_client: RaikoClient, } impl TryFrom for EthereumL1Config { type Error = anyhow::Error; fn try_from(config: RealtimeConfig) -> Result { - let raiko_client = RaikoClient::new(&config); Ok(EthereumL1Config { realtime_inbox: config.realtime_inbox, proposer_multicall: config.proposer_multicall, bridge: config.bridge, - signal_service: config.signal_service, proof_type: config.proof_type, mock_mode: config.mock_mode, - raiko_client, }) } } diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index c9516a32..5d7230cf 100644 --- 
a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -3,7 +3,6 @@ use super::proposal_tx_builder::ProposalTxBuilder; use super::protocol_config::ProtocolConfig; use crate::l1::bindings::RealTimeInbox::{self, RealTimeInboxInstance}; use crate::node::proposal_manager::proposal::Proposal; -use crate::raiko::RaikoClient; use crate::shared_abi::bindings::{ Bridge, Bridge::MessageSent, IBridge::Message, SignalService::SignalSent, }; @@ -45,8 +44,6 @@ pub struct ExecutionLayer { pub transaction_monitor: TransactionMonitor, contract_addresses: ContractAddresses, realtime_inbox: RealTimeInboxInstance, - #[allow(dead_code)] - raiko_client: RaikoClient, proof_type: crate::l1::bindings::ProofType, mock_mode: bool, extra_gas_percentage: u64, @@ -100,12 +97,10 @@ impl ELTrait for ExecutionLayer { realtime_inbox: specific_config.realtime_inbox, proposer_multicall: specific_config.proposer_multicall, bridge: specific_config.bridge, - signal_service: specific_config.signal_service, }; let proof_type = specific_config.proof_type; let mock_mode = specific_config.mock_mode; - let raiko_client = specific_config.raiko_client; let extra_gas_percentage = common_config.extra_gas_percentage; Ok(Self { @@ -115,7 +110,6 @@ impl ELTrait for ExecutionLayer { transaction_monitor, contract_addresses, realtime_inbox, - raiko_client, proof_type, mock_mode, extra_gas_percentage, @@ -182,11 +176,6 @@ impl PreconfOperator for ExecutionLayer { } impl ExecutionLayer { - #[allow(dead_code)] - pub fn get_raiko_client(&self) -> &RaikoClient { - &self.raiko_client - } - /// Returns a clone of the configured contract addresses (L1 inbox, /// bridge, signal service, proposer multicall). Useful for callers that /// need to reference these during block building. 
diff --git a/realtime/src/l1/protocol_config.rs b/realtime/src/l1/protocol_config.rs index 7b96307f..16722bd9 100644 --- a/realtime/src/l1/protocol_config.rs +++ b/realtime/src/l1/protocol_config.rs @@ -1,21 +1,14 @@ use crate::l1::bindings::IRealTimeInbox::Config; -use alloy::primitives::Address; #[derive(Clone, Default)] pub struct ProtocolConfig { pub basefee_sharing_pctg: u8, - #[allow(dead_code)] - pub proof_verifier: Address, - #[allow(dead_code)] - pub signal_service: Address, } impl From<&Config> for ProtocolConfig { fn from(config: &Config) -> Self { Self { basefee_sharing_pctg: config.basefeeSharingPctg, - proof_verifier: config.proofVerifier, - signal_service: config.signalService, } } } @@ -24,10 +17,4 @@ impl ProtocolConfig { pub fn get_basefee_sharing_pctg(&self) -> u8 { self.basefee_sharing_pctg } - - /// Use the EVM blockhash() 256-block limit as the max anchor height offset. - #[allow(dead_code)] - pub fn get_max_anchor_height_offset(&self) -> u64 { - 256 - } } diff --git a/realtime/src/l2/execution_layer.rs b/realtime/src/l2/execution_layer.rs index 1f25f24d..7e022952 100644 --- a/realtime/src/l2/execution_layer.rs +++ b/realtime/src/l2/execution_layer.rs @@ -42,8 +42,6 @@ pub struct L2ExecutionLayer { pub bridge: Bridge::BridgeInstance, pub signal_service: Address, pub chain_id: u64, - #[allow(dead_code)] - pub config: TaikoConfig, l2_call_signer: Arc, } @@ -77,7 +75,6 @@ impl L2ExecutionLayer { signal_service, chain_id, l2_call_signer, - config: taiko_config, }) } @@ -189,56 +186,6 @@ impl L2ExecutionLayer { .map_err(|e| anyhow::anyhow!("Failed to decode anchor id from tx data: {}", e))?; Ok(tx_data._checkpoint.blockNumber.to::()) } - - pub fn get_anchor_tx_data(data: &[u8]) -> Result { - let tx_data = - ::abi_decode_validate( - data, - ) - .map_err(|e| anyhow::anyhow!("Failed to decode anchor tx data: {}", e))?; - Ok(tx_data) - } - - #[allow(dead_code)] - pub async fn get_head_l1_origin(&self) -> Result { - let response = self - .provider - 
.raw_request::<_, serde_json::Value>( - std::borrow::Cow::Borrowed("taiko_headL1Origin"), - (), - ) - .await - .map_err(|e| anyhow::anyhow!("Failed to fetch taiko_headL1Origin: {}", e))?; - - let hex_str = response - .get("blockID") - .or_else(|| response.get("blockId")) - .and_then(serde_json::Value::as_str) - .ok_or_else(|| { - anyhow::anyhow!("Missing or invalid block id in taiko_headL1Origin response") - })?; - - u64::from_str_radix(hex_str.trim_start_matches("0x"), 16) - .map_err(|e| anyhow::anyhow!("Failed to parse 'blockID' as u64: {}", e)) - } - - #[allow(dead_code)] - pub async fn get_last_synced_block_params_from_geth(&self) -> Result { - self.get_latest_anchor_transaction_input() - .await - .map_err(|e| anyhow::anyhow!("get_last_synced_block_params_from_geth: {e}")) - .and_then(|input| Self::decode_block_params_from_tx_data(&input)) - } - - #[allow(dead_code)] - pub fn decode_block_params_from_tx_data(data: &[u8]) -> Result { - let tx_data = - ::abi_decode_validate( - data, - ) - .map_err(|e| anyhow::anyhow!("Failed to decode block params from tx data: {}", e))?; - Ok(tx_data._checkpoint) - } } // Surge: L2 EL ops for Bridge Handler diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs index e2c9ed6f..eb2f68f8 100644 --- a/realtime/src/l2/taiko.rs +++ b/realtime/src/l2/taiko.rs @@ -1,8 +1,6 @@ -#![allow(dead_code)] - use super::execution_layer::L2ExecutionLayer; use crate::l1::protocol_config::ProtocolConfig; -use crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; +use crate::l2::bindings::ICheckpointStore::Checkpoint; use crate::node::proposal_manager::l2_block_payload::L2BlockV2Payload; use alloy::primitives::FixedBytes; use alloy::{ @@ -32,14 +30,13 @@ use common::{ use pacaya::l2::config::TaikoConfig; use std::sync::Arc; use taiko_alethia_reth::validation::ANCHOR_V3_V4_GAS_LIMIT; -use tracing::{debug, trace}; +use tracing::trace; pub struct Taiko { protocol_config: ProtocolConfig, l2_execution_layer: Arc, driver: Arc, slot_clock: 
Arc, - coinbase: String, l2_engine: L2Engine, } @@ -73,7 +70,6 @@ impl Taiko { ), driver: Arc::new(TaikoDriver::new(&driver_config, metrics).await?), slot_clock, - coinbase: format!("0x{}", hex::encode(taiko_config.signer.get_address())), l2_engine, }) } @@ -97,46 +93,10 @@ impl Taiko { .await } - pub fn get_protocol_config(&self) -> &ProtocolConfig { - &self.protocol_config - } - pub async fn get_latest_l2_block_id(&self) -> Result { self.l2_execution_layer.common().get_latest_block_id().await } - pub async fn get_l2_block_by_number( - &self, - number: u64, - full_txs: bool, - ) -> Result { - self.l2_execution_layer - .common() - .get_block_by_number(number, full_txs) - .await - } - - pub async fn fetch_l2_blocks_until_latest( - &self, - start_block: u64, - full_txs: bool, - ) -> Result, Error> { - let start_time = std::time::Instant::now(); - let end_block = self.get_latest_l2_block_id().await?; - let mut blocks = Vec::with_capacity(usize::try_from(end_block - start_block + 1)?); - for block_number in start_block..=end_block { - let block = self.get_l2_block_by_number(block_number, full_txs).await?; - blocks.push(block); - } - debug!( - "Fetched L2 blocks from {} to {} in {} ms", - start_block, - end_block, - start_time.elapsed().as_millis() - ); - Ok(blocks) - } - pub async fn get_transaction_by_hash( &self, hash: B256, @@ -336,14 +296,6 @@ impl Taiko { ) -> Result { self.driver.reorg_stale_block(new_head_block_number).await } - - pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { - L2ExecutionLayer::decode_anchor_id_from_tx_data(data) - } - - pub fn get_anchor_tx_data(data: &[u8]) -> Result { - L2ExecutionLayer::get_anchor_tx_data(data) - } } impl Bridgeable for Taiko { diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs index e10697a6..06599ca5 100644 --- a/realtime/src/lib.rs +++ b/realtime/src/lib.rs @@ -138,17 +138,11 @@ pub async fn create_realtime_node( let bridge_rpc_addr = realtime_config.bridge_rpc_addr.clone(); let raiko_client = 
raiko::RaikoClient::new(&realtime_config); - let l1_chain_id = { - use common::l1::traits::ELTrait; - ethereum_l1.execution_layer.common().chain_id() - }; - let node = Node::new( node_config, cancel_token.clone(), ethereum_l1.clone(), taiko.clone(), - metrics.clone(), batch_builder_config, transaction_error_receiver, fork_info, @@ -158,7 +152,6 @@ pub async fn create_realtime_node( preconf_only, proof_request_bypass, bridge_rpc_addr, - l1_chain_id, ) .await .map_err(|e| anyhow::anyhow!("Failed to create Node: {}", e))?; diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index 35030696..86db01c0 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -6,7 +6,6 @@ use common::{ fork_info::ForkInfo, l1::{ethereum_l1::EthereumL1, transaction_error::TransactionError}, l2::taiko_driver::{TaikoDriver, models::BuildPreconfBlockResponse}, - metrics::Metrics, shared::{l2_slot_info_v2::L2SlotContext, l2_tx_lists::PreBuiltTxList}, utils::{self as common_utils, cancellation_token::CancellationToken}, }; @@ -34,8 +33,6 @@ pub struct Node { taiko: Arc, watchdog: common_utils::watchdog::Watchdog, operator: Operator, - #[allow(dead_code)] - metrics: Arc, proposal_manager: BatchManager, head_verifier: HeadVerifier, transaction_error_channel: Receiver, @@ -49,7 +46,6 @@ impl Node { cancel_token: CancellationToken, ethereum_l1: Arc>, taiko: Arc, - metrics: Arc, batch_builder_config: BatchBuilderConfig, transaction_error_channel: Receiver, fork_info: ForkInfo, @@ -59,7 +55,6 @@ impl Node { preconf_only: bool, proof_request_bypass: bool, bridge_rpc_addr: String, - l1_chain_id: u64, ) -> Result { let operator = Operator::new( ethereum_l1.execution_layer.clone(), @@ -84,14 +79,12 @@ impl Node { batch_builder_config, ethereum_l1.clone(), taiko.clone(), - metrics.clone(), cancel_token.clone(), last_finalized_block_hash, raiko_client, basefee_sharing_pctg, proof_request_bypass, bridge_rpc_addr, - l1_chain_id, ) .await .map_err(|e| anyhow::anyhow!("Failed to 
create BatchManager: {}", e))?; @@ -110,7 +103,6 @@ impl Node { taiko, watchdog, operator, - metrics, proposal_manager, head_verifier, transaction_error_channel, diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index 851eb7eb..f0271c0e 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -6,11 +6,7 @@ use crate::node::proposal_manager::{ }; use alloy::primitives::{B256, FixedBytes}; use anyhow::Error; -use common::metrics::Metrics; -use common::{ - batch_builder::BatchBuilderConfig, - shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}, -}; +use common::{batch_builder::BatchBuilderConfig, shared::l2_block_v2::L2BlockV2Draft}; use common::{l1::slot_clock::SlotClock, shared::anchor_block_info::AnchorBlockInfo}; use std::{collections::VecDeque, sync::Arc}; use tracing::{debug, info, trace, warn}; @@ -20,22 +16,15 @@ pub struct BatchBuilder { proposals_to_send: VecDeque, current_proposal: Option, slot_clock: Arc, - #[allow(dead_code)] - metrics: Arc, } impl BatchBuilder { - pub fn new( - config: BatchBuilderConfig, - slot_clock: Arc, - metrics: Arc, - ) -> Self { + pub fn new(config: BatchBuilderConfig, slot_clock: Arc) -> Self { Self { config, proposals_to_send: VecDeque::new(), current_proposal: None, slot_clock, - metrics, } } @@ -120,19 +109,6 @@ impl BatchBuilder { } } - /// Add a pre-built L2BlockV2 directly to the current proposal. - /// Used during recovery to bypass the draft/payload flow. 
- #[allow(dead_code)] - pub fn add_recovered_l2_block(&mut self, l2_block: L2BlockV2) -> Result<(), Error> { - if let Some(current_proposal) = self.current_proposal.as_mut() { - current_proposal.total_bytes += l2_block.prebuilt_tx_list.bytes_length; - current_proposal.l2_blocks.push(l2_block); - Ok(()) - } else { - Err(anyhow::anyhow!("No current batch for recovered block")) - } - } - pub fn add_user_op(&mut self, user_op_data: UserOp) -> Result<&Proposal, Error> { if let Some(current_proposal) = self.current_proposal.as_mut() { current_proposal.user_ops.push(user_op_data.clone()); @@ -143,16 +119,6 @@ impl BatchBuilder { } } - #[allow(dead_code)] - pub fn add_l2_user_op_id(&mut self, id: u64) -> Result<(), Error> { - if let Some(current_proposal) = self.current_proposal.as_mut() { - current_proposal.l2_user_op_ids.push(id); - Ok(()) - } else { - Err(anyhow::anyhow!("No current batch for L2 user op id")) - } - } - pub fn add_l2_mempool_tx_hash(&mut self, tx_hash: B256) -> Result<(), Error> { if let Some(current_proposal) = self.current_proposal.as_mut() { current_proposal.l2_mempool_tx_hashes.push(tx_hash); diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index aa7bcfe5..4eba46fc 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -143,8 +143,6 @@ pub struct BridgeHandler { taiko: Arc, rx: Receiver, status_store: UserOpStatusStore, - #[allow(dead_code)] - l1_chain_id: u64, } impl BridgeHandler { @@ -153,7 +151,6 @@ impl BridgeHandler { ethereum_l1: Arc>, taiko: Arc, cancellation_token: CancellationToken, - l1_chain_id: u64, last_finalized_block_number: Arc, ) -> Result { let (tx, rx) = mpsc::channel::(1024); @@ -416,7 +413,6 @@ impl BridgeHandler { taiko, rx, status_store, - l1_chain_id, }) } diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 84b3e029..916033c9 
100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -17,7 +17,6 @@ use anyhow::Error; use async_submitter::AsyncSubmitter; use batch_builder::BatchBuilder; use bridge_handler::BridgeHandler; -use common::metrics::Metrics; use common::{batch_builder::BatchBuilderConfig, shared::l2_slot_info_v2::L2SlotContext}; use common::{ l1::{ethereum_l1::EthereumL1, traits::ELTrait}, @@ -33,8 +32,6 @@ use std::{net::SocketAddr, sync::Arc}; use tokio::sync::Mutex; use tracing::{debug, error, info, warn}; -use crate::node::L2SlotInfoV2; - const MIN_ANCHOR_OFFSET: u64 = 2; pub struct BatchManager { @@ -44,10 +41,6 @@ pub struct BatchManager { ethereum_l1: Arc>, pub taiko: Arc, l1_height_lag: u64, - #[allow(dead_code)] - metrics: Arc, - #[allow(dead_code)] - cancel_token: CancellationToken, last_finalized_block_hash: B256, last_finalized_block_number: Arc, /// L1→L2 return signal slot discovered during Pass 2 (L2Direct pre-sim). @@ -69,14 +62,12 @@ impl BatchManager { config: BatchBuilderConfig, ethereum_l1: Arc>, taiko: Arc, - metrics: Arc, cancel_token: CancellationToken, last_finalized_block_hash: B256, raiko_client: RaikoClient, basefee_sharing_pctg: u8, proof_request_bypass: bool, bridge_rpc_addr: String, - l1_chain_id: u64, ) -> Result { info!( "Batch builder config:\n\ @@ -108,7 +99,6 @@ impl BatchManager { ethereum_l1.clone(), taiko.clone(), cancel_token.clone(), - l1_chain_id, last_finalized_block_number.clone(), ) .await?, @@ -122,18 +112,12 @@ impl BatchManager { ); Ok(Self { - batch_builder: BatchBuilder::new( - config, - ethereum_l1.slot_clock.clone(), - metrics.clone(), - ), + batch_builder: BatchBuilder::new(config, ethereum_l1.slot_clock.clone()), async_submitter, bridge_handler, ethereum_l1, taiko, l1_height_lag, - metrics, - cancel_token, last_finalized_block_hash, last_finalized_block_number, pending_return_signal: None, @@ -547,7 +531,6 @@ impl BatchManager { self.batch_builder = 
batch_builder::BatchBuilder::new( self.batch_builder.get_config().clone(), self.ethereum_l1.slot_clock.clone(), - self.metrics.clone(), ); Ok(()) @@ -625,22 +608,4 @@ impl BatchManager { self.last_finalized_block_hash = last_finalized_hash; Ok(()) } - - #[allow(dead_code)] - pub async fn reanchor_block( - &mut self, - pending_tx_list: PreBuiltTxList, - l2_slot_info: L2SlotInfoV2, - ) -> Result { - let l2_slot_context = L2SlotContext { - info: l2_slot_info, - end_of_sequencing: false, - }; - - let block = self - .add_new_l2_block(pending_tx_list, &l2_slot_context, OperationType::Reanchor) - .await?; - - Ok(block) - } } diff --git a/realtime/src/node/proposal_manager/proposal.rs b/realtime/src/node/proposal_manager/proposal.rs index 438e43b4..2a6165c7 100644 --- a/realtime/src/node/proposal_manager/proposal.rs +++ b/realtime/src/node/proposal_manager/proposal.rs @@ -5,14 +5,10 @@ use crate::node::proposal_manager::{ }; use alloy::primitives::{Address, B256, FixedBytes}; use common::shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}; -use std::collections::VecDeque; use std::time::Instant; use taiko_protocol::shasta::manifest::{BlockManifest, DerivationSourceManifest}; use tracing::{debug, warn}; -#[allow(dead_code)] -pub type Proposals = VecDeque; - #[derive(Default, Clone)] pub struct Proposal { pub l2_blocks: Vec, diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 1b4f7ede..50356d41 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -12,10 +12,6 @@ pub struct RaikoClient { pub base_url: String, pub api_key: Option, pub proof_type: ProofType, - #[allow(dead_code)] - l2_network: String, - #[allow(dead_code)] - l1_network: String, poll_interval: Duration, max_retries: u32, } @@ -105,8 +101,6 @@ impl RaikoClient { base_url: config.raiko_url.clone(), api_key: config.raiko_api_key.clone(), proof_type: config.proof_type, - l2_network: config.raiko_network.clone(), - l1_network: config.raiko_l1_network.clone(), poll_interval: 
Duration::from_millis(config.raiko_poll_interval_ms), max_retries: config.raiko_max_retries, } diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index 90ca566d..f9d0a66c 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -9,16 +9,11 @@ pub struct RealtimeConfig { pub realtime_inbox: Address, pub proposer_multicall: Address, pub bridge: Address, - /// L1 SignalService — needed for L1 callback simulation - /// (state_override on `_receivedSignals` to pass fast-signal check). - pub signal_service: Address, /// L2 SignalService address — used on the L2 side for signal operations. pub l2_signal_service: Address, pub raiko_url: String, pub raiko_api_key: Option, pub proof_type: ProofType, - pub raiko_network: String, - pub raiko_l1_network: String, pub raiko_poll_interval_ms: u64, pub raiko_max_retries: u32, pub bridge_rpc_addr: String, @@ -42,7 +37,6 @@ impl ConfigTrait for RealtimeConfig { let realtime_inbox = read_contract_address("REALTIME_INBOX_ADDRESS")?; let proposer_multicall = read_contract_address("PROPOSER_MULTICALL_ADDRESS")?; let bridge = read_contract_address("L1_BRIDGE_ADDRESS")?; - let signal_service = read_contract_address("L1_SIGNAL_SERVICE_ADDRESS")?; let l2_signal_service = read_contract_address("L2_SIGNAL_SERVICE_ADDRESS")?; let raiko_url = @@ -51,10 +45,6 @@ impl ConfigTrait for RealtimeConfig { let proof_type: ProofType = std::env::var("PROOF_TYPE") .unwrap_or_else(|_| "sp1".to_string()) .parse()?; - let raiko_network = - std::env::var("RAIKO_L2_NETWORK").unwrap_or_else(|_| "taiko_mainnet".to_string()); - let raiko_l1_network = - std::env::var("RAIKO_L1_NETWORK").unwrap_or_else(|_| "ethereum".to_string()); let raiko_poll_interval_ms: u64 = std::env::var("RAIKO_POLL_INTERVAL_MS") .ok() @@ -85,13 +75,10 @@ impl ConfigTrait for RealtimeConfig { realtime_inbox, proposer_multicall, bridge, - signal_service, l2_signal_service, raiko_url, raiko_api_key, proof_type, - raiko_network, - raiko_l1_network, 
raiko_poll_interval_ms, raiko_max_retries, bridge_rpc_addr, From 57581bea1ae85c7f561486842cd3e1c8adba2d96 Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Wed, 29 Apr 2026 14:08:51 +0200 Subject: [PATCH 09/13] Replaced sled crate with fjall. Sled is unmaintained, cargo deny didn't accept it. --- Cargo.lock | 287 +++++++++++------- Cargo.toml | 2 +- deny.toml | 1 + realtime/Cargo.toml | 2 +- .../node/proposal_manager/bridge_handler.rs | 33 +- 5 files changed, 204 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de677791..794b715a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -623,7 +623,7 @@ dependencies = [ "futures", "futures-utils-wasm", "lru 0.16.3", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "reqwest 0.12.28", "serde", @@ -647,7 +647,7 @@ dependencies = [ "auto_impl", "bimap", "futures", - "parking_lot 0.12.5", + "parking_lot", "serde", "serde_json", "tokio", @@ -956,7 +956,7 @@ dependencies = [ "derive_more", "futures", "futures-utils-wasm", - "parking_lot 0.12.5", + "parking_lot", "serde", "serde_json", "thiserror 2.0.18", @@ -1968,6 +1968,12 @@ dependencies = [ "serde", ] +[[package]] +name = "byteview" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6236364b88b9b6d0bc181ba374cf1ab55ba3ef97a1cb6f8cddad48a273767fb5" + [[package]] name = "c-kzg" version = "2.1.6" @@ -2227,6 +2233,12 @@ dependencies = [ "tracing-subscriber 0.3.23", ] +[[package]] +name = "compare" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0095f6103c2a8b44acd6fd15960c801dafebf02e21940360833e0673f48ba7" + [[package]] name = "concat-kdf" version = "0.1.0" @@ -2581,7 +2593,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.12", + "parking_lot_core", "serde", ] @@ -2830,7 +2842,7 @@ dependencies = [ "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot 0.12.5", + "parking_lot", "rand 0.8.6", "smallvec", 
"socket2 0.5.10", @@ -2863,7 +2875,7 @@ dependencies = [ "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot 0.12.5", + "parking_lot", "rand 0.8.6", "smallvec", "socket2 0.5.10", @@ -2890,6 +2902,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "double-ended-peekable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0d05e1c0dbad51b52c38bda7adceef61b9efc2baf04acfe8726a8c4630a6f57" + [[package]] name = "driver" version = "2.0.0" @@ -3086,6 +3104,18 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -3281,6 +3311,23 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "fjall" +version = "2.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b25ad44cd4360a0448a9b5a0a6f1c7a621101cca4578706d43c9a821418aebc" +dependencies = [ + "byteorder", + "byteview", + "dashmap", + "log", + "lsm-tree", + "path-absolutize", + "std-semaphore", + "tempfile", + "xxhash-rust", +] + [[package]] name = "flate2" version = "1.1.9" @@ -3345,16 +3392,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "fs_extra" version = "1.3.0" @@ -3436,7 +3473,7 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot 0.12.5", + 
"parking_lot", ] [[package]] @@ -3518,15 +3555,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -3631,6 +3659,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "guardian" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17e2ac29387b1aa07a1e448f7bb4f35b500787971e965b02842b900afa5c8f6f" + [[package]] name = "h2" version = "0.4.13" @@ -3785,7 +3819,7 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot 0.12.5", + "parking_lot", "rand 0.9.4", "resolv-conf", "serde", @@ -4288,12 +4322,12 @@ dependencies = [ ] [[package]] -name = "instant" -version = "0.1.13" +name = "interval-heap" +version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "11274e5e8e89b8607cfedc2910b6626e998779b48a019151c7604d0adcb86ac6" dependencies = [ - "cfg-if", + "compare", ] [[package]] @@ -4434,7 +4468,7 @@ dependencies = [ "http-body", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.9.4", "rustc-hash", @@ -4856,7 +4890,7 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "quick-protobuf", "rand 0.8.6", @@ -4878,7 +4912,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.5", + "parking_lot", "smallvec", "tracing", ] @@ -5309,6 +5343,36 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" +[[package]] +name = "lsm-tree" +version = "2.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799399117a2bfb37660e08be33f470958babb98386b04185288d829df362ea15" +dependencies = [ + "byteorder", + "crossbeam-skiplist", + "double-ended-peekable", + "enum_dispatch", + "guardian", + "interval-heap", + "log", + "lz4_flex 0.11.6", + "path-absolutize", + "quick_cache", + "rustc-hash", + "self_cell", + "tempfile", + "value-log", + "varint-rs", + "xxhash-rust", +] + +[[package]] +name = "lz4_flex" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373f5eceeeab7925e0c1098212f2fbc4d416adec9d35051a6ab251e824c1854a" + [[package]] name = "lz4_flex" version = "0.12.1" @@ -5541,7 +5605,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "equivalent", - "parking_lot 0.12.5", + "parking_lot", "portable-atomic", "smallvec", "tagptr", @@ -6379,17 +6443,6 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.5" @@ -6397,21 +6450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.12", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - 
"redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -6433,6 +6472,24 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "path-absolutize" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4af381fe79fa195b4909485d99f73a80792331df0625188e707854f0b3383f5" +dependencies = [ + "path-dedot", +] + +[[package]] +name = "path-dedot" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07ba0ad7e047712414213ff67533e6dd477af0a4e1d14fb52343e53d30ea9397" +dependencies = [ + "once_cell", +] + [[package]] name = "pem" version = "3.0.6" @@ -6847,7 +6904,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.5", + "parking_lot", "thiserror 2.0.18", ] @@ -6859,7 +6916,7 @@ checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.5", + "parking_lot", "prometheus-client-derive-encode", ] @@ -7005,6 +7062,16 @@ dependencies = [ "unsigned-varint 0.8.0", ] +[[package]] +name = "quick_cache" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a70b1b8b47e31d0498ecbc3c5470bb931399a8bfed1fd79d1717a61ce7f96e3" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", +] + [[package]] name = "quinn" version = "0.11.9" @@ -7237,6 +7304,7 @@ dependencies = [ "chrono", "common", "dotenvy", + "fjall", "flate2", "futures-util", "hex", @@ -7251,22 +7319,12 @@ dependencies = [ "rpc", "serde", "serde_json", - "sled", "tokio", "tokio-util", "tracing", "tracing-subscriber 0.3.23", ] -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - 
"bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.18" @@ -7485,7 +7543,7 @@ dependencies = [ "alloy-primitives", "derive_more", "metrics 0.24.3", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.9.4", "reth-chainspec 1.11.3", @@ -7705,7 +7763,7 @@ dependencies = [ "discv5 0.10.2", "enr", "itertools 0.14.0", - "parking_lot 0.12.5", + "parking_lot", "rand 0.8.6", "reth-ethereum-forks 1.11.3", "reth-net-banlist 1.11.3", @@ -8118,7 +8176,7 @@ dependencies = [ "byteorder", "dashmap", "derive_more", - "parking_lot 0.12.5", + "parking_lot", "reth-mdbx-sys", "smallvec", "thiserror 2.0.18", @@ -8203,7 +8261,7 @@ dependencies = [ "futures", "itertools 0.14.0", "metrics 0.24.3", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.8.6", "rand 0.9.4", @@ -8352,7 +8410,7 @@ dependencies = [ "anyhow", "bincode", "derive_more", - "lz4_flex", + "lz4_flex 0.12.1", "memmap2", "reth-fs-util", "serde", @@ -8590,7 +8648,7 @@ dependencies = [ "itertools 0.14.0", "metrics 0.24.3", "notify", - "parking_lot 0.12.5", + "parking_lot", "rayon", "reth-chain-state", "reth-chainspec 1.11.3", @@ -8883,7 +8941,7 @@ dependencies = [ "bitflags 2.11.0", "futures-util", "metrics 0.24.3", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.9.4", "reth-chain-state", @@ -8925,7 +8983,7 @@ dependencies = [ "auto_impl", "itertools 0.14.0", "metrics 0.24.3", - "parking_lot 0.12.5", + "parking_lot", "reth-execution-errors", "reth-metrics 1.11.3", "reth-primitives-traits 1.11.3", @@ -8968,7 +9026,7 @@ source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c4 dependencies = [ "alloy-primitives", "metrics 0.24.3", - "parking_lot 0.12.5", + "parking_lot", "reth-db-api", "reth-execution-errors", "reth-metrics 1.11.3", @@ -10033,6 +10091,12 @@ dependencies = [ "libc", ] +[[package]] +name = "self_cell" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b12e76d157a900eb52e81bc6e9f3069344290341720e9178cde2407113ac8d89" + [[package]] name = "semver" version = "0.11.0" @@ -10360,22 +10424,6 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" -[[package]] -name = "sled" -version = "0.34.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" -dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot 0.11.2", -] - [[package]] name = "smallvec" version = "1.15.1" @@ -10689,6 +10737,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "std-semaphore" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ae9eec00137a8eed469fb4148acd9fc6ac8c3f9b110f52cd34698c8b5bfa0e" + [[package]] name = "stringprep" version = "0.1.5" @@ -10984,7 +11038,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot 0.12.5", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.6.2", @@ -11619,6 +11673,29 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +[[package]] +name = "value-log" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62fc7c4ce161f049607ecea654dca3f2d727da5371ae85e2e4f14ce2b98ed67c" +dependencies = [ + "byteorder", + "byteview", + "interval-heap", + "log", + "path-absolutize", + "rustc-hash", + "tempfile", + "varint-rs", + "xxhash-rust", +] + +[[package]] +name = "varint-rs" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f54a172d0620933a27a4360d3db3e2ae0dd6cceae9730751a036bbf182c4b23" + [[package]] name = "vcpkg" version = "0.2.15" @@ -11844,7 +11921,7 @@ checksum = "1c598d6b99ea013e35844697fc4670d08339d5cda15588f193c6beedd12f644b" dependencies = [ "futures", "js-sys", - "parking_lot 0.12.5", + "parking_lot", "pin-utils", "slab", "wasm-bindgen", @@ -12585,6 +12662,12 @@ dependencies = [ "xml-rs", ] +[[package]] +name = "xxhash-rust" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" + [[package]] name = "yamux" version = "0.12.1" @@ -12594,7 +12677,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.8.6", "static_assertions", @@ -12609,7 +12692,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.5", + "parking_lot", "pin-project", "rand 0.9.4", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index 448bcc6f..827d863b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ discv5 = { version = "0.10", default-features = false, features = ["libp2p"] } dotenvy = { version = "0.15", default-features = false } ecdsa = { version = "0.16", default-features = false } elliptic-curve = { version = "0.13", default-features = false } +fjall = { version = "2" } flate2 = { version = "1.1", default-features = false, features = ["zlib"] } futures = { version = "0.3.31", default-features = false } futures-util = { version = "0.3", default-features = false } @@ -93,7 +94,6 @@ secp256k1 = { version = "0.30", features = ["recovery", "rand"] } serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false } shasta = { path = "shasta" } -sled = { version = "0.34", default-features = false } ssz_rs = { version = "0.9.0" } strum = { version = "0.27", features = ["derive"] } diff --git a/deny.toml b/deny.toml index 
07f4b9b5..b2a3a80a 100644 --- a/deny.toml +++ b/deny.toml @@ -28,6 +28,7 @@ allow = [ "MIT", "Apache-2.0", "0BSD", + "BSL-1.0", "BSD-3-Clause", "BSD-2-Clause", "CC-BY-1.0", diff --git a/realtime/Cargo.toml b/realtime/Cargo.toml index d345d87c..c51db8b1 100644 --- a/realtime/Cargo.toml +++ b/realtime/Cargo.toml @@ -15,6 +15,7 @@ async-trait = { workspace = true } chrono = { workspace = true } common = { workspace = true } dotenvy = { workspace = true } +fjall = { workspace = true } flate2 = { workspace = true } futures-util = { workspace = true } hex = { workspace = true } @@ -26,7 +27,6 @@ prometheus = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -sled = { workspace = true } taiko_alethia_reth = { workspace = true } taiko_bindings = { workspace = true } taiko_protocol = { workspace = true } diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index 4eba46fc..d36465de 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -24,37 +24,36 @@ pub enum UserOpStatus { Executed, } -/// Disk-backed user op status store using sled. +/// Disk-backed user op status store using fjall. /// -/// Two keyspaces live in this store: -/// - default tree: keyed by `u64` UserOp id (L1→L2→L1 path). -/// - `by_hash` tree: keyed by L2 tx hash `B256` (L2→L1→L2 mempool-picked txs). +/// Two partitions live in this store: +/// - `by_id`: keyed by `u64` UserOp id (L1→L2→L1 path). +/// - `by_hash`: keyed by L2 tx hash `B256` (L2→L1→L2 mempool-picked txs). 
#[derive(Clone)] pub struct UserOpStatusStore { - db: sled::Db, - by_hash: sled::Tree, + by_id: fjall::PartitionHandle, + by_hash: fjall::PartitionHandle, } impl UserOpStatusStore { pub fn open(path: &str) -> Result { - let db = sled::open(path) - .map_err(|e| anyhow::anyhow!("Failed to open user op status store: {}", e))?; - let by_hash = db - .open_tree("by_hash") - .map_err(|e| anyhow::anyhow!("Failed to open by_hash tree: {}", e))?; - Ok(Self { db, by_hash }) + let keyspace = fjall::Config::new(path).open()?; + let by_id = keyspace.open_partition("by_id", fjall::PartitionCreateOptions::default())?; + let by_hash = + keyspace.open_partition("by_hash", fjall::PartitionCreateOptions::default())?; + Ok(Self { by_id, by_hash }) } pub fn set(&self, id: u64, status: &UserOpStatus) { if let Ok(value) = serde_json::to_vec(status) - && let Err(e) = self.db.insert(id.to_be_bytes(), value) + && let Err(e) = self.by_id.insert(id.to_be_bytes(), value) { - error!("Failed to write user op status: {}", e); + error!("Failed to write user op status: {e}"); } } pub fn get(&self, id: u64) -> Option { - self.db + self.by_id .get(id.to_be_bytes()) .ok() .flatten() @@ -62,14 +61,14 @@ impl UserOpStatusStore { } pub fn remove(&self, id: u64) { - let _ = self.db.remove(id.to_be_bytes()); + let _ = self.by_id.remove(id.to_be_bytes()); } pub fn set_by_hash(&self, hash: B256, status: &UserOpStatus) { if let Ok(value) = serde_json::to_vec(status) && let Err(e) = self.by_hash.insert(hash.as_slice(), value) { - error!("Failed to write tx status by hash: {}", e); + error!("Failed to write tx status by hash: {e}"); } } From b9534115e793f45425faf938958a7a5db20fbcac Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Wed, 15 Apr 2026 13:53:10 +0200 Subject: [PATCH 10/13] Returning transaction result handlers instead of propagating them to send batch functions and tx monitor --- common/src/shared/transaction_monitor.rs | 101 +++++++++++------- common/src/utils/logging.rs | 10 ++ 
realtime/src/l1/execution_layer.rs | 10 +- .../node/proposal_manager/async_submitter.rs | 86 +++++++-------- shasta/src/l1/execution_layer.rs | 5 +- 5 files changed, 114 insertions(+), 98 deletions(-) diff --git a/common/src/shared/transaction_monitor.rs b/common/src/shared/transaction_monitor.rs index d9a09e18..8c784777 100644 --- a/common/src/shared/transaction_monitor.rs +++ b/common/src/shared/transaction_monitor.rs @@ -35,6 +35,13 @@ pub enum TxStatus { Pending, } +/// Receivers returned by `monitor_new_transaction` so the caller can track progress +/// without coupling the monitor's API to sender types. +pub struct TxMonitorHandles { + pub tx_hash_receiver: tokio::sync::oneshot::Receiver, + pub tx_result_receiver: tokio::sync::oneshot::Receiver, +} + #[derive(Debug, Clone)] pub struct TransactionMonitorConfig { min_priority_fee_per_gas_wei: u128, @@ -55,8 +62,7 @@ pub struct TransactionMonitorThread { chain_id: u64, sent_tx_hashes: Vec>, tx_hash_notifier: Option>, - /// Notifies the caller whether the transaction was confirmed (true) or failed (false). - tx_result_notifier: Option>, + tx_result_notifier: tokio::sync::oneshot::Sender, } //#[derive(Debug)] @@ -102,13 +108,12 @@ impl TransactionMonitor { impl TransactionMonitor { /// Monitor a transaction until it is confirmed or fails. /// Spawns a new tokio task to monitor the transaction. + /// Returns handles to receive the tx hash and final result. 
pub async fn monitor_new_transaction( &self, tx: TransactionRequest, nonce: u64, - tx_hash_notifier: Option>, - tx_result_notifier: Option>, - ) -> Result<(), Error> { + ) -> Result { let mut guard = self.join_handle.lock().await; if let Some(join_handle) = guard.as_ref() && !join_handle.is_finished() @@ -118,19 +123,26 @@ impl TransactionMonitor { )); } - let mut monitor_thread = TransactionMonitorThread::new( + let (tx_hash_sender, tx_hash_receiver) = tokio::sync::oneshot::channel(); + let (tx_result_sender, tx_result_receiver) = tokio::sync::oneshot::channel(); + let handles = TxMonitorHandles { + tx_hash_receiver, + tx_result_receiver, + }; + + let monitor_thread = TransactionMonitorThread::new( self.provider.clone(), self.config.clone(), nonce, self.error_notification_channel.clone(), self.metrics.clone(), self.chain_id, + tx_hash_sender, + tx_result_sender, ); - monitor_thread.tx_hash_notifier = tx_hash_notifier; - monitor_thread.tx_result_notifier = tx_result_notifier; let join_handle = monitor_thread.spawn_monitoring_task(tx); *guard = Some(join_handle); - Ok(()) + Ok(handles) } /// Monitor a transaction built by a deferred builder. 
@@ -140,7 +152,7 @@ impl TransactionMonitor { &self, tx_builder: impl TransactionRequestBuilder, nonce: u64, - ) -> Result<(), Error> { + ) -> Result { let mut guard = self.join_handle.lock().await; if let Some(join_handle) = guard.as_ref() && !join_handle.is_finished() @@ -150,6 +162,13 @@ impl TransactionMonitor { )); } + let (tx_hash_sender, tx_hash_receiver) = tokio::sync::oneshot::channel(); + let (tx_result_sender, tx_result_receiver) = tokio::sync::oneshot::channel(); + let handles = TxMonitorHandles { + tx_hash_receiver, + tx_result_receiver, + }; + let monitor_thread = TransactionMonitorThread::new( self.provider.clone(), self.config.clone(), @@ -157,10 +176,12 @@ impl TransactionMonitor { self.error_notification_channel.clone(), self.metrics.clone(), self.chain_id, + tx_hash_sender, + tx_result_sender, ); let join_handle = monitor_thread.spawn_monitoring_task_with_builder(tx_builder); *guard = Some(join_handle); - Ok(()) + Ok(handles) } pub async fn is_transaction_in_progress(&self) -> Result { @@ -173,6 +194,7 @@ impl TransactionMonitor { } impl TransactionMonitorThread { + #[allow(clippy::too_many_arguments)] pub fn new( provider: DynProvider, config: TransactionMonitorConfig, @@ -180,6 +202,8 @@ impl TransactionMonitorThread { error_notification_channel: Sender, metrics: Arc, chain_id: u64, + tx_hash_notifier: tokio::sync::oneshot::Sender, + tx_result_notifier: tokio::sync::oneshot::Sender, ) -> Self { Self { provider, @@ -189,24 +213,24 @@ impl TransactionMonitorThread { metrics, chain_id, sent_tx_hashes: Vec::new(), - tx_hash_notifier: None, - tx_result_notifier: None, + tx_hash_notifier: Some(tx_hash_notifier), + tx_result_notifier, } } - pub fn spawn_monitoring_task(mut self, tx: TransactionRequest) -> JoinHandle<()> { + pub fn spawn_monitoring_task(self, tx: TransactionRequest) -> JoinHandle<()> { tokio::spawn(async move { self.monitor_transaction(tx).await; }) } - fn notify_result(&mut self, success: bool) { - if let Some(notifier) = 
self.tx_result_notifier.take() { - let _ = notifier.send(success); + fn notify_result(self, success: bool) { + if let Err(err) = self.tx_result_notifier.send(success) { + error!("Failed to send transaction result signal: {}", err); } } pub fn spawn_monitoring_task_with_builder( - mut self, + self, tx_builder: impl TransactionRequestBuilder, ) -> JoinHandle<()> { tokio::spawn(async move { @@ -217,12 +241,13 @@ impl TransactionMonitorThread { Err(err) => { error!("Transaction builder failed: {}", err); self.send_error_signal(err).await; + // notifiers are dropped here, receivers will see channel closed } } }) } - async fn monitor_transaction(&mut self, mut tx: TransactionRequest) { + async fn monitor_transaction(mut self, mut tx: TransactionRequest) { tx.set_nonce(self.nonce); if !matches!(tx.buildable_type(), Some(TxType::Eip1559 | TxType::Eip4844)) { self.send_error_signal(TransactionError::UnsupportedTransactionType) @@ -302,7 +327,7 @@ impl TransactionMonitorThread { let tx_hash = *pending_tx.tx_hash(); self.sent_tx_hashes.push(tx_hash); - // Notify the first tx hash to the caller if requested + // Notify the first tx hash to the caller (fires once, on first send attempt) if let Some(notifier) = self.tx_hash_notifier.take() { let _ = notifier.send(tx_hash); } @@ -351,7 +376,7 @@ impl TransactionMonitorThread { //Wait for transaction result let mut wait_attempt = 0; - let mut resolved = false; + let mut result: Option = None; if let Some(root_provider) = root_provider { // We can use unwrap since tx_hashes is updated before root_provider let tx_hash = self @@ -368,16 +393,14 @@ impl TransactionMonitorThread { ) .await { - self.notify_result(confirmed); - resolved = true; + result = Some(confirmed); break; } if self .verify_tx_included(wait_attempt + self.config.max_attempts_to_send_tx) .await { - self.notify_result(true); - resolved = true; + result = Some(true); break; } warn!("🟣 Transaction watcher timed out without a result. 
Waiting..."); @@ -385,21 +408,21 @@ impl TransactionMonitorThread { } } - if !resolved { - if wait_attempt >= self.config.max_attempts_to_wait_tx { - error!( - "⛔ Transaction {} with nonce {} not confirmed", - if let Some(tx_hash) = self.sent_tx_hashes.last() { - tx_hash.to_string() - } else { - "unknown".to_string() - }, - self.nonce, - ); - - self.send_error_signal(TransactionError::NotConfirmed).await; + match result { + Some(confirmed) => self.notify_result(confirmed), + None => { + if wait_attempt >= self.config.max_attempts_to_wait_tx { + error!( + "⛔ Transaction {} with nonce {} not confirmed", + self.sent_tx_hashes + .last() + .map_or_else(|| "unknown".to_string(), |h| h.to_string()), + self.nonce, + ); + self.send_error_signal(TransactionError::NotConfirmed).await; + } + self.notify_result(false); } - self.notify_result(false); } } diff --git a/common/src/utils/logging.rs b/common/src/utils/logging.rs index a52fe711..2029c6ba 100644 --- a/common/src/utils/logging.rs +++ b/common/src/utils/logging.rs @@ -43,6 +43,16 @@ pub fn init_logging() { .parse() .expect("assert: can parse env filter directive"), ) + .add_directive( + "alloy_pubsub=info" + .parse() + .expect("assert: can parse env filter directive"), + ) + .add_directive( + "alloy_provider=info" + .parse() + .expect("assert: can parse env filter directive"), + ) }); // Create a custom formatter for heartbeat logs diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 5d7230cf..9e4a69bf 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -186,9 +186,7 @@ impl ExecutionLayer { pub async fn send_batch_to_l1( &self, batch: Proposal, - tx_hash_notifier: Option>, - tx_result_notifier: Option>, - ) -> Result<(), Error> { + ) -> Result { info!( "📦 Proposing with {} blocks | user_ops: {:?} | signal_slots: {:?} | l1_calls: {:?} | zk_proof: {}", batch.l2_blocks.len(), @@ -215,11 +213,9 @@ impl ExecutionLayer { let pending_nonce = 
self.get_preconfer_nonce_pending().await?; self.transaction_monitor - .monitor_new_transaction(tx, pending_nonce, tx_hash_notifier, tx_result_notifier) + .monitor_new_transaction(tx, pending_nonce) .await - .map_err(|e| Error::msg(format!("Sending batch to L1 failed: {e}")))?; - - Ok(()) + .map_err(|e| Error::msg(format!("Sending batch to L1 failed: {e}"))) } pub async fn is_transaction_in_progress(&self) -> Result { diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 61c9683c..b3f123e7 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -334,69 +334,57 @@ async fn submission_task( // Step 2: Send L1 transaction let mut user_op_ids: Vec = proposal.user_ops.iter().map(|op| op.id).collect(); user_op_ids.extend(&proposal.l2_user_op_ids); + let l2_mempool_tx_hashes: Vec = proposal.l2_mempool_tx_hashes.clone(); - let has_tracked_entries = - (!user_op_ids.is_empty() || !l2_mempool_tx_hashes.is_empty()) && status_store.is_some(); - - let (tx_hash_sender, tx_hash_receiver) = if has_tracked_entries { - let (s, r) = tokio::sync::oneshot::channel(); - (Some(s), Some(r)) - } else { - (None, None) - }; - let (tx_result_sender, tx_result_receiver) = if has_tracked_entries { - let (s, r) = tokio::sync::oneshot::channel(); - (Some(s), Some(r)) - } else { - (None, None) - }; - if let Err(err) = ethereum_l1 + let handles = match ethereum_l1 .execution_layer - .send_batch_to_l1(proposal.clone(), tx_hash_sender, tx_result_sender) + .send_batch_to_l1(proposal.clone()) .await { - // Mark all tracked entries (L1/L2 UserOps and mempool-picked L2 txs) as rejected - if let Some(ref store) = status_store { - let reason = format!("L1 multicall failed: {}", err); - for op in &proposal.user_ops { - store.set( - op.id, - &UserOpStatus::Rejected { - reason: reason.clone(), - }, - ); - } - for id in &proposal.l2_user_op_ids { - store.set( - *id, 
- &UserOpStatus::Rejected { - reason: reason.clone(), - }, - ); - } - for tx_hash in &proposal.l2_mempool_tx_hashes { - store.set_by_hash( - *tx_hash, - &UserOpStatus::Rejected { - reason: reason.clone(), - }, - ); + Ok(handles) => handles, + Err(err) => { + if let Some(ref store) = status_store { + let reason = format!("L1 multicall failed: {}", err); + for op in &proposal.user_ops { + store.set( + op.id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + for id in &proposal.l2_user_op_ids { + store.set( + *id, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } + for tx_hash in &l2_mempool_tx_hashes { + store.set_by_hash( + *tx_hash, + &UserOpStatus::Rejected { + reason: reason.clone(), + }, + ); + } } + return Err(err); } - return Err(err); - } + }; // Step 3: After successful submission, the new lastFinalizedBlockHash is the checkpoint's blockHash let new_last_finalized_block_hash = proposal.checkpoint.blockHash; let new_last_finalized_block_number = proposal.checkpoint.blockNumber.to::(); // Step 4: Spawn user-op status tracker - if let (Some(hash_rx), Some(result_rx), Some(store)) = - (tx_hash_receiver, tx_result_receiver, status_store) + if let Some(store) = status_store + && (!user_op_ids.is_empty() || !l2_mempool_tx_hashes.is_empty()) { tokio::spawn(async move { - let tx_hash = match hash_rx.await { + let tx_hash = match handles.tx_hash_receiver.await { Ok(tx_hash) => { for id in &user_op_ids { store.set(*id, &UserOpStatus::Processing { tx_hash }); @@ -428,7 +416,7 @@ async fn submission_task( }; if tx_hash.is_some() { - match result_rx.await { + match handles.tx_result_receiver.await { Ok(true) => { for id in &user_op_ids { store.set(*id, &UserOpStatus::Executed); diff --git a/shasta/src/l1/execution_layer.rs b/shasta/src/l1/execution_layer.rs index 5b60692a..ae4a122b 100644 --- a/shasta/src/l1/execution_layer.rs +++ b/shasta/src/l1/execution_layer.rs @@ -209,9 +209,8 @@ impl ExecutionLayer { self.transaction_monitor 
.monitor_new_transaction_with_builder(tx_builder, pending_nonce) .await - .map_err(|e| Error::msg(format!("Sending proposal to L1 failed: {e}")))?; - - Ok(()) + .map(|_| ()) // ignore transaction result handlers, not needed for shasta + .map_err(|e| Error::msg(format!("Sending proposal to L1 failed: {e}"))) } pub async fn is_transaction_in_progress(&self) -> Result { From ef251d8e6443a9e75745e4d5845e8e6330840937 Mon Sep 17 00:00:00 2001 From: Maciej Skrzypkowski Date: Wed, 29 Apr 2026 15:08:09 +0200 Subject: [PATCH 11/13] v1.38.0 --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794b715a..0318f6b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,7 +2200,7 @@ dependencies = [ [[package]] name = "common" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alloy", "alloy-json-rpc", @@ -5767,7 +5767,7 @@ dependencies = [ [[package]] name = "node" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alloy", "alloy-json-rpc", @@ -6366,7 +6366,7 @@ dependencies = [ [[package]] name = "p2p-boot-node" -version = "1.37.3" +version = "1.38.0" dependencies = [ "anyhow", "discv5 0.10.2", @@ -6378,7 +6378,7 @@ dependencies = [ [[package]] name = "pacaya" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alloy", "alloy-json-rpc", @@ -6517,7 +6517,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "permissionless" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", "alloy", @@ -7292,7 +7292,7 @@ dependencies = [ [[package]] name = "realtime" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", "alloy", @@ -10323,7 +10323,7 @@ dependencies = [ [[package]] 
name = "shasta" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=637f7a150f72fe8d6cc5949a41aebb638a5305cf)", "alloy", @@ -11609,7 +11609,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "urc" -version = "1.37.3" +version = "1.38.0" dependencies = [ "alloy", "alloy-json-rpc", diff --git a/Cargo.toml b/Cargo.toml index 827d863b..b80e57d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ resolver = "2" default-members = ["node"] [workspace.package] -version = "1.37.3" +version = "1.38.0" edition = "2024" repository = "https://github.com/NethermindEth/Catalyst" license = "MIT" From 5957dc83bc47d3fdf78e4c1385142d9c19d6d631 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 30 Apr 2026 12:16:31 +0530 Subject: [PATCH 12/13] fix(realtime): address Claude review findings on PR #953 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Picks up the actionable items from the automated review on NethermindEth/Catalyst#953 and leaves the rest documented as follow-ups. Net change: +117 / -46 in realtime/. Bug / correctness: * (#1) Defer staging additions to the in-flight proposal until `advance_head_to_new_l2_block` succeeds. Previously a failed advance left orphan signal slots / user ops in the proposal that no L2 block corresponds to, so the next attempt would re-add the same slot from the mempool scan and `_verifySignalSlots` would revert on duplicate. `pending_return_signal` and `pending_mempool_tx_hash` are now only consumed in the Ok arm so retries see the same slot. * (#3) `AsyncSubmitter::submit` no longer panics on the `is_busy()` invariant — returns `Err` and the caller propagates. * (#2) `transfer_eth_from_l2_to_l1` returns an explicit "not implemented" `Err` instead of `Ok(())` to make accidental wiring loud. Realtime does not run the funds_controller flow. 
* (#8) `processMessage` L2 call now uses the L2 slot's actual base fee for `max_fee_per_gas` instead of a hardcoded 1 gwei. Matches the anchor tx pattern. * (#10) Raiko reqwest client now has a configurable timeout (`RAIKO_TIMEOUT_SEC`, default 30s) — a hung Raiko no longer deadlocks the async submitter forever. Security: * (#4) `BRIDGE_RPC_ADDR` defaults to `127.0.0.1:4545` instead of `0.0.0.0:4545` so the unauthenticated `surge_*` JSON-RPC endpoints are not exposed externally unless an operator explicitly opts in. Usability: * (#13) The fjall DB path for `UserOpStatusStore` is now configurable via `USER_OP_STATUS_DB_PATH` (default `data/user_op_status`), threaded through `BatchManager` → `BridgeHandler`. Diagnostics / cleanup: * (#12) `RealtimeConfig` Display now includes bridge, l2_signal_service, raiko_max_retries, raiko_timeout_sec, mock_mode, bridge_rpc_addr, user_op_status_db_path — startup log shows the full picture. * (#11) `SEND_MESSAGE_SELECTOR` lives in `realtime/src/shared_abi/mod.rs` instead of being duplicated in both execution layers. Deferred (separate PRs / issues): * (#5) Cross-fork pacaya dependency — design decision; pacaya is a shared utility crate on master, deliberately reused. * (#6) Hardcoded NodeConfig values (handover_window_slots etc.) — fine for the only current deployment; expose when a 2nd one shows up. * (#7) Bridge `__ctx` storage slots hardcoded — needs runtime layout check; out of scope here. * (#9) Duplicate blob encoding per submission — perf optimization. * (#14) `surge_txStatus` cleanup timing. * (#15) `getConfig()` called twice on startup. Quality gate: `cargo build --workspace`, `cargo clippy --all-features` on touched crates, `cargo fmt --check`, `cargo test --workspace` (122 passed) all clean. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- realtime/src/l1/execution_layer.rs | 3 +- realtime/src/l2/execution_layer.rs | 30 +++++--- realtime/src/lib.rs | 2 + realtime/src/node/mod.rs | 2 + .../node/proposal_manager/async_submitter.rs | 16 ++-- .../node/proposal_manager/bridge_handler.rs | 3 +- realtime/src/node/proposal_manager/mod.rs | 74 ++++++++++++------- realtime/src/raiko/mod.rs | 6 +- realtime/src/shared_abi/mod.rs | 4 + realtime/src/utils/config.rs | 23 +++++- 10 files changed, 117 insertions(+), 46 deletions(-) diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 9e4a69bf..8ec0ac7c 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -497,8 +497,7 @@ impl L1BridgeHandlerOps for ExecutionLayer { } } -/// `Bridge.sendMessage(Message)` selector. -const SEND_MESSAGE_SELECTOR: [u8; 4] = [0x1b, 0xdb, 0x00, 0x37]; +use crate::shared_abi::SEND_MESSAGE_SELECTOR; /// Recursively search call frames for a CALL to `bridge_address` with the /// `sendMessage` function selector. Returns the decoded `IBridge.Message` diff --git a/realtime/src/l2/execution_layer.rs b/realtime/src/l2/execution_layer.rs index 7e022952..fd652ba9 100644 --- a/realtime/src/l2/execution_layer.rs +++ b/realtime/src/l2/execution_layer.rs @@ -33,7 +33,7 @@ use common::{ }; use pacaya::l2::config::TaikoConfig; use std::sync::Arc; -use tracing::{debug, info, warn}; +use tracing::{debug, info}; pub struct L2ExecutionLayer { common: ExecutionLayerCommon, @@ -140,6 +140,10 @@ impl L2ExecutionLayer { common::crypto::fixed_k_signer::sign_hash_deterministic(GOLDEN_TOUCH_PRIVATE_KEY, hash) } + /// Stubbed out — the realtime fork does not run the funds_controller flow + /// (it has no L2→L1 bridge sweep), but `Taiko` must still implement the + /// `Bridgeable` trait. Return an explicit error so accidental wiring is + /// noisy instead of silently dropping ETH. 
pub async fn transfer_eth_from_l2_to_l1( &self, _amount: u128, @@ -147,8 +151,9 @@ impl L2ExecutionLayer { _preconfer_address: Address, _bridge_relayer_fee: u64, ) -> Result<(), Error> { - warn!("Implement bridge transfer logic here"); - Ok(()) + Err(anyhow::anyhow!( + "transfer_eth_from_l2_to_l1 is not implemented for the realtime fork" + )) } pub async fn get_last_synced_anchor_block_id_from_geth(&self) -> Result { @@ -191,7 +196,11 @@ impl L2ExecutionLayer { // Surge: L2 EL ops for Bridge Handler pub trait L2BridgeHandlerOps { - async fn construct_l2_call_tx(&self, message: Message) -> Result; + async fn construct_l2_call_tx( + &self, + message: Message, + base_fee: u64, + ) -> Result; async fn find_message_and_signal_slot( &self, block_id: u64, @@ -205,7 +214,11 @@ pub trait L2BridgeHandlerOps { } impl L2BridgeHandlerOps for L2ExecutionLayer { - async fn construct_l2_call_tx(&self, message: Message) -> Result { + async fn construct_l2_call_tx( + &self, + message: Message, + base_fee: u64, + ) -> Result { use alloy::signers::local::PrivateKeySigner; use std::str::FromStr; @@ -223,7 +236,7 @@ impl L2BridgeHandlerOps for L2ExecutionLayer { .bridge .processMessage(message, Bytes::new()) .gas(3_000_000) - .max_fee_per_gas(1_000_000_000) + .max_fee_per_gas(u128::from(base_fee)) .max_priority_fee_per_gas(0) .nonce(nonce) .chain_id(self.chain_id); @@ -372,10 +385,7 @@ impl L2BridgeHandlerOps for L2ExecutionLayer { // Surge: L2 mempool tx scanning and simulation -/// `Bridge.sendMessage(Message)` selector — used for call-based detection -/// in the trace tree because the L2 bridge is behind a DELEGATECALL proxy -/// and the Nethermind callTracer doesn't surface event logs from proxied calls. -const SEND_MESSAGE_SELECTOR: [u8; 4] = [0x1b, 0xdb, 0x00, 0x37]; +use crate::shared_abi::SEND_MESSAGE_SELECTOR; impl L2ExecutionLayer { /// Trace a transaction to detect any `Bridge.sendMessage` call it makes. 
diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs index 06599ca5..50a7a479 100644 --- a/realtime/src/lib.rs +++ b/realtime/src/lib.rs @@ -136,6 +136,7 @@ pub async fn create_realtime_node( let preconf_only = realtime_config.preconf_only; let proof_request_bypass = realtime_config.proof_request_bypass; let bridge_rpc_addr = realtime_config.bridge_rpc_addr.clone(); + let user_op_status_db_path = realtime_config.user_op_status_db_path.clone(); let raiko_client = raiko::RaikoClient::new(&realtime_config); let node = Node::new( @@ -152,6 +153,7 @@ pub async fn create_realtime_node( preconf_only, proof_request_bypass, bridge_rpc_addr, + user_op_status_db_path, ) .await .map_err(|e| anyhow::anyhow!("Failed to create Node: {}", e))?; diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index 86db01c0..49c90bae 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -55,6 +55,7 @@ impl Node { preconf_only: bool, proof_request_bypass: bool, bridge_rpc_addr: String, + user_op_status_db_path: String, ) -> Result { let operator = Operator::new( ethereum_l1.execution_layer.clone(), @@ -85,6 +86,7 @@ impl Node { basefee_sharing_pctg, proof_request_bypass, bridge_rpc_addr, + user_op_status_db_path, ) .await .map_err(|e| anyhow::anyhow!("Failed to create BatchManager: {}", e))?; diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index b3f123e7..cb85a620 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -73,11 +73,16 @@ impl AsyncSubmitter { /// Submit a proposal asynchronously. Spawns a background task that fetches the ZK proof /// from Raiko and then sends the L1 transaction. Results are retrieved via `try_recv_result`. 
- pub fn submit(&mut self, proposal: Proposal, status_store: Option) { - assert!( - !self.is_busy(), - "Cannot submit while another submission is in flight" - ); + pub fn submit( + &mut self, + proposal: Proposal, + status_store: Option, + ) -> Result<(), Error> { + if self.is_busy() { + return Err(anyhow::anyhow!( + "Cannot submit while another submission is in flight" + )); + } let (result_tx, result_rx) = oneshot::channel(); let raiko_client = self.raiko_client.clone(); @@ -129,6 +134,7 @@ impl AsyncSubmitter { }); self.in_flight = Some(InFlightSubmission { result_rx, handle }); + Ok(()) } pub fn abort(&mut self) { diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index d36465de..e88c4ecd 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -151,9 +151,10 @@ impl BridgeHandler { taiko: Arc, cancellation_token: CancellationToken, last_finalized_block_number: Arc, + status_store_path: &str, ) -> Result { let (tx, rx) = mpsc::channel::(1024); - let status_store = UserOpStatusStore::open("data/user_op_status")?; + let status_store = UserOpStatusStore::open(status_store_path)?; let rpc_context = BridgeRpcContext { tx, diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 916033c9..0b1fefc7 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -68,6 +68,7 @@ impl BatchManager { basefee_sharing_pctg: u8, proof_request_bypass: bool, bridge_rpc_addr: String, + user_op_status_db_path: String, ) -> Result { info!( "Batch builder config:\n\ @@ -100,6 +101,7 @@ impl BatchManager { taiko.clone(), cancel_token.clone(), last_finalized_block_number.clone(), + &user_op_status_db_path, ) .await?, )); @@ -183,7 +185,7 @@ impl BatchManager { batch.last_finalized_block_hash, ); - self.async_submitter.submit(batch, Some(status_store)); + 
self.async_submitter.submit(batch, Some(status_store))?; Ok(()) } @@ -289,6 +291,7 @@ impl BatchManager { async fn add_pending_user_ops_to_draft_block( &mut self, l2_draft_block: &mut L2BlockV2Draft, + base_fee: u64, ) -> Result)>, anyhow::Error> { let routed = { let mut handler = self.bridge_handler.lock().await; @@ -304,7 +307,7 @@ impl BatchManager { let l2_call_bridge_tx = self .taiko .l2_execution_layer() - .construct_l2_call_tx(routed.l2_call.message_from_l1) + .construct_l2_call_tx(routed.l2_call.message_from_l1, base_fee) .await?; info!("Inserting processMessage tx into L2 block"); @@ -399,15 +402,20 @@ impl BatchManager { ) -> Result { let mut anchor_signal_slots: Vec> = vec![]; - // Process L1→L2 UserOps (via surge_sendUserOp RPC) + // Stage additions for the in-flight proposal but defer committing them + // to `batch_builder` until `advance_head_to_new_l2_block` succeeds. + // Otherwise a failed advance leaks state (signal slots / user ops) + // into the proposal that does not correspond to any built L2 block. + debug!("Checking for pending UserOps (L1→L2 deposits)"); - if let Some((user_op_data, signal_slot)) = self - .add_pending_user_ops_to_draft_block(&mut l2_draft_block) - .await? - { - self.batch_builder.add_user_op(user_op_data)?; - self.batch_builder.add_signal_slot(signal_slot)?; - anchor_signal_slots.push(signal_slot); + let pending_user_op = self + .add_pending_user_ops_to_draft_block( + &mut l2_draft_block, + l2_slot_context.info.base_fee(), + ) + .await?; + if let Some((_, signal_slot)) = &pending_user_op { + anchor_signal_slots.push(*signal_slot); } else { debug!("No L1→L2 UserOps pending"); } @@ -420,30 +428,18 @@ impl BatchManager { // Copy rather than take — the pre-simulated slot is passed as a hint // to `find_l1_call` after preconf so the L1Call's requiredReturnSignal - // matches the slot we inject into the anchor. Cleared below. + // matches the slot we inject into the anchor. 
The take() happens only + // in the Ok arm below so a failed advance lets the next attempt + // re-discover the same slot from the mempool scan. let pending_return_slot_hint = self.pending_return_signal; - if let Some(return_slot) = self.pending_return_signal.take() { + if let Some(return_slot) = self.pending_return_signal { info!( "Injecting L2→L1→L2 return signal into anchor fast signals: slot={}", return_slot ); - self.batch_builder.add_signal_slot(return_slot)?; anchor_signal_slots.push(return_slot); } - if let Some(tx_hash) = self.pending_mempool_tx_hash.take() { - self.batch_builder.add_l2_mempool_tx_hash(tx_hash)?; - let status_store = self.bridge_handler.lock().await.status_store(); - status_store.set_by_hash( - tx_hash, - &crate::node::proposal_manager::bridge_handler::UserOpStatus::Pending, - ); - info!( - "Tracking L2→L1→L2 mempool tx {} under status store (Pending)", - tx_hash - ); - } - let payload = self.batch_builder.add_l2_draft_block(l2_draft_block)?; match self @@ -457,6 +453,27 @@ impl BatchManager { .await { Ok(preconfed_block) => { + // Commit staged additions now that the L2 block is built. 
+ if let Some((user_op_data, signal_slot)) = pending_user_op { + self.batch_builder.add_user_op(user_op_data)?; + self.batch_builder.add_signal_slot(signal_slot)?; + } + if let Some(return_slot) = self.pending_return_signal.take() { + self.batch_builder.add_signal_slot(return_slot)?; + } + if let Some(tx_hash) = self.pending_mempool_tx_hash.take() { + self.batch_builder.add_l2_mempool_tx_hash(tx_hash)?; + let status_store = self.bridge_handler.lock().await.status_store(); + status_store.set_by_hash( + tx_hash, + &crate::node::proposal_manager::bridge_handler::UserOpStatus::Pending, + ); + info!( + "Tracking L2→L1→L2 mempool tx {} under status store (Pending)", + tx_hash + ); + } + self.batch_builder.set_proposal_checkpoint(Checkpoint { blockNumber: U48::from(preconfed_block.number), stateRoot: preconfed_block.state_root, @@ -485,6 +502,11 @@ impl BatchManager { Err(err) => { error!("Failed to advance head to new L2 block: {}", err); self.remove_last_l2_block(); + // Leave `pending_return_signal` / `pending_mempool_tx_hash` + // intact so the next attempt re-injects them. The L1→L2 user op + // was consumed from the bridge handler queue and is dropped on + // this failure — it is not committed to the proposal so no + // L1/L2 state mismatch occurs. 
Err(anyhow::anyhow!( "Failed to advance head to new L2 block: {}", err diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 50356d41..1f5837c3 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -96,8 +96,12 @@ pub struct RaikoProof { impl RaikoClient { pub fn new(config: &RealtimeConfig) -> Self { + let client = Client::builder() + .timeout(Duration::from_secs(config.raiko_timeout_sec)) + .build() + .expect("reqwest client builder with a timeout should not fail"); Self { - client: Client::new(), + client, base_url: config.raiko_url.clone(), api_key: config.raiko_api_key.clone(), proof_type: config.proof_type, diff --git a/realtime/src/shared_abi/mod.rs b/realtime/src/shared_abi/mod.rs index 90c70dcc..963511fd 100644 --- a/realtime/src/shared_abi/mod.rs +++ b/realtime/src/shared_abi/mod.rs @@ -1 +1,5 @@ pub mod bindings; + +/// Selector for `Bridge.sendMessage((Message))`. Used by the L1 / L2 callback +/// simulation to detect outbound bridge messages in a call trace. +pub const SEND_MESSAGE_SELECTOR: [u8; 4] = [0x1b, 0xdb, 0x00, 0x37]; diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index f9d0a66c..4501100e 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -16,7 +16,9 @@ pub struct RealtimeConfig { pub proof_type: ProofType, pub raiko_poll_interval_ms: u64, pub raiko_max_retries: u32, + pub raiko_timeout_sec: u64, pub bridge_rpc_addr: String, + pub user_op_status_db_path: String, pub preconf_only: bool, pub proof_request_bypass: bool, /// When true, overrides the SubProof bit flag to MOCK_ECDSA (0b00000001) @@ -56,8 +58,18 @@ impl ConfigTrait for RealtimeConfig { .and_then(|v| v.parse().ok()) .unwrap_or(60); + let raiko_timeout_sec: u64 = std::env::var("RAIKO_TIMEOUT_SEC") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(30); + + // Default to loopback so the unauthenticated surge_* JSON-RPC endpoints + // are not exposed externally unless an operator opts in. 
 let bridge_rpc_addr =
-            std::env::var("BRIDGE_RPC_ADDR").unwrap_or_else(|_| "0.0.0.0:4545".to_string());
+            std::env::var("BRIDGE_RPC_ADDR").unwrap_or_else(|_| "127.0.0.1:4545".to_string());
+
+        let user_op_status_db_path = std::env::var("USER_OP_STATUS_DB_PATH")
+            .unwrap_or_else(|_| "data/user_op_status".to_string());
 
         let preconf_only = std::env::var("PRECONF_ONLY")
             .map(|v| v.to_lowercase() != "false" && v != "0")
@@ -81,7 +93,9 @@ impl ConfigTrait for RealtimeConfig {
             proof_type,
             raiko_poll_interval_ms,
             raiko_max_retries,
+            raiko_timeout_sec,
             bridge_rpc_addr,
+            user_op_status_db_path,
             preconf_only,
             proof_request_bypass,
             mock_mode,
@@ -94,13 +108,20 @@ impl fmt::Display for RealtimeConfig {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         writeln!(f, "RealTime inbox: {:#?}", self.realtime_inbox)?;
         writeln!(f, "Proposer multicall: {:#?}", self.proposer_multicall)?;
+        writeln!(f, "L1 bridge: {:#?}", self.bridge)?;
+        writeln!(f, "L2 signal service: {:#?}", self.l2_signal_service)?;
         writeln!(f, "Raiko URL: {}", self.raiko_url)?;
+        writeln!(f, "Raiko max retries: {}", self.raiko_max_retries)?;
+        writeln!(f, "Raiko timeout: {}s", self.raiko_timeout_sec)?;
         writeln!(
             f,
             "Proof type: {} (bit flag: {})",
             self.proof_type, self.proof_type.proof_bit_flag()
         )?;
+        writeln!(f, "Mock mode: {}", self.mock_mode)?;
+        writeln!(f, "Bridge RPC addr: {}", self.bridge_rpc_addr)?;
+        writeln!(f, "User op status DB path: {}", self.user_op_status_db_path)?;
         writeln!(f, "Preconf only: {}", self.preconf_only)?;
         writeln!(f, "Proof request bypass: {}", self.proof_request_bypass)?;
         Ok(())

From 6cd45bf884f9a83bcd909ffb44137f379b7f1658 Mon Sep 17 00:00:00 2001
From: Maciej Skrzypkowski
Date: Thu, 30 Apr 2026 21:51:19 +0200
Subject: [PATCH 13/13] Changed error to debug in TransactionMonitorThread::notify_result, because it's expected that there could be no one to listen for the result.
--- common/src/shared/transaction_monitor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/src/shared/transaction_monitor.rs b/common/src/shared/transaction_monitor.rs index 8c784777..7f5a4b54 100644 --- a/common/src/shared/transaction_monitor.rs +++ b/common/src/shared/transaction_monitor.rs @@ -225,7 +225,7 @@ impl TransactionMonitorThread { fn notify_result(self, success: bool) { if let Err(err) = self.tx_result_notifier.send(success) { - error!("Failed to send transaction result signal: {}", err); + debug!("Transaction result ({err}) signal dropped (receiver not listening)"); } }