From d63daed2fe94374c528f8b1c2e956218a71091a1 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Mon, 30 Mar 2026 17:10:28 +0530 Subject: [PATCH 1/9] miner, core, consensus/bor: pipelined state root computation (PoC) --- consensus/bor/bor.go | 164 ++-- core/block_validator.go | 15 - core/blockchain.go | 937 ++++++++-------------- core/blockchain_reader.go | 7 - core/events.go | 7 - core/evm.go | 59 ++ core/evm_speculative_test.go | 232 ++++++ core/rawdb/accessors_state.go | 18 - core/rawdb/schema.go | 7 - core/state/statedb.go | 44 +- core/state/statedb_pipeline_test.go | 129 +++ core/stateless.go | 26 +- core/stateless/witness.go | 49 +- core/stateless/witness_test.go | 105 +-- core/txpool/blobpool/blobpool_test.go | 2 +- core/txpool/blobpool/interface.go | 5 +- core/txpool/legacypool/legacypool.go | 41 +- core/txpool/legacypool/legacypool_test.go | 2 +- core/txpool/txpool.go | 30 +- core/vm/contracts_test.go | 1 - docs/cli/default_config.toml | 2 + docs/cli/server.md | 4 + eth/api_backend.go | 29 +- eth/handler.go | 30 - internal/cli/server/config.go | 10 + internal/cli/server/flags.go | 14 + miner/fake_miner.go | 2 +- miner/miner.go | 2 + miner/pipeline.go | 825 +++++++++++++++++++ miner/speculative_chain_reader.go | 115 +++ miner/speculative_chain_reader_test.go | 204 +++++ miner/worker.go | 70 +- params/config.go | 7 - tests/bor/bor_test.go | 455 +++-------- tests/bor/helper.go | 67 ++ 35 files changed, 2432 insertions(+), 1284 deletions(-) create mode 100644 core/evm_speculative_test.go create mode 100644 core/state/statedb_pipeline_test.go create mode 100644 miner/pipeline.go create mode 100644 miner/speculative_chain_reader.go create mode 100644 miner/speculative_chain_reader_test.go diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 59676edd0b..63a38bd902 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -58,7 +58,18 @@ const ( inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory inmemorySignatures = 4096 // 
Number of recent block signatures to keep in memory veblopBlockTimeout = time.Second * 8 // Timeout for new span check. DO NOT CHANGE THIS VALUE. - minBlockBuildTime = 1 * time.Second // Minimum remaining time before extending the block deadline to avoid empty blocks + // minBlockBuildTime is the minimum remaining time before Prepare() extends + // the block deadline to avoid producing empty blocks. If time.Until(target) + // is less than this value, the target timestamp is pushed forward by one + // blockTime period. + // + // This interacts with pipelined SRC: when a speculative block is aborted, + // the pipeline triggers a fresh commitWork. On chains where blockTime == + // minBlockBuildTime (e.g., 1-second devnets), the remaining time after the + // abort (~990ms) is always less than minBlockBuildTime, so the timestamp is + // always pushed — adding an extra 1s gap. On mainnet (2s blocks), the + // remaining time (~1.99s) exceeds minBlockBuildTime, so no push occurs. + minBlockBuildTime = 1 * time.Second ) // Bor protocol constants. @@ -1361,25 +1372,9 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ return nil, nil, 0, err } + // No block rewards in PoA, so the state remains as it is start := time.Now() - - // No block rewards in PoA, so the state remains as it is. - // Under delayed SRC, header.Root stores the parent block's actual state root; - // the goroutine in BlockChain.spawnSRCGoroutine handles this block's root. 
- if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) { - dsrcReader, ok := chain.(core.DelayedSRCReader) - if !ok { - return nil, nil, 0, fmt.Errorf("chain does not implement DelayedSRCReader") - } - parentRoot := dsrcReader.GetPostStateRoot(header.ParentHash) - if parentRoot == (common.Hash{}) { - return nil, nil, 0, fmt.Errorf("delayed state root unavailable for parent %s", header.ParentHash) - } - header.Root = parentRoot - } else { - header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - } - + header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) commitTime := time.Since(start) // Uncles are dropped @@ -1404,6 +1399,81 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ return block, receipts, commitTime, nil } +// FinalizeForPipeline runs the same post-transaction state modifications as +// FinalizeAndAssemble (state sync, span commits, contract code changes) but +// does NOT compute IntermediateRoot or assemble the block. It returns the +// stateSyncData so the caller can pass it to AssembleBlock later after the +// background SRC goroutine has computed the state root. +// +// This is the pipelined SRC equivalent of the first half of FinalizeAndAssemble. 
+func (c *Bor) FinalizeForPipeline(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, body *types.Body, receipts []*types.Receipt) ([]*types.StateSyncData, error) { + headerNumber := header.Number.Uint64() + if body.Withdrawals != nil || header.WithdrawalsHash != nil { + return nil, consensus.ErrUnexpectedWithdrawals + } + if header.RequestsHash != nil { + return nil, consensus.ErrUnexpectedRequests + } + + var ( + stateSyncData []*types.StateSyncData + err error + ) + + if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { + cx := statefull.ChainContext{Chain: chain, Bor: c} + + if !c.config.IsRio(header.Number) { + if err = c.checkAndCommitSpan(statedb, header, cx); err != nil { + log.Error("Error while committing span", "error", err) + return nil, err + } + } + + if c.HeimdallClient != nil { + stateSyncData, err = c.CommitStates(statedb, header, cx) + if err != nil { + log.Error("Error while committing states", "error", err) + return nil, err + } + } + } + + if err = c.changeContractCodeIfNeeded(headerNumber, statedb); err != nil { + log.Error("Error changing contract code", "error", err) + return nil, err + } + + return stateSyncData, nil +} + +// AssembleBlock constructs the final block from a pre-computed state root, +// without calling IntermediateRoot. This is used by pipelined SRC where the +// state root is computed by a background goroutine. +// +// stateSyncData is the state sync data collected during Finalize(). If non-nil +// and the Madhugiri fork is active, a StateSyncTx is appended to the body. 
+func (c *Bor) AssembleBlock(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, body *types.Body, receipts []*types.Receipt, stateRoot common.Hash, stateSyncData []*types.StateSyncData) (*types.Block, []*types.Receipt, error) { + headerNumber := header.Number.Uint64() + + header.Root = stateRoot + header.UncleHash = types.CalcUncleHash(nil) + + if len(stateSyncData) > 0 && c.config != nil && c.config.IsMadhugiri(big.NewInt(int64(headerNumber))) { + stateSyncTx := types.NewTx(&types.StateSyncTx{ + StateSyncData: stateSyncData, + }) + body.Transactions = append(body.Transactions, stateSyncTx) + receipts = insertStateSyncTransactionAndCalculateReceipt(stateSyncTx, header, body, statedb, receipts) + } else { + bc := chain.(core.BorStateSyncer) + bc.SetStateSync(stateSyncData) + } + + block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)) + return block, receipts, nil +} + // Authorize injects a private key into the consensus engine to mint new blocks // with. func (c *Bor) Authorize(currentSigner common.Address, signFn SignerFn) { @@ -1597,38 +1667,15 @@ func (c *Bor) checkAndCommitSpan( headerNumber := header.Number.Uint64() tempState := state.Inner().Copy() - if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) { - // Under delayed SRC, skip ResetPrefetcher + StartPrefetcher. - // The full-node state is at root_{N-2} with a FlatDiff overlay - // approximating root_{N-1}. ResetPrefetcher clears that overlay, - // causing GetCurrentSpan to read stale root_{N-2} values — different - // from what the stateless node sees at root_{N-1}. The mismatch leads - // to different storage-slot access patterns, so the SRC goroutine - // captures the wrong trie nodes. - // - // StartPrefetcher is also unnecessary: the witness is built by the - // SRC goroutine, and tempState's reads are captured via - // CommitSnapshot + TouchAllAddresses below. 
- } else { - tempState.ResetPrefetcher() - tempState.StartPrefetcher("bor", state.Witness(), nil) - } + tempState.ResetPrefetcher() + tempState.StartPrefetcher("bor", state.Witness(), nil) span, err := c.spanner.GetCurrentSpan(ctx, header.ParentHash, tempState) if err != nil { return err } - if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) { - // Under delayed SRC, use CommitSnapshot instead of IntermediateRoot - // to capture all accesses without computing a trie root. Touch - // every address on the main state so they appear in the block's - // FlatDiff and the SRC goroutine includes their trie paths in - // the witness. - tempState.CommitSnapshot(false).TouchAllAddresses(state.Inner()) - } else { - tempState.IntermediateRoot(false) - } + tempState.IntermediateRoot(false) if c.needToCommitSpan(span, headerNumber) { return c.FetchAndCommitSpan(ctx, span.Id+1, state, header, chain) @@ -1765,30 +1812,21 @@ func (c *Bor) CommitStates( if c.config.IsIndore(header.Number) { // Fetch the LastStateId from contract via current state instance tempState := state.Inner().Copy() - if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) { - // See comment in checkAndCommitSpan: under delayed SRC, - // skip ResetPrefetcher + StartPrefetcher to preserve the - // FlatDiff overlay and avoid stale root_{N-2} reads. - } else { - tempState.ResetPrefetcher() - tempState.StartPrefetcher("bor", state.Witness(), nil) - } + tempState.ResetPrefetcher() + tempState.StartPrefetcher("bor", state.Witness(), nil) lastStateIDBig, err = c.GenesisContractsClient.LastStateId(tempState, number-1, header.ParentHash) if err != nil { return nil, err } - if c.chainConfig.Bor != nil && c.chainConfig.Bor.IsDelayedSRC(header.Number) { - // Under delayed SRC, use CommitSnapshot instead of - // IntermediateRoot to capture all accesses without computing - // a trie root. 
Touch every address on the main state so they - // appear in the block's FlatDiff and the SRC goroutine - // includes their trie paths in the witness. - tempState.CommitSnapshot(false).TouchAllAddresses(state.Inner()) - } else { - tempState.IntermediateRoot(false) - } + tempState.IntermediateRoot(false) + + // Propagate addresses accessed during LastStateId back to the original + // state so they appear in the FlatDiff ReadSet. Without this, the + // pipelined SRC goroutine's witness won't capture their trie proof + // nodes, causing stateless execution to fail with missing trie nodes. + tempState.PropagateReadsTo(state.Inner()) stateSyncDelay := c.config.CalculateStateSyncDelay(number) to = time.Unix(int64(header.Time-stateSyncDelay), 0) diff --git a/core/block_validator.go b/core/block_validator.go index dd5453db2e..e17fb4f6b7 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -167,20 +166,6 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD } else if res.Requests != nil { return errors.New("block has requests before prague fork") } - // Under delayed SRC, header.Root = state root of the PARENT block. - // Verify it matches the persisted delayed root and skip IntermediateRoot — - // the background goroutine spawned by spawnSRCGoroutine computes root_N. 
- if v.config.Bor != nil && v.config.Bor.IsDelayedSRC(header.Number) { - parentActualRoot := v.bc.GetPostStateRoot(header.ParentHash) - if parentActualRoot == (common.Hash{}) { - return fmt.Errorf("delayed state root unavailable for parent %x", header.ParentHash) - } - if header.Root != parentActualRoot { - return fmt.Errorf("invalid delayed state root (header: %x, parent actual: %x)", header.Root, parentActualRoot) - } - return nil - } - // Validate the state root against the received state root and throw // an error if they don't match. if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { diff --git a/core/blockchain.go b/core/blockchain.go index 286675d043..e3ebe86177 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -343,7 +343,7 @@ type txLookup struct { transaction *types.Transaction } -// pendingSRCState tracks an in-flight state root computation goroutine. +// pendingSRCState tracks an in-flight pipelined state root computation goroutine. // root, witness, and err are written by the goroutine before wg.Done(); // callers block on wg.Wait() and read them afterwards. type pendingSRCState struct { @@ -351,7 +351,7 @@ type pendingSRCState struct { blockNumber uint64 wg sync.WaitGroup root common.Hash - witness *stateless.Witness // complete witness for stateless execution of this block + witness []byte // RLP-encoded witness built by the SRC goroutine err error } @@ -389,21 +389,10 @@ type BlockChain struct { chainHeadFeed event.Feed logsFeed event.Feed blockProcFeed event.Feed - witnessFeed event.Feed blockProcCounter int32 scope event.SubscriptionScope genesisBlock *types.Block - // lastFlatDiff holds the FlatDiff from the most recently committed block's - // CommitSnapshot. Under delayed SRC, the miner uses it together with the - // grandparent's committed root to open a statedb via NewWithFlatBase, - // allowing block N+1 execution to start before G_N finishes. 
- // lastFlatDiffBlockHash is the hash of the block that produced lastFlatDiff, - // used by insertChain to verify the diff is for the correct parent before seeding. - lastFlatDiff *state.FlatDiff - lastFlatDiffBlockHash common.Hash - lastFlatDiffMu sync.RWMutex - // This mutex synchronizes chain write operations. // Readers don't need to take it, they can just read the database. chainmu *syncx.ClosableMutex @@ -452,10 +441,18 @@ type BlockChain struct { chainSideFeed event.Feed // Side chain data feed (removed from geth but needed in bor) milestoneFetcher func(ctx context.Context) (uint64, error) // Function to fetch the latest milestone end block from Heimdall. - // DelayedSRC: concurrent state root calculation. - // pendingSRC tracks the in-flight state root goroutine for the most recent block. + // Pipelined SRC: concurrent state root calculation. + // pendingSRC tracks the in-flight SRC goroutine for the most recent block. pendingSRC *pendingSRCState pendingSRCMu sync.Mutex + + // lastFlatDiff holds the FlatDiff from the most recently committed block. + // The miner uses it together with the grandparent's committed root to open + // a StateDB via NewWithFlatBase, allowing block N+1 execution to start + // before the SRC goroutine finishes. + lastFlatDiff *state.FlatDiff + lastFlatDiffBlockHash common.Hash + lastFlatDiffMu sync.RWMutex } // NewBlockChain returns a fully initialised block chain using information @@ -594,16 +591,6 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, } } } - // Delayed SRC crash recovery: if the head block is in the delayed-SRC range - // and its post-execution state root is missing, re-execute the head block to - // recover the FlatDiff and spawn the SRC goroutine. 
- head = bc.CurrentBlock() // re-read, may have been rewound above - if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(head.Number) && !bc.cfg.Stateless { - postRoot := bc.GetPostStateRoot(head.Hash()) - if postRoot == (common.Hash{}) || !bc.HasState(postRoot) { - bc.recoverDelayedSRC(head) - } - } // Ensure that a previous crash in SetHead doesn't leave extra ancients //nolint:nestif if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 { @@ -745,18 +732,7 @@ func NewParallelBlockChain(db ethdb.Database, genesis *Genesis, engine consensus return bc, nil } -// ProcessBlock executes the transactions in block, validates state, and returns -// the resulting receipts, logs, gas used, and updated StateDB. func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) { - return bc.processBlock(block, parent, nil, witness, followupInterrupt) -} - -// processBlock is the internal implementation of ProcessBlock. -// When flatDiff is non-nil (delayed SRC path), each statedb is opened at -// parent.Root and then has flatDiff applied as an in-memory overlay, allowing -// block N+1's transaction execution to begin concurrently with the background -// goroutine that commits block N's state root to the path DB. 
-func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, flatDiff *state.FlatDiff, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) { // Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -790,23 +766,14 @@ func (bc *BlockChain) processBlock(block *types.Block, parent *types.Header, fla if err != nil { return nil, nil, 0, nil, 0, err } - if flatDiff != nil { - throwaway.SetFlatDiffRef(flatDiff) - } statedb, err := state.NewWithReader(parentRoot, bc.statedb, process) if err != nil { return nil, nil, 0, nil, 0, err } - if flatDiff != nil { - statedb.SetFlatDiffRef(flatDiff) - } parallelStatedb, err := state.NewWithReader(parentRoot, bc.statedb, process) if err != nil { return nil, nil, 0, nil, 0, err } - if flatDiff != nil { - parallelStatedb.SetFlatDiffRef(flatDiff) - } // Upload the statistics of reader at the end defer func() { @@ -1085,7 +1052,6 @@ func (bc *BlockChain) loadLastState() error { if pruning := bc.historyPrunePoint.Load(); pruning != nil { log.Info("Chain history is pruned", "earliest", pruning.BlockNumber, "hash", pruning.BlockHash) } - return nil } @@ -2469,392 +2435,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. return stateSyncLogs, nil } -// writeBlockData writes the block data (TD, block body, receipts, preimages, -// witness) to the database WITHOUT committing trie state. Used by the delayed-SRC -// path where a background goroutine handles CommitWithUpdate concurrently. -// Returns state-sync logs (bor-specific logs not covered by receipts) for feed emission. 
-func (bc *BlockChain) writeBlockData(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB) ([]*types.Log, error) { - ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) - if ptd == nil { - return nil, consensus.ErrUnknownAncestor - } - externTd := new(big.Int).Add(block.Difficulty(), ptd) - - blockBatch := bc.db.NewBatch() - rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) - rawdb.WriteBlock(blockBatch, block) - rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) - - var stateSyncLogs []*types.Log - blockLogs := statedb.Logs() - if len(blockLogs) > 0 { - if !(bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsMadhugiri(block.Number())) && len(blockLogs) > len(logs) { - sort.SliceStable(blockLogs, func(i, j int) bool { - return blockLogs[i].Index < blockLogs[j].Index - }) - stateSyncLogs = blockLogs[len(logs):] - types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs))) - - var cumulativeGasUsed uint64 - if len(receipts) > 0 { - cumulativeGasUsed = receipts[len(receipts)-1].CumulativeGasUsed - } - rawdb.WriteBorReceipt(blockBatch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{ - Status: types.ReceiptStatusSuccessful, - Logs: stateSyncLogs, - CumulativeGasUsed: cumulativeGasUsed, - }) - rawdb.WriteBorTxLookupEntry(blockBatch, block.Hash(), block.NumberU64()) - } - } - - rawdb.WritePreimages(blockBatch, statedb.Preimages()) - - // Under delayed SRC, the witness built during tx execution (via NewWithFlatBase) - // is incomplete: accounts in the FlatDiff overlay bypass the trie, so their MPT - // proof nodes are never captured. The complete witness is built by the SRC - // goroutine (spawnSRCGoroutine) and written there after CommitWithUpdate. 
- - if err := blockBatch.Write(); err != nil { - log.Crit("Failed to write block into disk", "err", err) - } - rawdb.WriteBytecodeSyncLastBlock(bc.db, block.NumberU64()) - return stateSyncLogs, nil -} - -// writeBlockDataAndSetHead is the delayed-SRC analogue of writeBlockAndSetHead: -// it persists block data without trie state (trie commit is done by the SRC goroutine) -// and then applies the block as the new chain head. -func (bc *BlockChain) writeBlockDataAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool) (WriteStatus, error) { - stateSyncLogs, err := bc.writeBlockData(block, receipts, logs, statedb) - if err != nil { - return NonStatTy, err - } - - currentBlock := bc.CurrentBlock() - reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header()) - if err != nil { - return NonStatTy, err - } - - var status WriteStatus - if reorg { - if block.ParentHash() != currentBlock.Hash() { - if err = bc.reorg(currentBlock, block.Header()); err != nil { - return NonStatTy, err - } - } - status = CanonStatTy - } else { - status = SideStatTy - } - - if status == CanonStatTy { - bc.writeHeadBlock(block) - - bc.chainFeed.Send(ChainEvent{ - Header: block.Header(), - Receipts: receipts, - Transactions: block.Transactions(), - }) - - if len(logs) > 0 { - bc.logsFeed.Send(logs) - } - if len(stateSyncLogs) > 0 { - bc.logsFeed.Send(stateSyncLogs) - } - if emitHeadEvent { - bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()}) - bc.stateSyncMu.RLock() - for _, data := range bc.GetStateSync() { - bc.stateSyncFeed.Send(StateSyncEvent{Data: data}) - } - bc.stateSyncMu.RUnlock() - } - } else { - bc.chainSideFeed.Send(ChainSideEvent{Header: block.Header()}) - - bc.chain2HeadFeed.Send(Chain2HeadEvent{ - Type: Chain2HeadForkEvent, - NewChain: []*types.Header{block.Header()}, - }) - } - - return status, nil -} - -// recoverDelayedSRC re-executes the head block to recover the FlatDiff -// and spawn the SRC 
goroutine after a crash. This is needed because -// under delayed SRC the background goroutine may not have finished -// (or its results may not have been journaled) before the crash. -func (bc *BlockChain) recoverDelayedSRC(head *types.Header) { - block := bc.GetBlock(head.Hash(), head.Number.Uint64()) - if block == nil { - log.Error("Delayed SRC recovery: head block not found", "number", head.Number, "hash", head.Hash()) - return - } - - // head.Root = root_{N-1} under delayed SRC; HasState already confirmed it's available. - statedb, err := bc.StateAt(head.Root) - if err != nil { - log.Error("Delayed SRC recovery: failed to open state", "root", head.Root, "err", err) - return - } - - _, err = bc.processor.Process(block, statedb, bc.cfg.VmConfig, nil, context.Background()) - if err != nil { - log.Error("Delayed SRC recovery: block re-execution failed", "number", head.Number, "err", err) - return - } - - flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(head.Number)) - - bc.lastFlatDiffMu.Lock() - bc.lastFlatDiff = flatDiff - bc.lastFlatDiffBlockHash = block.Hash() - bc.lastFlatDiffMu.Unlock() - - bc.spawnSRCGoroutine(block, head.Root, flatDiff) - log.Info("Delayed SRC recovery: re-executed head block", "number", head.Number, "hash", head.Hash()) -} - -// GetPostStateRoot returns the actual post-execution state root for the given -// block. It checks, in order: -// -// 1. The in-flight SRC goroutine (blocks until it finishes). -// 2. The canonical child's header (block[N+1].Root == root_N by protocol invariant). -// 3. The persisted post-state root key-value store. -// 4. For pre-fork blocks, header.Root is the block's own post-execution root. -func (bc *BlockChain) GetPostStateRoot(blockHash common.Hash) common.Hash { - // 1. Check in-flight goroutine. 
- bc.pendingSRCMu.Lock() - pending := bc.pendingSRC - bc.pendingSRCMu.Unlock() - - if pending != nil && pending.blockHash == blockHash { - pending.wg.Wait() - if pending.err != nil { - log.Error("Delayed SRC goroutine failed", "blockHash", blockHash, "err", pending.err) - return common.Hash{} - } - return pending.root - } - - // 2-4. No in-flight goroutine; resolve from on-chain data. - header := bc.GetHeaderByHash(blockHash) - if header == nil { - return common.Hash{} - } - if bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(header.Number) { - return header.Root - } - child := bc.GetHeaderByNumber(header.Number.Uint64() + 1) - if child != nil && child.ParentHash == blockHash { - return child.Root - } - return rawdb.ReadPostStateRoot(bc.db, blockHash) -} - -// PostExecutionStateAt returns a StateDB representing the post-execution state -// of the given block header. Under delayed SRC, if the FlatDiff for this block -// is still cached (i.e. this is the chain head), it returns a non-blocking -// overlay state via NewWithFlatBase — matching the miner's approach. -// Otherwise it falls back to resolving the actual state root (which may block -// if the background SRC goroutine is still running). -func (bc *BlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { - // Fast path: if delayed SRC is active and we have the FlatDiff for this - // block, use it as an overlay on top of header.Root (= root_{N-1}). - if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(header.Number) { - bc.lastFlatDiffMu.RLock() - flatDiff := bc.lastFlatDiff - flatDiffHash := bc.lastFlatDiffBlockHash - bc.lastFlatDiffMu.RUnlock() - - if flatDiff != nil && flatDiffHash == header.Hash() { - return state.NewWithFlatBase(header.Root, bc.statedb, flatDiff) - } - } - - // Slow path: resolve the actual post-execution root. - // For delayed-SRC blocks this may block on the background goroutine. 
- // For pre-fork blocks, GetPostStateRoot returns common.Hash{} and we - // use header.Root directly. - root := header.Root - if r := bc.GetPostStateRoot(header.Hash()); r != (common.Hash{}) { - root = r - } - return bc.StateAt(root) -} - -// expectedPreStateRoot returns the parent header's on-chain Root field. -// This is what witness.Root() (= Headers[0].Root) should equal — it validates -// that the witness carries the correct parent header. -// -// Note: under delayed SRC, parentHeader.Root = root_{N-2}, not root_{N-1}. -// The actual pre-state root validation (block.Root() == root_{N-1}) is done -// separately in writeBlockAndSetHead. -func (bc *BlockChain) expectedPreStateRoot(block *types.Block) (common.Hash, error) { - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return common.Hash{}, fmt.Errorf("parent header not found: %s (block %d)", block.ParentHash(), block.NumberU64()) - } - return parent.Root, nil -} - -// GetDelayedWitnessForBlock returns the stateless witness for block blockHash -// that was built as a byproduct of the delayed SRC goroutine. It blocks until -// the goroutine finishes, identical in structure to GetPostStateRoot. -// Returns nil if the witness was not built (e.g. pre-fork block or goroutine -// failure) or if the goroutine for blockHash is no longer in flight. -func (bc *BlockChain) GetDelayedWitnessForBlock(blockHash common.Hash) *stateless.Witness { - bc.pendingSRCMu.Lock() - pending := bc.pendingSRC - bc.pendingSRCMu.Unlock() - - if pending != nil && pending.blockHash == blockHash { - pending.wg.Wait() - if pending.err != nil { - return nil - } - return pending.witness - } - // Witness is not retained after the goroutine is superseded; callers - // that need it must request it before the next block's goroutine starts. - return nil -} - -// spawnSRCGoroutine launches a background goroutine that computes the actual -// state root for block by replaying flatDiff on top of parentRoot. 
-// The result is stored in pending.root; pending.wg is decremented when finished. -// As a byproduct of the MPT hashing, a complete witness for stateless execution -// of block is built and stored in pending.witness. -func (bc *BlockChain) spawnSRCGoroutine(block *types.Block, parentRoot common.Hash, flatDiff *state.FlatDiff) { - pending := &pendingSRCState{ - blockHash: block.Hash(), - blockNumber: block.NumberU64(), - } - - bc.pendingSRCMu.Lock() - bc.pendingSRC = pending - bc.pendingSRCMu.Unlock() - - deleteEmptyObjects := bc.chainConfig.IsEIP158(block.Number()) - isCancun := bc.chainConfig.IsCancun(block.Number()) - - // bc.wg.Go handles Add(1)/Done() for graceful shutdown tracking. - // pending.wg tracks completion for GetPostStateRoot callers. - pending.wg.Add(1) - bc.wg.Go(func() { - defer pending.wg.Done() - - // Create a snapshot-less database so that all account and storage - // reads go directly through the MPT. This ensures the prevalueTracer - // on each trie captures every intermediate node, which is later - // flushed into the witness. Using the snapshot would bypass the trie - // and leave those proof-path nodes out of the witness. - // noSnapDB := state.NewDatabase(bc.statedb.TrieDB(), nil) - tmpDB, err := state.New(parentRoot, bc.statedb) - if err != nil { - log.Error("Delayed SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err) - pending.err = err - return - } - - // Attach a witness so that IntermediateRoot captures all root_{N-1} - // trie nodes as a byproduct of the MPT hashing. parentRoot is the - // correct pre-state root for stateless execution of block N. - witness, witnessErr := stateless.NewWitness(block.Header(), bc) - if witnessErr != nil { - log.Warn("Delayed SRC: failed to create witness", "block", block.NumberU64(), "err", witnessErr) - } else { - // Embed parentRoot as the pre-state root. NewWitness zeroed context.Root; - // a non-zero value here signals delayed SRC to witness.Root(). 
- witness.Header().Root = parentRoot - tmpDB.SetWitness(witness) - } - - // Mark all write mutations as dirty. - tmpDB.ApplyFlatDiffForCommit(flatDiff) - - // Load read-only accounts and storage slots so that the statedb - // has stateObjects (with originStorage) for every address and slot - // that was accessed during the original block execution. These reads - // go through the reader's trie; IntermediateRoot (called by - // CommitWithUpdate) then re-walks read-only accounts and storage - // through s.trie / obj.trie to capture proof-path nodes for the - // witness when no prefetcher is present. - for _, addr := range flatDiff.ReadSet { - tmpDB.GetBalance(addr) - for _, slot := range flatDiff.ReadStorage[addr] { - tmpDB.GetState(addr, slot) - } - } - // Load read-only storage for mutated accounts (slots in originStorage - // that aren't in pendingStorage). These reads capture trie nodes that - // stateless execution needs (e.g., span commit reads validator contract - // slots it doesn't write). - for addr := range flatDiff.Accounts { - for _, slot := range flatDiff.ReadStorage[addr] { - tmpDB.GetState(addr, slot) - } - } - - // Pure-destruct accounts (created AND destroyed within block N) are - // absent from root_{N-1}. SelfDestruct returns early for them, so - // CommitWithUpdate never traverses their account trie paths. The - // stateless node still needs these paths for deleteStateObject. - // Force a read to create stateObjects; IntermediateRoot captures - // the account trie nodes via the no-prefetcher witness path. - for addr := range flatDiff.Destructs { - if _, resurrected := flatDiff.Accounts[addr]; !resurrected { - tmpDB.GetBalance(addr) - } - } - - // Non-existent accounts accessed during execution (e.g., by - // state-sync EVM calls) need proof-of-absence trie nodes in the - // witness. 
GetBalance triggers a trie read through the reader; - // IntermediateRoot (called by CommitWithUpdate) then walks - // these paths through s.trie to capture the proof nodes. - for _, addr := range flatDiff.NonExistentReads { - tmpDB.GetBalance(addr) - } - - root, stateUpdate, err := tmpDB.CommitWithUpdate(block.NumberU64(), deleteEmptyObjects, isCancun) - if err != nil { - log.Error("Delayed SRC: CommitWithUpdate failed", "block", block.NumberU64(), "err", err) - pending.err = err - return - } - - if bc.stateSizer != nil { - bc.stateSizer.Notify(stateUpdate) - } - - // Write the complete witness to the database and announce it. - // This must happen after CommitWithUpdate so that all trie nodes - // (for both write and read-set accounts) have been accumulated. - if witness != nil { - var witBuf bytes.Buffer - if err := witness.EncodeRLP(&witBuf); err != nil { - log.Error("Delayed SRC: failed to encode witness", "block", block.NumberU64(), "err", err) - } else { - bc.WriteWitness(bc.db, block.Hash(), witBuf.Bytes()) - bc.witnessFeed.Send(WitnessReadyEvent{Block: block, Witness: witness}) - } - } - - // Persist so GetPostStateRoot can find this root on restart - // even before a child block is imported. - rawdb.WritePostStateRoot(bc.db, block.Hash(), root) - - // Set root and witness before wg.Done() so callers see them. - pending.root = root - pending.witness = witness - }) -} - // WriteBlockAndSetHead writes the given block and all associated state to the database, // and applies the block as the new chain head. func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { @@ -2869,52 +2449,6 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // This function expects the chain mutex to be held. 
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool, stateless bool) (status WriteStatus, err error) { - // Under delayed SRC: CommitWithUpdate is deferred — either to a background - // goroutine (miner/import path) or handled inline (stateless path). - if bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(block.Number()) { - parentRoot := bc.GetPostStateRoot(block.ParentHash()) - if parentRoot == (common.Hash{}) { - return NonStatTy, fmt.Errorf("delayed state root unavailable for parent %s", block.ParentHash()) - } - // Validate: block.Root() must equal the parent's computed post-state root. - // This mirrors ValidateState (block_validator.go:178) for stateless nodes, - // where ValidateState returns early (stateless=true skips root checks). - if block.Root() != parentRoot { - return NonStatTy, fmt.Errorf("delayed SRC state root mismatch: header.Root=%x, computedParentRoot=%x, block=%d", - block.Root(), parentRoot, block.NumberU64()) - } - - if stateless { - // Stateless path: the state root is cheap to compute on the - // witness-backed trie, so there's no need to defer it. Record - // the cross-root for the next block's validation, then fall - // through to writeBlockWithState which naturally handles code - // persistence, witness writing, etc. - crossRoot := state.IntermediateRoot(bc.chainConfig.IsEIP158(block.Number())) - pending := &pendingSRCState{ - blockHash: block.Hash(), - blockNumber: block.NumberU64(), - root: crossRoot, - } - // pending.wg is at zero, so wg.Wait() returns immediately. - bc.pendingSRCMu.Lock() - bc.pendingSRC = pending - bc.pendingSRCMu.Unlock() - // Persist to DB so the root survives reorgs and restarts. - rawdb.WritePostStateRoot(bc.db, block.Hash(), crossRoot) - // Fall through to writeBlockWithState below. - } else { - // Full-node path: defer CommitWithUpdate to a background goroutine. 
- flatDiff := state.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number())) - bc.lastFlatDiffMu.Lock() - bc.lastFlatDiff = flatDiff - bc.lastFlatDiffBlockHash = block.Hash() - bc.lastFlatDiffMu.Unlock() - bc.spawnSRCGoroutine(block, parentRoot, flatDiff) - return bc.writeBlockDataAndSetHead(block, receipts, logs, state, emitHeadEvent) - } - } - stateSyncLogs, err := bc.writeBlockWithState(block, receipts, logs, state) if err != nil { return NonStatTy, err @@ -3223,12 +2757,11 @@ func (bc *BlockChain) insertChainStatelessParallel(chain types.Blocks, witnesses // Validate witness pre-state for this block (if present) before writing if i < len(witnesses) && witnesses[i] != nil { - expectedRoot, err := bc.expectedPreStateRoot(block) - if err != nil { - stopHeaders() - return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) + var headerReader stateless.HeaderReader = bc + if witnesses[i].HeaderReader() != nil { + headerReader = witnesses[i].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[i], expectedRoot); err != nil { + if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader); err != nil { stopHeaders() return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) } @@ -3388,11 +2921,11 @@ func (bc *BlockChain) insertChainStatelessSequential(chain types.Blocks, witness // End-of-batch witness validation for i, block := range chain { if i < len(witnesses) && witnesses[i] != nil { - expectedRoot, err := bc.expectedPreStateRoot(block) - if err != nil { - return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) + var headerReader stateless.HeaderReader = bc + if witnesses[i].HeaderReader() != nil { + headerReader = witnesses[i].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[i], expectedRoot); err != nil { + if err := 
stateless.ValidateWitnessPreState(witnesses[i], headerReader); err != nil { return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) } } @@ -3565,25 +3098,6 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, // Track the singleton witness from this chain insertion (if any) var witness *stateless.Witness - // prevFlatDiff is the FlatDiff extracted from the previous block under delayed SRC. - // Carrying it across iterations lets block N+1 open state at parent.Root + flatDiff_N - // immediately, without waiting for the background goroutine to commit root_N. - // - // Seed from bc.lastFlatDiff when the first block in this batch is the direct - // successor of the block that produced lastFlatDiff. This handles the case - // where block N was processed in a previous insertChain call (or by the miner - // path) and block N+1 now arrives in a fresh call. Without seeding here, - // processBlock would open state at parent.Root = root_{N-1} (under delayed SRC) - // without the flatDiff_N overlay, yielding stale nonces and bad block errors. - var prevFlatDiff *state.FlatDiff - if bc.chainConfig.Bor != nil && len(chain) > 0 && bc.chainConfig.Bor.IsDelayedSRC(chain[0].Number()) { - bc.lastFlatDiffMu.RLock() - if bc.lastFlatDiffBlockHash == chain[0].ParentHash() { - prevFlatDiff = bc.lastFlatDiff - } - bc.lastFlatDiffMu.RUnlock() - } - // accumulator for canonical blocks var canonAccum []*types.Block @@ -3676,19 +3190,6 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } - - isDelayedSRC := bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsDelayedSRC(block.Number()) - - // Under delayed SRC, parent.Root is the committed trie base (= root_{N-1} for block N). 
- // prevFlatDiff, if non-nil, carries block N-1's mutations as an in-memory overlay so - // block N's transaction execution can begin immediately without waiting for the - // background goroutine (G_{N-1}) to finish committing root_{N-1} to the path DB. - // The sync point (ValidateState → GetPostStateRoot) is deferred until - // AFTER transaction execution completes inside processBlock. - if !isDelayedSRC { - prevFlatDiff = nil // reset when leaving the delayed-SRC regime - } - statedb, err := state.New(parent.Root, bc.statedb) if err != nil { return nil, it.index, err @@ -3723,14 +3224,11 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if witnesses != nil && len(witnesses) > it.processed()-1 && witnesses[it.processed()-1] != nil { // 1. Validate the witness. - expectedRoot, err := bc.expectedPreStateRoot(block) - if err != nil { - log.Error("Pre-state root unavailable for witness validation", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) - bc.reportBlock(block, &ProcessResult{}, err) - followupInterrupt.Store(true) - return nil, it.index, fmt.Errorf("witness validation failed: %w", err) + var headerReader stateless.HeaderReader = bc + if witnesses[it.processed()-1].HeaderReader() != nil { + headerReader = witnesses[it.processed()-1].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], expectedRoot); err != nil { + if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], headerReader); err != nil { log.Error("Witness validation failed during chain insertion", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) bc.reportBlock(block, &ProcessResult{}, err) followupInterrupt.Store(true) @@ -3751,7 +3249,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, } } - receipts, logs, usedGas, statedb, vtime, err := bc.processBlock(block, parent, prevFlatDiff, witness, &followupInterrupt) + receipts, logs, 
usedGas, statedb, vtime, err := bc.ProcessBlock(block, parent, witness, &followupInterrupt) bc.statedb.TrieDB().SetReadBackend(nil) bc.statedb.EnableSnapInReader() activeState = statedb @@ -3810,48 +3308,11 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, return nil, it.index, whitelist.ErrMismatch } - if isDelayedSRC { - // ValidateState (inside processBlock) was the sync point: it called - // GetPostStateRoot(block.ParentHash()) and waited for G_{N-1}. - // pendingSRC still points to G_{N-1}'s entry; reading from the closed - // done-channel is instant — no second goroutine barrier here. - actualParentRoot := bc.GetPostStateRoot(block.ParentHash()) - if actualParentRoot == (common.Hash{}) { - return nil, it.index, fmt.Errorf("delayed state root unavailable for parent %s", block.ParentHash()) - } - - // Extract flat diff cheaply (~1ms, no MPT hashing) and spawn the - // background goroutine that will compute and persist root_N. - flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number())) - bc.spawnSRCGoroutine(block, actualParentRoot, flatDiff) - - // Pass the flat diff to the next iteration so it can open state at - // parent.Root (= root_{N-1}) + flatDiff overlay, starting tx execution - // concurrently with this goroutine's commitAndFlush. - prevFlatDiff = flatDiff - - // Also update lastFlatDiff so the local miner uses the correct pre-state - // when building the next block after importing this one from a peer. - // Without this, a validator that imports a peer block via insertChain - // keeps a stale lastFlatDiff and mines the next block from the wrong - // base state (missing all mutations from the imported block). 
- bc.lastFlatDiffMu.Lock() - bc.lastFlatDiff = flatDiff - bc.lastFlatDiffBlockHash = block.Hash() - bc.lastFlatDiffMu.Unlock() - - if !setHead { - _, err = bc.writeBlockData(block, receipts, logs, statedb) - } else { - status, err = bc.writeBlockDataAndSetHead(block, receipts, logs, statedb, false) - } + if !setHead { + // Don't set the head, only insert the block + _, err = bc.writeBlockWithState(block, receipts, logs, statedb) } else { - if !setHead { - // Don't set the head, only insert the block - _, err = bc.writeBlockWithState(block, receipts, logs, statedb) - } else { - status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false, false) - } + status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false, false) } followupInterrupt.Store(true) @@ -3888,7 +3349,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if !setHead { // After merge we expect few side chains. Simply count - // all blocks the CL gives us for GC processing time. + // all blocks the CL gives us for GC processing time bc.gcproc += proctime return witness, it.index, nil // Direct block insertion of a single block } @@ -3910,7 +3371,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, lastCanon = block - // Only count canonical blocks for GC processing time. + // Only count canonical blocks for GC processing time bc.gcproc += proctime case SideStatTy: @@ -3952,10 +3413,10 @@ func (bpr *blockProcessingResult) Witness() *stateless.Witness { return bpr.witness } -// processBlockStateful executes and validates the given block. If there was no error +// ProcessBlock executes and validates the given block. If there was no error // it writes the block and associated state to database. 
// nolint : unused -func (bc *BlockChain) processBlockStateful(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool, diskdb ethdb.Database) (_ *blockProcessingResult, blockEndErr error) { +func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool, diskdb ethdb.Database) (_ *blockProcessingResult, blockEndErr error) { startTime := time.Now() if bc.logger != nil && bc.logger.OnBlockStart != nil { td := bc.GetTd(block.ParentHash(), block.NumberU64()-1) @@ -4017,9 +3478,7 @@ func (bc *BlockChain) processBlockStateful(block *types.Block, statedb *state.St if err != nil { return nil, fmt.Errorf("stateless self-validation failed: %v", err) } - // Under delayed SRC, block.Root() = parent's state root, not this block's; - // skip the equality check in that case. - if (bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number())) && crossStateRoot != block.Root() { + if crossStateRoot != block.Root() { return nil, fmt.Errorf("stateless self-validation root mismatch (cross: %x local: %x)", crossStateRoot, block.Root()) } if crossReceiptRoot != block.ReceiptHash() { @@ -4537,21 +3996,6 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error // Release the tx-lookup lock after mutation. bc.txLookupLock.Unlock() - // Delayed-SRC cleanup: if the in-flight SRC goroutine is for a dropped block, - // clear it so GetPostStateRoot falls back to the canonical child-header lookup. - if bc.chainConfig.Bor != nil { - bc.pendingSRCMu.Lock() - if bc.pendingSRC != nil { - for _, h := range oldChain { - if bc.pendingSRC.blockHash == h.Hash() { - bc.pendingSRC = nil - break - } - } - } - bc.pendingSRCMu.Unlock() - } - return nil } @@ -4797,25 +4241,293 @@ func (bc *BlockChain) SubscribeChain2HeadEvent(ch chan<- Chain2HeadEvent) event. 
return bc.scope.Track(bc.chain2HeadFeed.Subscribe(ch)) } -// SubscribeWitnessReadyEvent registers a subscription for WitnessReadyEvent, -// which is fired after the delayed-SRC goroutine finishes and the complete -// witness has been written to the database. -func (bc *BlockChain) SubscribeWitnessReadyEvent(ch chan<- WitnessReadyEvent) event.Subscription { - return bc.scope.Track(bc.witnessFeed.Subscribe(ch)) +// WriteBlockAndSetHeadPipelined writes block data (header, body, receipts) to +// the database and sets it as the chain head, WITHOUT committing trie state. +// The state commit is handled separately by the SRC goroutine that already +// called CommitWithUpdate. This avoids the "layer stale" error that occurs +// when two CommitWithUpdate calls diverge from the same parent root. +func (bc *BlockChain) WriteBlockAndSetHeadPipelined(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool, witnessBytes []byte) (WriteStatus, error) { + if !bc.chainmu.TryLock() { + return NonStatTy, errChainStopped + } + defer bc.chainmu.Unlock() + + // Write block data without state commit + ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) + if ptd == nil { + return NonStatTy, consensus.ErrUnknownAncestor + } + externTd := new(big.Int).Add(block.Difficulty(), ptd) + + blockBatch := bc.db.NewBatch() + rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) + rawdb.WriteBlock(blockBatch, block) + rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) + + // Handle bor state sync logs + blockLogs := statedb.Logs() + var stateSyncLogs []*types.Log + if len(blockLogs) > 0 { + if !(bc.chainConfig.Bor != nil && bc.chainConfig.Bor.IsMadhugiri(block.Number())) && len(blockLogs) > len(logs) { + sort.SliceStable(blockLogs, func(i, j int) bool { + return blockLogs[i].Index < blockLogs[j].Index + }) + stateSyncLogs = blockLogs[len(logs):] + types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), 
block.NumberU64(), uint(len(receipts)), uint(len(logs))) + + var cumulativeGasUsed uint64 + if len(receipts) > 0 { + cumulativeGasUsed = receipts[len(receipts)-1].CumulativeGasUsed + } + rawdb.WriteBorReceipt(blockBatch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{ + Status: types.ReceiptStatusSuccessful, + Logs: stateSyncLogs, + CumulativeGasUsed: cumulativeGasUsed, + }) + rawdb.WriteBorTxLookupEntry(blockBatch, block.Hash(), block.NumberU64()) + } + } + + rawdb.WritePreimages(blockBatch, statedb.Preimages()) + + // Write the witness produced by the SRC goroutine. The execution-side + // witness is incomplete for pipelined blocks (FlatDiff overlay bypasses + // the trie), so we use the SRC goroutine's witness which captures all + // MPT proof nodes during CommitWithUpdate. + if len(witnessBytes) > 0 { + bc.WriteWitness(block.Hash(), witnessBytes) + } + + if err := blockBatch.Write(); err != nil { + log.Crit("Failed to write block into disk", "err", err) + } + rawdb.WriteBytecodeSyncLastBlock(bc.db, block.NumberU64()) + + // Set head and emit events (same logic as writeBlockAndSetHead) + currentBlock := bc.CurrentBlock() + reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header()) + if err != nil { + return NonStatTy, err + } + + var status WriteStatus + if reorg { + if block.ParentHash() != currentBlock.Hash() { + if err := bc.reorg(currentBlock, block.Header()); err != nil { + return NonStatTy, err + } + } + status = CanonStatTy + } else { + status = SideStatTy + } + + if status == CanonStatTy { + bc.writeHeadBlock(block) + + bc.chainFeed.Send(ChainEvent{ + Header: block.Header(), + Receipts: receipts, + Transactions: block.Transactions(), + }) + if len(logs) > 0 { + bc.logsFeed.Send(logs) + } + if len(stateSyncLogs) > 0 { + bc.logsFeed.Send(stateSyncLogs) + } + if emitHeadEvent { + bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()}) + bc.stateSyncMu.RLock() + for _, data := range bc.GetStateSync() { + 
bc.stateSyncFeed.Send(StateSyncEvent{Data: data}) + } + bc.stateSyncMu.RUnlock() + } + } else { + bc.chainSideFeed.Send(ChainSideEvent{Header: block.Header()}) + bc.chain2HeadFeed.Send(Chain2HeadEvent{ + Type: Chain2HeadForkEvent, + NewChain: []*types.Header{block.Header()}, + }) + } + + return status, nil +} + +// --- Pipelined SRC methods --- + +// PostExecutionStateAt returns a StateDB representing the post-execution state +// of the given block header. Under pipelined SRC, if the FlatDiff for this block +// is still cached (i.e. this is the chain head), it returns a non-blocking +// overlay state via NewWithFlatBase. Otherwise it falls back to resolving the +// actual state root via StateAt. +// +// This is used by the txpool and RPC layer to get correct state when the chain +// head was produced via the pipeline (where the committed trie root may lag +// behind the actual post-execution state). +func (bc *BlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { + // Fast path: if we have the FlatDiff for this block, use it as an overlay. + bc.lastFlatDiffMu.RLock() + flatDiff := bc.lastFlatDiff + flatDiffHash := bc.lastFlatDiffBlockHash + bc.lastFlatDiffMu.RUnlock() + + if flatDiff != nil && flatDiffHash == header.Hash() { + return state.NewWithFlatBase(header.Root, bc.statedb, flatDiff) + } + + // Slow path: use the committed state root directly. + return bc.StateAt(header.Root) +} + +// SpawnSRCGoroutine launches a background goroutine that computes the actual +// state root for block by replaying flatDiff on top of parentRoot. +// The result is stored in pending.root; pending.wg is decremented when finished. 
+func (bc *BlockChain) SpawnSRCGoroutine(block *types.Block, parentRoot common.Hash, flatDiff *state.FlatDiff) { + pending := &pendingSRCState{ + blockHash: block.Hash(), + blockNumber: block.NumberU64(), + } + + bc.pendingSRCMu.Lock() + bc.pendingSRC = pending + bc.pendingSRCMu.Unlock() + + deleteEmptyObjects := bc.chainConfig.IsEIP158(block.Number()) + + pending.wg.Add(1) + bc.wg.Add(1) + + go func() { + defer bc.wg.Done() + defer pending.wg.Done() + + tmpDB, err := state.New(parentRoot, bc.statedb) + if err != nil { + log.Error("Pipelined SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err) + pending.err = err + return + } + + // Attach a witness so that IntermediateRoot (called by CommitWithUpdate) + // captures all trie nodes as a byproduct of the MPT hashing. The witness + // built during tx execution is incomplete because FlatDiff overlay accounts + // bypass the trie, so their MPT proof nodes are never captured there. + // The complete witness is built here instead. + witness, witnessErr := stateless.NewWitness(block.Header(), bc) + if witnessErr != nil { + log.Warn("Pipelined SRC: failed to create witness", "block", block.NumberU64(), "err", witnessErr) + } else { + tmpDB.SetWitness(witness) + } + + // Replay all write mutations as dirty state via the journal. + tmpDB.ApplyFlatDiffForCommit(flatDiff) + + // Load read-only accounts and storage slots so that the trie captures + // their proof-path nodes for witness building. + for _, addr := range flatDiff.ReadSet { + tmpDB.GetBalance(addr) + for _, slot := range flatDiff.ReadStorage[addr] { + tmpDB.GetState(addr, slot) + } + } + // Load read-only storage for mutated accounts. + for addr := range flatDiff.Accounts { + for _, slot := range flatDiff.ReadStorage[addr] { + tmpDB.GetState(addr, slot) + } + } + // Load pure-destruct accounts for witness proof-path nodes. 
+ for addr := range flatDiff.Destructs { + if _, resurrected := flatDiff.Accounts[addr]; !resurrected { + tmpDB.GetBalance(addr) + } + } + // Load non-existent accounts for proof-of-absence nodes. + for _, addr := range flatDiff.NonExistentReads { + tmpDB.GetBalance(addr) + } + + root, stateUpdate, err := tmpDB.CommitWithUpdate(block.NumberU64(), deleteEmptyObjects, bc.chainConfig.IsCancun(block.Number())) + if err != nil { + log.Error("Pipelined SRC: CommitWithUpdate failed", "block", block.NumberU64(), "err", err) + pending.err = err + return + } + + if bc.stateSizer != nil { + bc.stateSizer.Notify(stateUpdate) + } + + // Encode the complete witness. This must happen after CommitWithUpdate + // so that all trie nodes (for both write and read-set accounts) have + // been accumulated in the witness. + // NOTE: We do NOT write to DB here because the block hasn't been sealed + // yet — the final block hash (which includes the Seal signature) is not + // known. The caller retrieves the encoded witness via WaitForSRC and + // writes it in resultLoop with the correct sealed hash. + if witness != nil { + var witBuf bytes.Buffer + if err := witness.EncodeRLP(&witBuf); err != nil { + log.Error("Pipelined SRC: failed to encode witness", "block", block.NumberU64(), "err", err) + } else { + pending.witness = witBuf.Bytes() + } + } + + pending.root = root + }() +} + +// WaitForSRC blocks until the pending SRC goroutine completes and returns the +// computed state root and RLP-encoded witness. The witness may be nil if witness +// creation failed or was not applicable. Returns an error if the goroutine +// failed or no SRC is pending. 
+func (bc *BlockChain) WaitForSRC() (common.Hash, []byte, error) { + bc.pendingSRCMu.Lock() + pending := bc.pendingSRC + bc.pendingSRCMu.Unlock() + + if pending == nil { + return common.Hash{}, nil, errors.New("no pending SRC goroutine") + } + + pending.wg.Wait() + if pending.err != nil { + return common.Hash{}, nil, pending.err + } + return pending.root, pending.witness, nil } // GetLastFlatDiff returns the FlatDiff captured from the most recently committed -// block's CommitSnapshot. Under delayed SRC, the miner uses this to open a -// NewWithFlatBase statedb without waiting for the current SRC goroutine. +// block. The miner uses this to open a NewWithFlatBase StateDB without waiting +// for the current SRC goroutine to finish. func (bc *BlockChain) GetLastFlatDiff() *state.FlatDiff { bc.lastFlatDiffMu.RLock() defer bc.lastFlatDiffMu.RUnlock() return bc.lastFlatDiff } -// StateAtWithFlatDiff opens a statedb at baseRoot with flatDiff as an in-memory -// overlay, equivalent to state.NewWithFlatBase. Used by the miner under delayed -// SRC to begin executing block N+1 before G_N has finished. +// GetLastFlatDiffBlockHash returns the block hash associated with the cached FlatDiff. +func (bc *BlockChain) GetLastFlatDiffBlockHash() common.Hash { + bc.lastFlatDiffMu.RLock() + defer bc.lastFlatDiffMu.RUnlock() + return bc.lastFlatDiffBlockHash +} + +// SetLastFlatDiff stores the FlatDiff and its source block hash. +func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockHash common.Hash) { + bc.lastFlatDiffMu.Lock() + bc.lastFlatDiff = diff + bc.lastFlatDiffBlockHash = blockHash + bc.lastFlatDiffMu.Unlock() +} + +// StateAtWithFlatDiff opens a StateDB at baseRoot with flatDiff as an in-memory +// overlay, allowing reads to see the post-state of the block that produced +// flatDiff without waiting for its state root to be committed to the trie DB. 
func (bc *BlockChain) StateAtWithFlatDiff(baseRoot common.Hash, flatDiff *state.FlatDiff) (*state.StateDB, error) { return state.NewWithFlatBase(baseRoot, bc.statedb, flatDiff) } @@ -4829,26 +4541,21 @@ func (bc *BlockChain) ProcessBlockWithWitnesses(block *types.Block, witness *sta // Validate witness. // During parallel import, defer pre-state validation to the end of the batch. if !bc.parallelStatelessImportEnabled.Load() { - expectedRoot, err := bc.expectedPreStateRoot(block) - if err != nil { - log.Error("Pre-state root unavailable for witness validation", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) - return nil, nil, fmt.Errorf("witness validation failed: %w", err) + var headerReader stateless.HeaderReader + if witness.HeaderReader() != nil { + headerReader = witness.HeaderReader() + } else { + headerReader = bc } - if err := stateless.ValidateWitnessPreState(witness, expectedRoot); err != nil { + if err := stateless.ValidateWitnessPreState(witness, headerReader); err != nil { log.Error("Witness validation failed during stateless processing", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) return nil, nil, fmt.Errorf("witness validation failed: %w", err) } } - // Remove the receipt hash so ExecuteStateless can recompute it from scratch. - // Under delayed SRC, block.Root() carries the pre-state root for this block - // (the actual post-execution state root of the parent); preserve it so that - // ExecuteStateless can use it to open the correct pre-execution state. - // For pre-fork blocks, zero Root too so ExecuteStateless recomputes it. 
+ // Remove critical computed fields from the block to force true recalculation context := block.Header() - if bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number()) { - context.Root = common.Hash{} - } + context.Root = common.Hash{} context.ReceiptHash = common.Hash{} task := types.NewBlockWithHeader(context).WithBody(*block.Body()) @@ -4863,9 +4570,7 @@ func (bc *BlockChain) ProcessBlockWithWitnesses(block *types.Block, witness *sta log.Error("Stateless self-validation failed", "block", block.Number(), "hash", block.Hash(), "error", err) return nil, nil, err } - // Under delayed SRC, block.Root() = parent's state root, not this block's; - // skip the equality check in that case. - if (bc.chainConfig.Bor == nil || !bc.chainConfig.Bor.IsDelayedSRC(block.Number())) && crossStateRoot != block.Root() { + if crossStateRoot != block.Root() { log.Error("Stateless self-validation root mismatch", "block", block.Number(), "hash", block.Hash(), "cross", crossStateRoot, "local", block.Root()) err = fmt.Errorf("%w: remote %x != local %x", ErrStatelessStateRootMismatch, block.Root(), crossStateRoot) return nil, nil, err diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 67e99f6b54..58a8a49e13 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -684,13 +684,6 @@ type BorStateSyncer interface { SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription } -// DelayedSRCReader is implemented by BlockChain and allows consensus code to -// retrieve the actual post-execution state root stored separately under the -// delayed-SRC protocol (where header.Root holds the parent's state root). 
-type DelayedSRCReader interface { - GetPostStateRoot(blockHash common.Hash) common.Hash -} - // SetStateSync set sync data in state_data func (bc *BlockChain) SetStateSync(stateData []*types.StateSyncData) { bc.stateSyncMu.Lock() diff --git a/core/events.go b/core/events.go index a35ab615cf..fadecdedf7 100644 --- a/core/events.go +++ b/core/events.go @@ -36,13 +36,6 @@ type NewMinedBlockEvent struct { SealedAt time.Time // time when WriteBlockAndSetHead completed, used to measure broadcast latency } -// WitnessReadyEvent is posted when a delayed-SRC witness has been fully -// computed and written to the database, signalling that it can be broadcast. -type WitnessReadyEvent struct { - Block *types.Block - Witness *stateless.Witness -} - // RemovedLogsEvent is posted when a reorg happens type RemovedLogsEvent struct{ Logs []*types.Log } diff --git a/core/evm.go b/core/evm.go index 68b2a52ea2..de46fac729 100644 --- a/core/evm.go +++ b/core/evm.go @@ -19,6 +19,7 @@ package core import ( "math/big" "sync" + "sync/atomic" "github.com/holiman/uint256" @@ -155,6 +156,64 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash } } +// SpeculativeGetHashFn returns a GetHashFunc for use during pipelined SRC +// speculative execution of block N+1, where block N's hash is not yet known +// (SRC(N) is still computing root_N). +// +// It uses three-tier resolution: +// - Tier 1 (n == pendingBlockN): lazy-resolves by calling srcDone(), which +// blocks until SRC(N) completes and returns hash(block_N). Cached after +// first call. +// - Tier 2 (n == pendingBlockN-1): returns blockN1Header.Hash() directly. +// Block N-1 is fully committed and in the chain DB. +// - Tier 3 (n < pendingBlockN-1): delegates to GetHashFn anchored at +// block N-1. Its cache seeds from blockN1Header.ParentHash = hash(block_{N-2}), +// so index 0 gives BLOCKHASH(N-2), which is correct. 
+// +// srcDone is called at most once and must return hash(block_N) after SRC(N) +// completes. It may block. +func SpeculativeGetHashFn(blockN1Header *types.Header, chain ChainContext, + pendingBlockN uint64, srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func(uint64) common.Hash { + + blockN1Hash := blockN1Header.Hash() + + // olderFn handles blocks N-2 and below via the standard chain walk. + olderFn := GetHashFn(blockN1Header, chain) + + var resolvedBlockNHash common.Hash + var resolved bool + var resolveMu sync.Mutex + + return func(n uint64) common.Hash { + if n >= pendingBlockN+1 { + return common.Hash{} // future block + } + if n == pendingBlockN { + // Tier 1: lazy-resolve block N's hash. + // Flag that BLOCKHASH(N) was accessed — the resolved hash is + // pre-seal (no signature in Extra) and will differ from the + // final on-chain hash. The caller must abort speculative + // execution and fall back to the sequential path. + if blockhashNAccessed != nil { + blockhashNAccessed.Store(true) + } + resolveMu.Lock() + defer resolveMu.Unlock() + if !resolved { + resolvedBlockNHash = srcDone() + resolved = true + } + return resolvedBlockNHash + } + if n == pendingBlockN-1 { + // Tier 2: block N-1 is fully committed. + return blockN1Hash + } + // Tier 3: blocks N-2 and older via standard chain walk. + return olderFn(n) + } +} + // CanTransfer checks whether there are enough funds in the address' account to make a transfer. // This does not take the necessary gas in to account to make the transfer valid. 
func CanTransfer(db vm.StateDB, addr common.Address, amount *uint256.Int) bool { diff --git a/core/evm_speculative_test.go b/core/evm_speculative_test.go new file mode 100644 index 0000000000..29400be696 --- /dev/null +++ b/core/evm_speculative_test.go @@ -0,0 +1,232 @@ +package core + +import ( + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// mockChainContext implements ChainContext for testing SpeculativeGetHashFn. +type mockChainContext struct { + headers map[uint64]*types.Header +} + +func (m *mockChainContext) Config() *params.ChainConfig { + return params.TestChainConfig +} + +func (m *mockChainContext) CurrentHeader() *types.Header { + return nil +} + +func (m *mockChainContext) GetHeader(hash common.Hash, number uint64) *types.Header { + return m.headers[number] +} + +func (m *mockChainContext) GetHeaderByNumber(number uint64) *types.Header { + return m.headers[number] +} + +func (m *mockChainContext) GetHeaderByHash(hash common.Hash) *types.Header { + for _, h := range m.headers { + if h.Hash() == hash { + return h + } + } + return nil +} + +func (m *mockChainContext) GetTd(hash common.Hash, number uint64) *big.Int { + return big.NewInt(1) +} + +func (m *mockChainContext) Engine() consensus.Engine { + return nil +} + +// buildChain builds a simple chain of headers from 0 to count-1. 
+func buildChain(count int) (*mockChainContext, []*types.Header) { + headers := make([]*types.Header, count) + chain := &mockChainContext{headers: make(map[uint64]*types.Header)} + + for i := 0; i < count; i++ { + h := &types.Header{ + Number: big.NewInt(int64(i)), + ParentHash: common.Hash{}, + Extra: []byte("test"), + } + if i > 0 { + h.ParentHash = headers[i-1].Hash() + } + headers[i] = h + chain.headers[uint64(i)] = h + } + + return chain, headers +} + +func TestSpeculativeGetHashFn_Tier1_LazyResolve(t *testing.T) { + chain, headers := buildChain(10) + + // Block N=9 is pending (SRC running), block N-1=8 is committed. + blockN1Header := headers[8] // block 8 + pendingBlockN := uint64(9) + expectedBlockNHash := common.HexToHash("0xdeadbeef") + + var srcCalled bool + srcDone := func() common.Hash { + srcCalled = true + return expectedBlockNHash + } + + fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil) + + // Tier 1: BLOCKHASH(9) should lazy-resolve + result := fn(9) + if result != expectedBlockNHash { + t.Errorf("Tier 1: expected %x, got %x", expectedBlockNHash, result) + } + if !srcCalled { + t.Error("Tier 1: srcDone was not called") + } + + // Second call should return cached value without calling srcDone again + srcCalled = false + result = fn(9) + if result != expectedBlockNHash { + t.Errorf("Tier 1 (cached): expected %x, got %x", expectedBlockNHash, result) + } +} + +func TestSpeculativeGetHashFn_Tier2_ImmediateParent(t *testing.T) { + chain, headers := buildChain(10) + + blockN1Header := headers[8] // block 8 + pendingBlockN := uint64(9) + expectedN1Hash := blockN1Header.Hash() + + srcDone := func() common.Hash { + t.Error("srcDone should not be called for Tier 2") + return common.Hash{} + } + + fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil) + + // Tier 2: BLOCKHASH(8) should return block 8's hash immediately + result := fn(8) + if result != expectedN1Hash { + t.Errorf("Tier 2: expected %x, got %x", 
expectedN1Hash, result) + } +} + +func TestSpeculativeGetHashFn_Tier3_OlderBlocks(t *testing.T) { + chain, headers := buildChain(10) + + blockN1Header := headers[8] // block 8 + pendingBlockN := uint64(9) + + srcDone := func() common.Hash { + t.Error("srcDone should not be called for Tier 3") + return common.Hash{} + } + + fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil) + + // Tier 3: BLOCKHASH(7) should resolve via chain walk from block 8 + expectedHash7 := headers[7].Hash() + result := fn(7) + if result != expectedHash7 { + t.Errorf("Tier 3 (block 7): expected %x, got %x", expectedHash7, result) + } + + // BLOCKHASH(5) — deeper walk + expectedHash5 := headers[5].Hash() + result = fn(5) + if result != expectedHash5 { + t.Errorf("Tier 3 (block 5): expected %x, got %x", expectedHash5, result) + } + + // BLOCKHASH(0) — genesis + expectedHash0 := headers[0].Hash() + result = fn(0) + if result != expectedHash0 { + t.Errorf("Tier 3 (block 0): expected %x, got %x", expectedHash0, result) + } +} + +func TestSpeculativeGetHashFn_FutureBlock(t *testing.T) { + chain, headers := buildChain(10) + + blockN1Header := headers[8] + pendingBlockN := uint64(9) + + srcDone := func() common.Hash { + t.Error("srcDone should not be called for future blocks") + return common.Hash{} + } + + fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil) + + // BLOCKHASH(10) — future block, should return zero + result := fn(10) + if result != (common.Hash{}) { + t.Errorf("Future block: expected zero hash, got %x", result) + } + + // BLOCKHASH(11) — also future + result = fn(11) + if result != (common.Hash{}) { + t.Errorf("Future block 11: expected zero hash, got %x", result) + } +} + +func TestSpeculativeGetHashFn_Tier1_Blocking(t *testing.T) { + chain, headers := buildChain(10) + + blockN1Header := headers[8] + pendingBlockN := uint64(9) + expectedHash := common.HexToHash("0xabcdef") + + var wg sync.WaitGroup + wg.Add(1) + + srcDone := func() 
common.Hash { + wg.Wait() // block until released + return expectedHash + } + + fn := SpeculativeGetHashFn(blockN1Header, chain, pendingBlockN, srcDone, nil) + + // Start BLOCKHASH(9) in a goroutine — it should block + resultCh := make(chan common.Hash, 1) + go func() { + resultCh <- fn(9) + }() + + // Verify it hasn't resolved yet + select { + case <-resultCh: + t.Error("BLOCKHASH(9) resolved before srcDone was released") + case <-time.After(100 * time.Millisecond): + // expected — still blocking + } + + // Release srcDone + wg.Done() + + // Now it should resolve + select { + case result := <-resultCh: + if result != expectedHash { + t.Errorf("Tier 1 blocking: expected %x, got %x", expectedHash, result) + } + case <-time.After(2 * time.Second): + t.Error("BLOCKHASH(9) did not resolve after srcDone was released") + } +} diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index ad49db9251..555c358826 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -339,24 +339,6 @@ func DeleteWitness(db ethdb.KeyValueWriter, blockHash common.Hash) { } } -// WritePostStateRoot stores the post-execution state root for a given block. -// This persists across reorgs and restarts so that GetPostStateRoot can -// retrieve the root when no child block exists yet. -func WritePostStateRoot(db ethdb.KeyValueWriter, blockHash common.Hash, root common.Hash) { - if err := db.Put(postStateRootKey(blockHash), root.Bytes()); err != nil { - log.Crit("Failed to store post-state root", "err", err) - } -} - -// ReadPostStateRoot retrieves the post-execution state root for the given block. 
-func ReadPostStateRoot(db ethdb.KeyValueReader, blockHash common.Hash) common.Hash { - data, err := db.Get(postStateRootKey(blockHash)) - if err != nil || len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - func ReadWitnessPruneCursor(db ethdb.KeyValueReader) *uint64 { log.Debug("ReadWitnessCursor") data, err := db.Get(witnessPruneCursorKey()) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index a3113e170a..4f5cb05bf0 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -164,8 +164,6 @@ var ( WitnessPruneCursorKey = []byte("witnessPruneCursorKey") WitnessPruneHeadKey = []byte("witnessPruneHeadKey") - PostStateRootPrefix = []byte("post-state-root-") // PostStateRootPrefix + hash -> post-execution state root - // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress BloomBitsIndexPrefix = []byte("iB") @@ -297,11 +295,6 @@ func witnessSizeKey(hash common.Hash) []byte { return append(WitnessSizePrefix, hash.Bytes()...) } -// postStateRootKey = PostStateRootPrefix + hash -func postStateRootKey(hash common.Hash) []byte { - return append(PostStateRootPrefix, hash.Bytes()...) -} - func witnessPruneCursorKey() []byte { return WitnessPruneCursorKey } diff --git a/core/state/statedb.go b/core/state/statedb.go index 119c1c7bf3..d08acd805a 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -151,7 +151,7 @@ type StateDB struct { witnessStats *stateless.WitnessStats // nonExistentReads tracks addresses that were looked up but don't exist - // in the state trie. Under delayed SRC, these are included in the + // in the state trie. Under pipelined SRC, these are included in the // FlatDiff so the SRC goroutine can walk their trie paths and capture // proof-of-absence nodes for the witness. Without this, stateless // execution fails when it tries to prove these accounts don't exist. 
@@ -1057,7 +1057,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { } // Short circuit if the account is not found if acct == nil { - // Track the address so the delayed SRC goroutine can walk + // Track the address so the pipelined SRC goroutine can walk // the trie path and capture proof-of-absence nodes for the // witness. Without this, stateless execution can't verify // non-existent accounts. @@ -1972,7 +1972,7 @@ type FlatDiff struct { Code map[common.Hash][]byte // newly deployed code // ReadSet and ReadStorage list accounts and storage slots that were read - // (but not mutated) during block execution. The delayed SRC goroutine loads + // (but not mutated) during block execution. The pipelined SRC goroutine loads // these from the root_{N-1} trie so their MPT proof nodes are captured in // the witness for stateless execution. ReadSet []common.Address @@ -2079,7 +2079,7 @@ func (s *StateDB) CommitSnapshot(deleteEmptyObjects bool) *FlatDiff { } // Capture read-only accounts: accessed during execution but not mutated. - // The delayed SRC goroutine uses these to load their root_{N-1} trie nodes + // The pipelined SRC goroutine uses these to load their root_{N-1} trie nodes // into the witness so stateless nodes can execute against root_{N-1}. for addr, obj := range s.stateObjects { if _, isMutation := s.mutations[addr]; isMutation { @@ -2229,9 +2229,9 @@ func (s *StateDB) ApplyFlatDiffForCommit(diff *FlatDiff) { // the post-state of the block that produced flatDiff, without waiting for // that block's state root to be computed. // -// This is used during DelayedSRC block processing: while goroutine G_N is -// computing root_N from (root_{N-1}, FlatDiff_N), the next block N+1 can -// already be executed using NewWithFlatBase(root_{N-1}, db, FlatDiff_N). 
+// This is used during pipelined SRC: while a background goroutine computes +// root_N from (root_{N-1}, FlatDiff_N), the next block N+1 can already be +// executed using NewWithFlatBase(root_{N-1}, db, FlatDiff_N). func NewWithFlatBase(parentCommittedRoot common.Hash, db Database, flatDiff *FlatDiff) (*StateDB, error) { sdb, err := New(parentCommittedRoot, db) if err != nil { @@ -2248,6 +2248,19 @@ func (s *StateDB) SetFlatDiffRef(diff *FlatDiff) { s.flatDiffRef = diff } +// WasStorageSlotRead returns true if the given address+slot was accessed +// (read) during this block's execution. Used by pipelined SRC to detect +// whether any transaction read the EIP-2935 history storage slot that +// contains stale data during speculative execution. +func (s *StateDB) WasStorageSlotRead(addr common.Address, slot common.Hash) bool { + obj, exists := s.stateObjects[addr] + if !exists { + return false + } + _, accessed := obj.originStorage[slot] + return accessed +} + // Prepare handles the preparatory steps for executing a state transition with. // This method must be invoked before state transition. // @@ -2399,3 +2412,20 @@ func (s *StateDB) AccessEvents() *AccessEvents { func (s *StateDB) Inner() *StateDB { return s } + +// PropagateReadsTo touches all addresses and storage slots accessed in s on +// the destination StateDB. This ensures the destination tracks them in its +// stateObjects (and later in its FlatDiff ReadSet) so the pipelined SRC +// goroutine captures their trie proof nodes in the witness. +// +// Use this when a temporary copy of the state is used for EVM calls (e.g., +// CommitStates → LastStateId) and the accessed addresses must be visible +// in the original state for witness generation. 
+func (s *StateDB) PropagateReadsTo(dst *StateDB) { + for addr, obj := range s.stateObjects { + dst.GetBalance(addr) + for slot := range obj.originStorage { + dst.GetState(addr, slot) + } + } +} diff --git a/core/state/statedb_pipeline_test.go b/core/state/statedb_pipeline_test.go new file mode 100644 index 0000000000..a6ee4d1552 --- /dev/null +++ b/core/state/statedb_pipeline_test.go @@ -0,0 +1,129 @@ +package state + +import ( + "testing" + + "github.com/holiman/uint256" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/triedb" +) + +func TestWasStorageSlotRead(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + sdb, _ := New(types.EmptyRootHash, db) + + addr := common.HexToAddress("0x1234") + slot := common.HexToHash("0xabcd") + + // Slot not read yet + if sdb.WasStorageSlotRead(addr, slot) { + t.Error("slot should not be marked as read before any access") + } + + // Create an account and read its storage + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 1, 0) + sdb.Finalise(false) + + // Read the slot + sdb.GetState(addr, slot) + + // Now it should be marked as read + if !sdb.WasStorageSlotRead(addr, slot) { + t.Error("slot should be marked as read after GetState") + } + + // A different slot should not be marked + otherSlot := common.HexToHash("0x5678") + if sdb.WasStorageSlotRead(addr, otherSlot) { + t.Error("other slot should not be marked as read") + } + + // A different address should not be marked + otherAddr := common.HexToAddress("0x5678") + if sdb.WasStorageSlotRead(otherAddr, slot) { + t.Error("other address should not be marked as read") + } +} + +func TestFlatDiffOverlay_ReadThrough(t *testing.T) { + // Create a base state with an account + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + sdb, _ := New(types.EmptyRootHash, db) + + baseAddr := 
common.HexToAddress("0xbase") + sdb.CreateAccount(baseAddr) + sdb.SetNonce(baseAddr, 1, 0) + sdb.SetBalance(baseAddr, uint256.NewInt(100), 0) + root, _, _ := sdb.CommitWithUpdate(0, false, false) + + // Create a FlatDiff with a new account + overlayAddr := common.HexToAddress("0xoverlay") + diff := &FlatDiff{ + Accounts: map[common.Address]types.StateAccount{ + overlayAddr: { + Nonce: 42, + Balance: uint256.NewInt(200), + Root: types.EmptyRootHash, + CodeHash: types.EmptyCodeHash.Bytes(), + }, + }, + Storage: make(map[common.Address]map[common.Hash]common.Hash), + Destructs: make(map[common.Address]struct{}), + Code: make(map[common.Hash][]byte), + ReadStorage: make(map[common.Address][]common.Hash), + NonExistentReads: nil, + } + + // Create StateDB with FlatDiff overlay + overlayDB, err := NewWithFlatBase(root, db, diff) + if err != nil { + t.Fatal(err) + } + + // Should see the overlay account + if overlayDB.GetNonce(overlayAddr) != 42 { + t.Errorf("expected nonce 42 for overlay addr, got %d", overlayDB.GetNonce(overlayAddr)) + } + + // Should still see the base account + if overlayDB.GetNonce(baseAddr) != 1 { + t.Errorf("expected nonce 1 for base addr, got %d", overlayDB.GetNonce(baseAddr)) + } +} + +func TestCommitSnapshot_CapturesWrites(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + sdb, _ := New(types.EmptyRootHash, db) + + addr := common.HexToAddress("0x1234") + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 10, 0) + sdb.SetBalance(addr, uint256.NewInt(500), 0) + + slot := common.HexToHash("0xaaaa") + sdb.SetState(addr, slot, common.HexToHash("0xbbbb")) + + diff := sdb.CommitSnapshot(false) + + // Verify account is captured + acct, ok := diff.Accounts[addr] + if !ok { + t.Fatal("account not captured in FlatDiff") + } + if acct.Nonce != 10 { + t.Errorf("expected nonce 10, got %d", acct.Nonce) + } + + // Verify storage is captured + slots, ok := diff.Storage[addr] + if !ok { + t.Fatal("storage not captured in 
FlatDiff") + } + if slots[slot] != common.HexToHash("0xbbbb") { + t.Errorf("expected slot value 0xbbbb, got %x", slots[slot]) + } +} diff --git a/core/stateless.go b/core/stateless.go index 3a85b7fcc1..c00e9b3f3e 100644 --- a/core/stateless.go +++ b/core/stateless.go @@ -43,27 +43,17 @@ import ( // // TODO(karalabe): Would be nice to resolve both issues above somehow and move it. func ExecuteStateless(config *params.ChainConfig, vmconfig vm.Config, block *types.Block, witness *stateless.Witness, author *common.Address, consensus consensus.Engine, diskdb ethdb.Database) (common.Hash, common.Hash, *state.StateDB, *ProcessResult, error) { - var preStateRoot common.Hash - if config.Bor != nil && config.Bor.IsDelayedSRC(block.Number()) { - // Under delayed SRC, block.Root() carries the pre-state root for this block - // (the actual post-execution state root of the parent, placed there by the - // block producer). Use it directly; do NOT treat it as a faulty value. - preStateRoot = block.Root() - } else { - // Sanity check: the caller should have zeroed Root and ReceiptHash so that - // we can compute them from scratch via the witness. - if block.Root() != (common.Hash{}) { - log.Error("stateless runner received state root it's expected to calculate (faulty consensus client)", "block", block.Number()) - } - if block.ReceiptHash() != (common.Hash{}) { - log.Error("stateless runner received receipt root it's expected to calculate (faulty consensus client)", "block", block.Number()) - } - preStateRoot = witness.Root() + // Sanity check if the supplied block accidentally contains a set root or + // receipt hash. If so, be very loud, but still continue. 
+ if block.Root() != (common.Hash{}) { + log.Error("stateless runner received state root it's expected to calculate (faulty consensus client)", "block", block.Number()) + } + if block.ReceiptHash() != (common.Hash{}) { + log.Error("stateless runner received receipt root it's expected to calculate (faulty consensus client)", "block", block.Number()) } - // Create and populate the state database to serve as the stateless backend memdb := witness.MakeHashDB(diskdb) - db, err := state.New(preStateRoot, state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil)) + db, err := state.New(witness.Root(), state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil)) if err != nil { return common.Hash{}, common.Hash{}, nil, nil, err } diff --git a/core/stateless/witness.go b/core/stateless/witness.go index 33a755dda2..57f01e85d3 100644 --- a/core/stateless/witness.go +++ b/core/stateless/witness.go @@ -35,29 +35,39 @@ type HeaderReader interface { GetHeader(hash common.Hash, number uint64) *types.Header } -// ValidateWitnessPreState validates that the witness pre-state root matches -// expectedPreStateRoot (the parent block's actual post-execution state root). -// -// Under delayed SRC, the pre-state root is stored in contextHeader.Root -// (set by spawnSRCGoroutine). Under normal operation, it is witness.Root() -// (= Headers[0].Root = parent header's Root field). -func ValidateWitnessPreState(witness *Witness, expectedPreStateRoot common.Hash) error { +// ValidateWitnessPreState validates that the witness pre-state root matches the parent block's state root. +func ValidateWitnessPreState(witness *Witness, headerReader HeaderReader) error { if witness == nil { return fmt.Errorf("witness is nil") } + + // Check if witness has any headers. if len(witness.Headers) == 0 { return fmt.Errorf("witness has no headers") } + + // Get the witness context header (the block this witness is for). 
contextHeader := witness.Header() if contextHeader == nil { return fmt.Errorf("witness context header is nil") } - // Normal path: witness.Root() (= parent header's Root) must match expected. - if witness.Root() != expectedPreStateRoot { - return fmt.Errorf("witness pre-state root mismatch: witness=%x, expected=%x, blockNumber=%d", - witness.Root(), expectedPreStateRoot, contextHeader.Number.Uint64()) + // Get the parent block header from the chain. + parentHeader := headerReader.GetHeader(contextHeader.ParentHash, contextHeader.Number.Uint64()-1) + if parentHeader == nil { + return fmt.Errorf("parent block header not found: parentHash=%x, parentNumber=%d", + contextHeader.ParentHash, contextHeader.Number.Uint64()-1) } + + // Get witness pre-state root (from first header which should be parent). + witnessPreStateRoot := witness.Root() + + // Compare with actual parent block's state root. + if witnessPreStateRoot != parentHeader.Root { + return fmt.Errorf("witness pre-state root mismatch: witness=%x, parent=%x, blockNumber=%d", + witnessPreStateRoot, parentHeader.Root, contextHeader.Number.Uint64()) + } + return nil } @@ -86,16 +96,9 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) { } headers = append(headers, parent) } - // Gut out the root and receipt hash: these are what stateless execution - // computes. A non-zero Root signals delayed SRC (the pre-state root is - // embedded there by the caller after NewWitness returns). - ctx := types.CopyHeader(context) - ctx.Root = common.Hash{} - ctx.ReceiptHash = common.Hash{} - // Create the witness with a reconstructed gutted out block return &Witness{ - context: ctx, + context: context, Headers: headers, Codes: make(map[string]struct{}), State: make(map[string]struct{}), @@ -156,13 +159,7 @@ func (w *Witness) Copy() *Witness { return cpy } -// Root returns the pre-state root for executing this block's transactions. -// This is always Headers[0].Root, i.e. 
the parent block's post-execution state -// root (the trustless pre-state anchor included in every witness). -// -// Under delayed SRC the correct pre-state root lives in the block header itself -// (block[N].Header.Root = root_{N-1}); callers that have the block available -// should use block.Root() directly rather than this method. +// Root returns the pre-state root from the first header. // // Note, this method will panic in case of a bad witness (but RLP decoding will // sanitize it and fail before that). diff --git a/core/stateless/witness_test.go b/core/stateless/witness_test.go index 86c07a4ce0..6d662020fd 100644 --- a/core/stateless/witness_test.go +++ b/core/stateless/witness_test.go @@ -26,8 +26,13 @@ func TestValidateWitnessPreState_Success(t *testing.T) { contextHeader := &types.Header{ Number: big.NewInt(100), ParentHash: parentHash, + Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), } + // Set up mock header reader. + mockReader := NewMockHeaderReader() + mockReader.AddHeader(parentHeader) + // Create witness with matching pre-state root. witness := &Witness{ context: contextHeader, @@ -36,8 +41,8 @@ func TestValidateWitnessPreState_Success(t *testing.T) { State: make(map[string]struct{}), } - // Test validation - should succeed (witness.Root() == parentStateRoot). - err := ValidateWitnessPreState(witness, parentStateRoot) + // Test validation - should succeed. + err := ValidateWitnessPreState(witness, mockReader) if err != nil { t.Errorf("Expected validation to succeed, but got error: %v", err) } @@ -60,6 +65,7 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) { contextHeader := &types.Header{ Number: big.NewInt(100), ParentHash: parentHash, + Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), } // Create witness header with mismatched state root. 
@@ -69,6 +75,10 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) { Root: mismatchedStateRoot, // Different from actual parent. } + // Set up mock header reader. + mockReader := NewMockHeaderReader() + mockReader.AddHeader(parentHeader) + // Create witness with mismatched pre-state root. witness := &Witness{ context: contextHeader, @@ -77,8 +87,8 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) { State: make(map[string]struct{}), } - // Test validation - should fail (witness.Root() = mismatchedStateRoot != parentStateRoot). - err := ValidateWitnessPreState(witness, parentStateRoot) + // Test validation - should fail. + err := ValidateWitnessPreState(witness, mockReader) if err == nil { t.Error("Expected validation to fail due to state root mismatch, but it succeeded") } @@ -92,11 +102,11 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) { } func TestValidateWitnessPreState_EdgeCases(t *testing.T) { - dummyRoot := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + mockReader := NewMockHeaderReader() // Test case 1: Nil witness. 
t.Run("NilWitness", func(t *testing.T) { - err := ValidateWitnessPreState(nil, dummyRoot) + err := ValidateWitnessPreState(nil, mockReader) if err == nil { t.Error("Expected validation to fail for nil witness") } @@ -114,7 +124,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { State: make(map[string]struct{}), } - err := ValidateWitnessPreState(witness, dummyRoot) + err := ValidateWitnessPreState(witness, mockReader) if err == nil { t.Error("Expected validation to fail for witness with no headers") } @@ -130,14 +140,14 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { Headers: []*types.Header{ { Number: big.NewInt(99), - Root: dummyRoot, + Root: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), }, }, Codes: make(map[string]struct{}), State: make(map[string]struct{}), } - err := ValidateWitnessPreState(witness, dummyRoot) + err := ValidateWitnessPreState(witness, mockReader) if err == nil { t.Error("Expected validation to fail for witness with nil context header") } @@ -146,31 +156,33 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { } }) - // Test case 4: Mismatch with expected root. - t.Run("Mismatch", func(t *testing.T) { - wrongRoot := common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") + // Test case 4: Parent header not found. 
+ t.Run("ParentNotFound", func(t *testing.T) { + contextHeader := &types.Header{ + Number: big.NewInt(100), + ParentHash: common.HexToHash("0xnonexistent1234567890abcdef1234567890abcdef1234567890abcdef123456"), + Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), + } witness := &Witness{ - context: &types.Header{ - Number: big.NewInt(100), - ParentHash: common.HexToHash("0xabc"), - }, + context: contextHeader, Headers: []*types.Header{ { Number: big.NewInt(99), - Root: wrongRoot, // witness.Root() will be wrongRoot + Root: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), }, }, Codes: make(map[string]struct{}), State: make(map[string]struct{}), } - err := ValidateWitnessPreState(witness, dummyRoot) + // Don't add parent header to mock reader - it won't be found. + err := ValidateWitnessPreState(witness, mockReader) if err == nil { - t.Error("Expected validation to fail when witness root doesn't match expected") + t.Error("Expected validation to fail when parent header is not found") } - expectedError := "witness pre-state root mismatch" + expectedError := "parent block header not found" if err != nil && len(err.Error()) > len(expectedError) { if err.Error()[:len(expectedError)] != expectedError { t.Errorf("Expected error message to start with '%s', but got: %v", expectedError, err) @@ -190,6 +202,7 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) { Root: grandParentStateRoot, } + // Use the actual hash of the grandparent header. grandParentHash := grandParentHeader.Hash() parentHeader := &types.Header{ @@ -198,13 +211,20 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) { Root: parentStateRoot, } + // Use the actual hash of the parent header. 
parentHash := parentHeader.Hash() contextHeader := &types.Header{ Number: big.NewInt(100), ParentHash: parentHash, + Root: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), } + // Set up mock header reader. + mockReader := NewMockHeaderReader() + mockReader.AddHeader(parentHeader) + mockReader.AddHeader(grandParentHeader) + // Create witness with multiple headers (parent should be first). witness := &Witness{ context: contextHeader, @@ -213,54 +233,13 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) { State: make(map[string]struct{}), } - // Test validation - should succeed (witness.Root() = parentStateRoot). - err := ValidateWitnessPreState(witness, parentStateRoot) + // Test validation - should succeed (only first header matters for validation). + err := ValidateWitnessPreState(witness, mockReader) if err != nil { t.Errorf("Expected validation to succeed with multiple headers, but got error: %v", err) } } -func TestValidateWitnessPreState_DelayedSRC(t *testing.T) { - // Under delayed SRC, witness.Root() = Headers[0].Root = parent header's - // on-chain Root (= root_{N-2}). The caller passes parentHeader.Root as - // expectedPreStateRoot. The actual pre-state root (root_{N-1}) is validated - // separately in writeBlockAndSetHead. 
- parentOnChainRoot := common.HexToHash("0xbbbb") // root_{N-2} - - t.Run("Match", func(t *testing.T) { - witness := &Witness{ - context: &types.Header{ - Number: big.NewInt(100), - Root: common.HexToHash("0xaaaa"), // root_{N-1}, irrelevant here - }, - Headers: []*types.Header{{Number: big.NewInt(99), Root: parentOnChainRoot}}, - Codes: make(map[string]struct{}), - State: make(map[string]struct{}), - } - err := ValidateWitnessPreState(witness, parentOnChainRoot) - if err != nil { - t.Errorf("Expected delayed SRC validation to succeed, got: %v", err) - } - }) - - t.Run("Mismatch", func(t *testing.T) { - wrongExpected := common.HexToHash("0xcccc") - witness := &Witness{ - context: &types.Header{ - Number: big.NewInt(100), - Root: common.HexToHash("0xaaaa"), - }, - Headers: []*types.Header{{Number: big.NewInt(99), Root: parentOnChainRoot}}, - Codes: make(map[string]struct{}), - State: make(map[string]struct{}), - } - err := ValidateWitnessPreState(witness, wrongExpected) - if err == nil { - t.Error("Expected delayed SRC validation to fail on mismatch") - } - }) -} - // TestConsensusWithOriginalPeer tests consensus calculation including original peer func TestConsensusWithOriginalPeer(t *testing.T) { t.Run("Case1_OriginalPeer3_RandomPeers2and3_ShouldChoose3", func(t *testing.T) { diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index 8922c64a81..5bb3b1c780 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -202,7 +202,7 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } -func (bc *testBlockChain) PostExecutionStateAt(*types.Header) (*state.StateDB, error) { +func (bc *testBlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { return bc.statedb, nil } diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go index b1d7b69969..b0f4301175 100644 --- a/core/txpool/blobpool/interface.go 
+++ b/core/txpool/blobpool/interface.go @@ -43,8 +43,7 @@ type BlockChain interface { StateAt(root common.Hash) (*state.StateDB, error) // PostExecutionStateAt returns a StateDB representing the post-execution - // state of the given block header. Under delayed SRC, uses a non-blocking - // FlatDiff overlay when available; otherwise falls back to resolving the - // actual state root (which may block). + // state of the given block header. Under pipelined SRC, uses a non-blocking + // FlatDiff overlay when available; otherwise falls back to StateAt. PostExecutionStateAt(header *types.Header) (*state.StateDB, error) } diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 63d3721b6b..8e5be7a3fc 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -173,9 +173,8 @@ type BlockChain interface { StateAt(root common.Hash) (*state.StateDB, error) // PostExecutionStateAt returns a StateDB representing the post-execution - // state of the given block header. Under delayed SRC, uses a non-blocking - // FlatDiff overlay when available; otherwise falls back to resolving the - // actual state root (which may block). + // state of the given block header. Under pipelined SRC, uses a non-blocking + // FlatDiff overlay when available; otherwise falls back to StateAt. PostExecutionStateAt(header *types.Header) (*state.StateDB, error) } @@ -1804,6 +1803,42 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { pool.addTxs(reinject, false) } +// ResetSpeculativeState updates the pool's internal state to reflect a new +// block that hasn't been written to the chain yet. This is used by pipelined +// SRC: after block N's transactions are executed but before block N is sealed, +// the miner calls this to update the txpool so that speculative execution of +// block N+1 gets correct pending transactions (with block N's nonces/balances). 
+// +// Unlike the full reset() path, this does NOT walk the chain for included/ +// discarded transactions (the block isn't in the chain DB). It only: +// 1. Updates currentState and pendingNonces from the provided statedb +// 2. Sets currentHead to the new header +// 3. Demotes transactions with stale nonces +// 4. Promotes newly executable transactions +func (pool *LegacyPool) ResetSpeculativeState(newHead *types.Header, statedb *state.StateDB) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.currentHead.Store(newHead) + pool.currentState = statedb + pool.pendingNonces = newNoncer(statedb) + + // Demote transactions that are no longer valid with the new nonces + pool.demoteUnexecutables() + + // Promote transactions that are now executable + promoted := pool.promoteExecutables(nil) + + // Fire events for promoted transactions + if len(promoted) > 0 { + var txs []*types.Transaction + for _, tx := range promoted { + txs = append(txs, tx) + } + pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) + } +} + // promoteExecutables moves transactions that have become processable from the // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. 
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index d12759eb88..484cad8433 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -117,7 +117,7 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } -func (bc *testBlockChain) PostExecutionStateAt(*types.Header) (*state.StateDB, error) { +func (bc *testBlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { return bc.statedb, nil } diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 7a6f81c99e..3ee0e53d51 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -58,9 +58,8 @@ type BlockChain interface { StateAt(root common.Hash) (*state.StateDB, error) // PostExecutionStateAt returns a StateDB representing the post-execution - // state of the given block header. Under delayed SRC, uses a non-blocking - // FlatDiff overlay when available; otherwise falls back to resolving the - // actual state root (which may block). + // state of the given block header. Under pipelined SRC, uses a non-blocking + // FlatDiff overlay when available; otherwise falls back to StateAt. PostExecutionStateAt(header *types.Header) (*state.StateDB, error) } @@ -559,3 +558,28 @@ func (p *TxPool) Clear() { subpool.Clear() } } + +// SpeculativeResetter is implemented by subpools that support speculative +// state resets for pipelined SRC. This avoids import cycles between txpool +// and legacypool packages. +type SpeculativeResetter interface { + ResetSpeculativeState(newHead *types.Header, statedb *state.StateDB) +} + +// ResetSpeculativeState updates the txpool's state to reflect a block that +// hasn't been written to the chain yet. This is used by pipelined SRC so that +// speculative execution of block N+1 gets correct pending transactions +// (reflecting block N's post-execution nonces and balances via FlatDiff overlay). 
+func (p *TxPool) ResetSpeculativeState(newHead *types.Header, statedb *state.StateDB) { + // Update the aggregator's state + p.stateLock.Lock() + p.state = statedb + p.stateLock.Unlock() + + // Update subpools that support speculative resets + for _, subpool := range p.subpools { + if sr, ok := subpool.(SpeculativeResetter); ok { + sr.ResetSpeculativeState(newHead, statedb) + } + } +} diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index d07a6e2a05..ddb01122a0 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -536,7 +536,6 @@ func TestReinforceMultiClientPreCompilesTest(t *testing.T) { "IsMadhugiriPro", "IsLisovo", "IsLisovoPro", - "IsDelayedSRC", } if len(actual) != len(expected) { diff --git a/docs/cli/default_config.toml b/docs/cli/default_config.toml index 022a277ef1..0d959900c0 100644 --- a/docs/cli/default_config.toml +++ b/docs/cli/default_config.toml @@ -105,6 +105,8 @@ devfakeauthor = false base-fee-change-denominator = 0 prefetch = false prefetch-gaslimit-percent = 100 + pipelined-src = true + pipelined-src-logs = true [jsonrpc] ipcdisable = false diff --git a/docs/cli/server.md b/docs/cli/server.md index a8e4a3d595..98a978b54e 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -354,6 +354,10 @@ The ```bor server``` command runs the Bor client. - ```miner.interruptcommit```: Interrupt block commit when block creation time is passed (default: true) +- ```miner.pipelined-src```: Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution (default: true) + +- ```miner.pipelined-src-logs```: Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.) 
(default: true) + - ```miner.prefetch```: Enable transaction prefetching from the pool during block building (default: false) - ```miner.prefetch.gaslimit.percent```: Gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%) (default: 100) diff --git a/eth/api_backend.go b/eth/api_backend.go index c068fa1e3b..19b16fb70b 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -288,9 +288,12 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B return nil, nil, errors.New("header not found") } - stateDb, err := b.stateAtHeader(header) + stateDb, err := b.eth.BlockChain().StateAt(header.Root) if err != nil { - return nil, nil, err + stateDb, err = b.eth.BlockChain().HistoricState(header.Root) + if err != nil { + return nil, nil, err + } } return stateDb, header, nil } @@ -314,9 +317,12 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN return nil, nil, errors.New("hash is not currently canonical") } - stateDb, err := b.stateAtHeader(header) + stateDb, err := b.eth.BlockChain().StateAt(header.Root) if err != nil { - return nil, nil, err + stateDb, err = b.eth.BlockChain().HistoricState(header.Root) + if err != nil { + return nil, nil, err + } } return stateDb, header, nil } @@ -324,21 +330,6 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN return nil, nil, errors.New("invalid arguments; neither block nor hash specified") } -// stateAtHeader returns the state database for the given header, correctly -// resolving the state root under delayed SRC where header.Root stores the -// parent's state root rather than this block's post-execution root. 
-func (b *EthAPIBackend) stateAtHeader(header *types.Header) (*state.StateDB, error) { - bc := b.eth.BlockChain() - stateDb, err := bc.PostExecutionStateAt(header) - if err != nil { - stateDb, err = bc.HistoricState(header.Root) - if err != nil { - return nil, err - } - } - return stateDb, nil -} - func (b *EthAPIBackend) HistoryPruningCutoff() uint64 { bn, _ := b.eth.blockchain.HistoryPruningCutoff() return bn diff --git a/eth/handler.go b/eth/handler.go index 5a0f625bc4..65dbe7eb3a 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -172,9 +172,6 @@ type handler struct { minedBlockSub *event.TypeMuxSubscription blockRange *blockRangeState - witnessReadyCh chan core.WitnessReadyEvent - witnessReadySub event.Subscription - requiredBlocks map[uint64]common.Hash enableBlockTracking bool @@ -622,12 +619,6 @@ func (h *handler) Start(maxPeers int) { h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{}) go h.minedBroadcastLoop() - // broadcast delayed-SRC witnesses once the SRC goroutine completes - h.wg.Add(1) - h.witnessReadyCh = make(chan core.WitnessReadyEvent, 10) - h.witnessReadySub = h.chain.SubscribeWitnessReadyEvent(h.witnessReadyCh) - go h.witnessBroadcastLoop() - h.wg.Add(1) go h.chainSync.loop() @@ -649,7 +640,6 @@ func (h *handler) Stop() { h.stuckTxsSub.Unsubscribe() // quits stuckTxBroadcastLoop } h.minedBlockSub.Unsubscribe() - h.witnessReadySub.Unsubscribe() // quits witnessBroadcastLoop h.blockRange.stop() // Quit chainSync and txsync64. @@ -847,26 +837,6 @@ func (h *handler) minedBroadcastLoop() { } } -// witnessBroadcastLoop announces delayed-SRC witnesses to peers once the -// background SRC goroutine has finished computing them. Analogous to block -// propagation: we send a hash announcement and let peers fetch on demand. 
-func (h *handler) witnessBroadcastLoop() { - defer h.wg.Done() - - for { - select { - case ev := <-h.witnessReadyCh: - hash := ev.Block.Hash() - number := ev.Block.NumberU64() - for _, peer := range h.peers.peersWithoutWitness(hash) { - peer.Peer.AsyncSendNewWitnessHash(hash, number) - } - case <-h.witnessReadySub.Err(): - return - } - } -} - // txBroadcastLoop announces new transactions to connected peers. func (h *handler) txBroadcastLoop() { defer h.wg.Done() diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index bc4e168cde..c705d58704 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -449,6 +449,12 @@ type SealerConfig struct { // PrefetchGasLimitPercent is the gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%) PrefetchGasLimitPercent uint64 `hcl:"prefetch-gaslimit-percent,optional" toml:"prefetch-gaslimit-percent,optional"` + + // EnablePipelinedSRC enables pipelined state root computation: overlap SRC(N) with block N+1 tx execution + EnablePipelinedSRC bool `hcl:"pipelined-src,optional" toml:"pipelined-src,optional"` + + // PipelinedSRCLogs enables verbose logging for pipelined SRC + PipelinedSRCLogs bool `hcl:"pipelined-src-logs,optional" toml:"pipelined-src-logs,optional"` } type JsonRPCConfig struct { @@ -906,6 +912,8 @@ func DefaultConfig() *Config { PrefetchGasLimitPercent: 100, TargetGasPercentage: 0, // Initialize to 0, will be set from CLI or remain 0 (meaning use default) BaseFeeChangeDenominator: 0, // Initialize to 0, will be set from CLI or remain 0 (meaning use default) + EnablePipelinedSRC: true, + PipelinedSRCLogs: true, }, Gpo: &GpoConfig{ Blocks: 20, @@ -1277,6 +1285,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.Miner.BlockTime = c.Sealer.BlockTime n.Miner.EnablePrefetch = c.Sealer.EnablePrefetch n.Miner.PrefetchGasLimitPercent = c.Sealer.PrefetchGasLimitPercent + n.Miner.EnablePipelinedSRC = c.Sealer.EnablePipelinedSRC + 
n.Miner.PipelinedSRCLogs = c.Sealer.PipelinedSRCLogs // Validate prefetch gas limit percentage if c.Sealer.EnablePrefetch && c.Sealer.PrefetchGasLimitPercent > 150 { diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 270d9f6bde..e314a568a6 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -438,6 +438,20 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { Default: c.cliConfig.Sealer.PrefetchGasLimitPercent, Group: "Sealer", }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "miner.pipelined-src", + Usage: "Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution", + Value: &c.cliConfig.Sealer.EnablePipelinedSRC, + Default: c.cliConfig.Sealer.EnablePipelinedSRC, + Group: "Sealer", + }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "miner.pipelined-src-logs", + Usage: "Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.)", + Value: &c.cliConfig.Sealer.PipelinedSRCLogs, + Default: c.cliConfig.Sealer.PipelinedSRCLogs, + Group: "Sealer", + }) f.BoolFlag(&flagset.BoolFlag{ Name: "miner.enableDynamicGasLimit", Usage: "Enable dynamic gas limit adjustment based on base fee", diff --git a/miner/fake_miner.go b/miner/fake_miner.go index 4954e7952a..72e39bfa4e 100644 --- a/miner/fake_miner.go +++ b/miner/fake_miner.go @@ -261,7 +261,7 @@ func (bc *testBlockChainBor) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } -func (bc *testBlockChainBor) PostExecutionStateAt(*types.Header) (*state.StateDB, error) { +func (bc *testBlockChainBor) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { return bc.statedb, nil } diff --git a/miner/miner.go b/miner/miner.go index a919717cc2..09aabdd8fc 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -67,6 +67,8 @@ type Config struct { PendingFeeRecipient common.Address `toml:"-"` // Address for pending block rewards. 
EnablePrefetch bool // Enable transaction prefetching from pool during block building PrefetchGasLimitPercent uint64 // Gas limit percentage for prefetching (e.g., 100 = 100%, 110 = 110%) + EnablePipelinedSRC bool // Enable pipelined state root computation: overlap SRC(N) with block N+1 tx execution + PipelinedSRCLogs bool // Enable verbose logging for pipelined SRC (spawned SRC, SRC completed, block sealed, etc.) } // DefaultConfig contains default settings for miner. diff --git a/miner/pipeline.go b/miner/pipeline.go new file mode 100644 index 0000000000..255e7b1530 --- /dev/null +++ b/miner/pipeline.go @@ -0,0 +1,825 @@ +package miner + +import ( + "crypto/sha256" + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/bor" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" +) + +// Pipelined SRC metrics +var ( + pipelineSpeculativeBlocksCounter = metrics.NewRegisteredCounter("worker/pipelineSpeculativeBlocks", nil) + pipelineSpeculativeAbortsCounter = metrics.NewRegisteredCounter("worker/pipelineSpeculativeAborts", nil) + pipelineEIP2935AbortsCounter = metrics.NewRegisteredCounter("worker/pipelineEIP2935Aborts", nil) + pipelineSRCTimer = metrics.NewRegisteredTimer("worker/pipelineSRCTime", nil) + pipelineFlatDiffExtractTimer = metrics.NewRegisteredTimer("worker/pipelineFlatDiffExtractTime", nil) +) + +// speculativeWorkReq is sent to mainLoop's speculative work channel +// when block N's execution is done and we want to speculatively start N+1. 
+type speculativeWorkReq struct { + parentHeader *types.Header // block N's header (complete except Root) + flatDiff *state.FlatDiff // block N's state mutations + parentRoot common.Hash // root_{N-1} (last committed trie root) + blockNEnv *environment // block N's execution environment (for assembly later) + stateSyncData []*types.StateSyncData // from FinalizeForPipeline +} + +// placeholderParentHash generates a deterministic placeholder hash for use +// as ParentHash in speculative headers. It must not collide with any real +// block hash. +func placeholderParentHash(blockNumber uint64) common.Hash { + data := append([]byte("pipelined-src-placeholder:"), new(big.Int).SetUint64(blockNumber).Bytes()...) + return sha256.Sum256(data) +} + +// isPipelineEligible checks whether we can use pipelined SRC for the next +// block. Returns false at sprint boundaries in pre-Rio mode (where +// GetCurrentValidatorsByHash needs a real parent hash). +func (w *worker) isPipelineEligible(currentBlockNumber uint64) bool { + if !w.config.EnablePipelinedSRC { + return false + } + if w.chainConfig.Bor == nil { + return false + } + if len(w.chainConfig.Bor.Sprint) == 0 { + return false + } + if !w.IsRunning() || w.syncing.Load() { + return false + } + // Pre-Rio: sprint boundary blocks need real parent hash for validator lookup. + // The check is on number+1 because Prepare() for block N encodes validators + // when IsSprintStart(N+1) is true. + nextBlockNumber := currentBlockNumber + 1 + if !w.chainConfig.Bor.IsRio(new(big.Int).SetUint64(nextBlockNumber)) { + sprint := w.chainConfig.Bor.CalculateSprint(nextBlockNumber) + if bor.IsSprintStart(nextBlockNumber+1, sprint) { + return false + } + } + return true +} + +// commitPipelined is the pipelined version of commit(). Instead of calling +// FinalizeAndAssemble (which blocks on IntermediateRoot), it: +// 1. Calls FinalizeForPipeline (state sync, span commits — no IntermediateRoot) +// 2. Extracts FlatDiff +// 3. 
Sends a speculativeWorkReq to start N+1 execution +// 4. Returns immediately — the SRC goroutine is spawned by commitSpeculativeWork +// after confirming the speculative Prepare() succeeds. This avoids a trie DB +// race between the SRC goroutine and the fallback path's inline commit. +func (w *worker) commitPipelined(env *environment, start time.Time) error { + if !w.IsRunning() { + return nil + } + + env = env.copy() + + borEngine, ok := w.engine.(*bor.Bor) + if !ok { + log.Error("Pipelined SRC: engine is not Bor") + return nil + } + + // Phase 1: Finalize (state sync, span commits) without IntermediateRoot + stateSyncData, err := borEngine.FinalizeForPipeline(w.chain, env.header, env.state, &types.Body{ + Transactions: env.txs, + }, env.receipts) + if err != nil { + log.Error("Pipelined SRC: FinalizeForPipeline failed", "err", err) + return err + } + + // Phase 2: Extract FlatDiff (~1ms, no trie operations) + flatDiffStart := time.Now() + flatDiff := env.state.CommitSnapshot(w.chainConfig.IsEIP158(env.header.Number)) + pipelineFlatDiffExtractTimer.Update(time.Since(flatDiffStart)) + + // The parent root is root_{N-1}, stored in the parent header. + parent := w.chain.GetHeader(env.header.ParentHash, env.header.Number.Uint64()-1) + if parent == nil { + log.Error("Pipelined SRC: parent not found", "parentHash", env.header.ParentHash) + return nil + } + parentRoot := parent.Root + + w.chain.SetLastFlatDiff(flatDiff, env.header.Hash()) + // Note: this counts block N as "entering the pipeline." If Prepare() fails + // and fallbackToSequential produces the block inline, the counter is slightly + // inflated — the block was produced sequentially, not speculatively. + pipelineSpeculativeBlocksCounter.Inc(1) + + // Phase 3: Send speculative work request for block N+1. + // The SRC goroutine is NOT spawned here — commitSpeculativeWork spawns it + // after confirming Prepare() succeeds. 
If Prepare() fails, fallbackToSequential + // uses the normal inline FinalizeAndAssemble path (no SRC goroutine). + select { + case w.speculativeWorkCh <- &speculativeWorkReq{ + parentHeader: env.header, + flatDiff: flatDiff, + parentRoot: parentRoot, + blockNEnv: env, + stateSyncData: stateSyncData, + }: + case <-w.exitCh: + return nil + } + + return nil +} + +// commitSpeculativeWork handles a speculativeWorkReq: executes block N+1 +// speculatively using the FlatDiff overlay, then waits for SRC(N) to complete, +// assembles block N, and sends it for sealing. Then it finalizes N+1 and +// seals it as well. +func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { + // Ensure pendingWorkBlock is cleared when this function exits, so the + // next ChainHeadEvent-triggered commitWork can proceed. + defer w.pendingWorkBlock.Store(0) + + blockNHeader := req.parentHeader + blockNNumber := blockNHeader.Number.Uint64() + nextBlockNumber := blockNNumber + 1 + + log.Debug("Pipelined SRC: starting speculative execution", "speculativeBlock", nextBlockNumber, "parent", blockNNumber) + + // --- Build speculative header for N+1 --- + placeholder := placeholderParentHash(blockNNumber) + specReader := newSpeculativeChainReader(w.chain, blockNHeader, placeholder) + specContext := newSpeculativeChainContext(specReader, w.engine) + + // Resolve the EVM coinbase the same way the importer does in + // NewEVMBlockContext(header, chain, nil) — for post-Rio blocks, this + // uses CalculateCoinbase (from the Bor config), falling back to + // w.etherbase() if not configured. We must NOT use w.etherbase() + // directly because the Bor config's Coinbase field may specify a + // different address (e.g. 0xba5e on some networks). 
+ var coinbase common.Address + if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsRio(new(big.Int).SetUint64(nextBlockNumber)) { + coinbase = common.HexToAddress(w.chainConfig.Bor.CalculateCoinbase(nextBlockNumber)) + } + if coinbase == (common.Address{}) { + coinbase = w.etherbase() + } + + specHeader := &types.Header{ + ParentHash: placeholder, + Number: new(big.Int).SetUint64(nextBlockNumber), + GasLimit: core.CalcGasLimit(blockNHeader.GasLimit, w.config.GasCeil), + Time: blockNHeader.Time + w.chainConfig.Bor.CalculatePeriod(nextBlockNumber), + Coinbase: coinbase, + } + if w.chainConfig.IsLondon(specHeader.Number) { + specHeader.BaseFee = eip1559.CalcBaseFee(w.chainConfig, blockNHeader) + } + + // Call Prepare() via the speculative chain reader with waitOnPrepare=false. + // This sets Difficulty, Extra (validator bytes at sprint boundary), and timestamp + // but does NOT sleep. The timing wait is deferred until after the abort check + // to avoid wasting a full block period if the speculative block is discarded. + // NOTE: Prepare() will zero out specHeader.Coinbase. The real coinbase + // is preserved in the local `coinbase` variable above. + if err := w.engine.Prepare(specReader, specHeader, false); err != nil { + log.Warn("Pipelined SRC: speculative Prepare failed, falling back", "err", err) + w.fallbackToSequential(req) + return + } + + // Prepare() succeeded — now spawn the background SRC goroutine for block N. + // This is done HERE (not in commitPipelined) to avoid a trie DB race: + // if Prepare() fails and we fall back, the fallback path does an inline + // FinalizeAndAssemble which also commits to the trie. Having both an SRC + // goroutine AND an inline commit operating on the same parent root causes + // "missing trie node / layer stale" errors. 
+ tmpBlock := types.NewBlockWithHeader(req.parentHeader) + w.chain.SpawnSRCGoroutine(tmpBlock, req.parentRoot, req.flatDiff) + + // --- Open speculative StateDB --- + specState, err := w.chain.StateAtWithFlatDiff(req.parentRoot, req.flatDiff) + if err != nil { + log.Error("Pipelined SRC: failed to open speculative state", "err", err) + w.fallbackToSequential(req) + return + } + specState.StartPrefetcher("miner-speculative", nil, nil) + + // --- Create speculative EVM with SpeculativeGetHashFn --- + blockN1Header := w.chain.GetHeader(blockNHeader.ParentHash, blockNNumber-1) + if blockN1Header == nil { + log.Error("Pipelined SRC: grandparent header not found") + w.fallbackToSequential(req) + return + } + + // srcDone is a lazy resolver for block N's hash, used by SpeculativeGetHashFn. + // Block N's hash isn't known until SRC completes (it depends on the state root). + // If a tx in the speculative block calls BLOCKHASH(N), SpeculativeGetHashFn + // calls srcDone() which blocks on WaitForSRC, resolves the hash, and sets the + // blockhashNAccessed flag to trigger an abort (since the pre-seal hash won't + // match the final on-chain hash). 
+ var blockNHash common.Hash + var blockNHashResolved bool + var resolveMu sync.Mutex + + srcDone := func() common.Hash { + resolveMu.Lock() + defer resolveMu.Unlock() + if blockNHashResolved { + return blockNHash + } + root, _, err := w.chain.WaitForSRC() + if err != nil { + log.Error("Pipelined SRC: SRC failed during BLOCKHASH resolution", "err", err) + return common.Hash{} + } + finalHeader := types.CopyHeader(blockNHeader) + finalHeader.Root = root + finalHeader.UncleHash = types.CalcUncleHash(nil) + blockNHash = finalHeader.Hash() + blockNHashResolved = true + return blockNHash + } + + var blockhashNAccessed atomic.Bool + specGetHash := core.SpeculativeGetHashFn(blockN1Header, specContext, blockNNumber, srcDone, &blockhashNAccessed) + + evmContext := core.NewEVMBlockContext(specHeader, specContext, &coinbase) + evmContext.GetHash = specGetHash + + specEnv := &environment{ + signer: types.MakeSigner(w.chainConfig, specHeader.Number, specHeader.Time), + state: specState, + size: uint64(specHeader.Size()), + coinbase: coinbase, + header: specHeader, + evm: vm.NewEVM(evmContext, specState, w.chainConfig, vm.Config{}), + } + specEnv.evm.SetInterrupt(&w.interruptBlockBuilding) + specEnv.tcount = 0 + + // NOTE: ProcessParentBlockHash is NOT called during speculative execution. + // It will be called after block N is written and the real hash is known, + // before FinalizeAndAssemble for N+1. + + // --- Reset txpool state for speculative execution --- + specTxPoolState, err := w.chain.StateAtWithFlatDiff(req.parentRoot, req.flatDiff) + if err != nil { + log.Error("Pipelined SRC: failed to create txpool speculative state", "err", err) + } else { + w.eth.TxPool().ResetSpeculativeState(blockNHeader, specTxPoolState) + } + + // --- Fill transactions for N+1 --- + // Reset the block building interrupt flag — it may have been set by block N's + // timeout timer. 
If we don't clear it, fillTransactions → Pending() sees the + // flag and returns an empty map, resulting in txs=0. + w.interruptBlockBuilding.Store(false) + + var specInterrupt atomic.Int32 + w.fillTransactions(&specInterrupt, specEnv) //nolint:errcheck + + // --- Check abort conditions --- + eip2935Abort := false + if w.chainConfig.IsPrague(specHeader.Number) { + dangerousSlot := common.BigToHash(new(big.Int).SetUint64(blockNNumber % params.HistoryServeWindow)) + if specState.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { + log.Warn("Pipelined SRC: discarding speculative N+1 — EIP-2935 slot accessed", + "block", nextBlockNumber, "slot", dangerousSlot) + eip2935Abort = true + pipelineEIP2935AbortsCounter.Inc(1) + } + } + + // --- Wait for SRC(N) to complete --- + srcStart := time.Now() + root, witnessN, err := w.chain.WaitForSRC() + pipelineSRCTimer.Update(time.Since(srcStart)) + if err != nil { + log.Error("Pipelined SRC: SRC(N) failed", "block", blockNNumber, "err", err) + pipelineSpeculativeAbortsCounter.Inc(1) + return + } + + // --- Assemble and seal block N --- + borEngine, _ := w.engine.(*bor.Bor) + + finalHeaderN := types.CopyHeader(blockNHeader) + finalHeaderN.Root = root + blockN, receiptsN, err := borEngine.AssembleBlock(w.chain, finalHeaderN, req.blockNEnv.state, &types.Body{ + Transactions: req.blockNEnv.txs, + }, req.blockNEnv.receipts, root, req.stateSyncData) + if err != nil { + log.Error("Pipelined SRC: AssembleBlock(N) failed", "err", err) + return + } + + // Block N uses the pipelined write path to avoid a double CommitWithUpdate + // from the same parent root (one from the SRC goroutine, one from the normal + // writeBlockWithState). The SRC goroutine's witness is complete. 
+ select { + case w.taskCh <- &task{receipts: receiptsN, state: req.blockNEnv.state, block: blockN, createdAt: time.Now(), pipelined: true, witnessBytes: witnessN}: + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: block N sent for sealing", "number", blockN.Number(), "txs", len(blockN.Transactions()), "root", root) + } + case <-w.exitCh: + return + } + + // Wait for block N to be written to the chain before sending N+1. + blockNNum := blockN.NumberU64() + waitDeadline := time.After(30 * time.Second) + for { + if current := w.chain.CurrentBlock(); current != nil && current.Number.Uint64() >= blockNNum { + break + } + select { + case <-time.After(50 * time.Millisecond): + case <-waitDeadline: + log.Error("Pipelined SRC: timed out waiting for block N to be written", "number", blockNNum) + return + case <-w.exitCh: + return + } + } + + // Get the REAL block N hash from the chain — this is the signed hash + // written by resultLoop after Seal() modified header.Extra. + chainHead := w.chain.CurrentBlock() + if chainHead == nil || chainHead.Number.Uint64() != blockNNum { + log.Error("Pipelined SRC: chain head mismatch after waiting", "expected", blockNNum, + "got", chainHead.Number.Uint64()) + return + } + realBlockNHash := chainHead.Hash() + rootN := root // state root of the last written block + + // --- CONTINUOUS PIPELINE LOOP --- + // State at this point: + // - Block N is written to chain, realBlockNHash is known + // - Speculative execution of N+1 is complete (specHeader, specState, specEnv) + // - rootN is block N's committed state root + // - eip2935Abort and blockhashNAccessed track N+1's abort conditions + curBlockhashAccessed := &blockhashNAccessed + + for { + // --- Check abort conditions for current speculative block --- + aborted := false + if eip2935Abort { + log.Warn("Pipelined SRC: discarding speculative block — EIP-2935 slot accessed", + "block", nextBlockNumber) + pipelineSpeculativeAbortsCounter.Inc(1) + aborted = true + } + if !aborted && 
curBlockhashAccessed.Load() { + log.Warn("Pipelined SRC: discarding speculative block — BLOCKHASH(N) was accessed", + "block", nextBlockNumber, "pendingBlockN", blockNNumber) + pipelineSpeculativeAbortsCounter.Inc(1) + aborted = true + } + if aborted { + // Trigger commitWork immediately after we return, rather than + // waiting for the veblopTimer (~1 block period). Without this, + // the delayed commitWork → Prepare() sees the target time as + // already passed and the minBlockBuildTime check pushes the + // timestamp forward by an extra block period. + // + // The goroutine sends to newWorkCh after a small delay to let + // commitSpeculativeWork return and mainLoop re-enter its select. + // + // Known limitation: on chains where blockTime == minBlockBuildTime + // (e.g., 1-second devnets), Prepare() always pushes the timestamp + // because the remaining time (~990ms) is less than minBlockBuildTime + // (1s). This adds an extra 1s gap after every abort. On mainnet + // (2s blocks), the remaining ~1.99s exceeds minBlockBuildTime, so + // blocks stay on schedule. 
+ go func() { + time.Sleep(10 * time.Millisecond) + select { + case w.newWorkCh <- &newWorkReq{timestamp: time.Now().Unix()}: + case <-w.exitCh: + } + }() + break + } + + // --- Finalize current speculative block --- + finalSpecHeader := types.CopyHeader(specHeader) + finalSpecHeader.ParentHash = realBlockNHash + + if w.chainConfig.IsPrague(finalSpecHeader.Number) { + evmCtx := core.NewEVMBlockContext(finalSpecHeader, w.chain, &coinbase) + vmenv := vm.NewEVM(evmCtx, specState, w.chainConfig, vm.Config{}) + core.ProcessParentBlockHash(realBlockNHash, vmenv) + } + + specStateSyncData, err := borEngine.FinalizeForPipeline(w.chain, finalSpecHeader, specState, &types.Body{ + Transactions: specEnv.txs, + }, specEnv.receipts) + if err != nil { + log.Error("Pipelined SRC: FinalizeForPipeline failed", "block", nextBlockNumber, "err", err) + break + } + + flatDiff := specState.CommitSnapshot(w.chainConfig.IsEIP158(finalSpecHeader.Number)) + + // --- Check if we can continue the pipeline for the next block --- + nextNextBlockNumber := nextBlockNumber + 1 + if !w.isPipelineEligible(nextBlockNumber) || !w.IsRunning() { + // Last block in the pipeline — seal synchronously via taskCh so that + // resultLoop emits ChainHeadEvent and normal block production resumes. 
+ w.sealBlockViaTaskCh(borEngine, finalSpecHeader, specState, specEnv.txs, + specEnv.receipts, specStateSyncData, rootN, flatDiff, true) + break + } + + // --- Build speculative environment for the NEXT block (N+2) --- + placeholderNext := placeholderParentHash(nextBlockNumber) + specReaderNext := newSpeculativeChainReader(w.chain, finalSpecHeader, placeholderNext) + specContextNext := newSpeculativeChainContext(specReaderNext, w.engine) + + var coinbaseNext common.Address + if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsRio(new(big.Int).SetUint64(nextNextBlockNumber)) { + coinbaseNext = common.HexToAddress(w.chainConfig.Bor.CalculateCoinbase(nextNextBlockNumber)) + } + if coinbaseNext == (common.Address{}) { + coinbaseNext = w.etherbase() + } + + specHeaderNext := &types.Header{ + ParentHash: placeholderNext, + Number: new(big.Int).SetUint64(nextNextBlockNumber), + GasLimit: core.CalcGasLimit(finalSpecHeader.GasLimit, w.config.GasCeil), + Time: finalSpecHeader.Time + w.chainConfig.Bor.CalculatePeriod(nextNextBlockNumber), + Coinbase: coinbaseNext, + } + if w.chainConfig.IsLondon(specHeaderNext.Number) { + specHeaderNext.BaseFee = eip1559.CalcBaseFee(w.chainConfig, finalSpecHeader) + } + + // Prepare() with waitOnPrepare=false — sets header fields without sleeping. + // The timing wait is deferred to just before sealing, after the abort check. + // This avoids wasting a full block period if the speculative block is aborted. 
+ if err := w.engine.Prepare(specReaderNext, specHeaderNext, false); err != nil { + log.Warn("Pipelined SRC: Prepare failed for next block, sealing current", + "block", nextNextBlockNumber, "err", err) + w.sealBlockViaTaskCh(borEngine, finalSpecHeader, specState, specEnv.txs, + specEnv.receipts, specStateSyncData, rootN, flatDiff, true) + break + } + + // --- Spawn SRC for current speculative block (overlaps with next block's execution) --- + srcSpawnTime := time.Now() + tmpBlockCur := types.NewBlockWithHeader(finalSpecHeader) + w.chain.SpawnSRCGoroutine(tmpBlockCur, rootN, flatDiff) + w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Hash()) + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: spawned SRC, starting speculative exec", + "srcBlock", nextBlockNumber, "specExecBlock", nextNextBlockNumber) + } + + // --- Open speculative state for next block --- + specStateNext, err := w.chain.StateAtWithFlatDiff(rootN, flatDiff) + if err != nil { + log.Error("Pipelined SRC: failed to open speculative state for next block", + "block", nextNextBlockNumber, "err", err) + // SRC is already running — wait for it and seal current block + w.sealBlockViaTaskCh(borEngine, finalSpecHeader, specState, specEnv.txs, + specEnv.receipts, specStateSyncData, rootN, flatDiff, false) + break + } + specStateNext.StartPrefetcher("miner-speculative", nil, nil) + + // --- Build SpeculativeGetHashFn for next block --- + grandparentHeader := w.chain.GetHeaderByNumber(blockNNumber) + if grandparentHeader == nil { + log.Error("Pipelined SRC: grandparent header not found for next block", "number", blockNNumber) + w.sealBlockViaTaskCh(borEngine, finalSpecHeader, specState, specEnv.txs, + specEnv.receipts, specStateSyncData, rootN, flatDiff, false) + break + } + + var nextBlockHash common.Hash + var nextBlockHashResolved bool + var nextResolveMu sync.Mutex + + srcDoneNext := func() common.Hash { + nextResolveMu.Lock() + defer nextResolveMu.Unlock() + if nextBlockHashResolved { + return 
nextBlockHash + } + rootSpec, _, err := w.chain.WaitForSRC() + if err != nil { + log.Error("Pipelined SRC: SRC failed during BLOCKHASH resolution", "err", err) + return common.Hash{} + } + finalH := types.CopyHeader(finalSpecHeader) + finalH.Root = rootSpec + finalH.UncleHash = types.CalcUncleHash(nil) + nextBlockHash = finalH.Hash() + nextBlockHashResolved = true + return nextBlockHash + } + + nextBlockhashAccessed := new(atomic.Bool) + specGetHashNext := core.SpeculativeGetHashFn(grandparentHeader, specContextNext, nextBlockNumber, srcDoneNext, nextBlockhashAccessed) + + evmContextNext := core.NewEVMBlockContext(specHeaderNext, specContextNext, &coinbaseNext) + evmContextNext.GetHash = specGetHashNext + + specEnvNext := &environment{ + signer: types.MakeSigner(w.chainConfig, specHeaderNext.Number, specHeaderNext.Time), + state: specStateNext, + size: uint64(specHeaderNext.Size()), + coinbase: coinbaseNext, + header: specHeaderNext, + evm: vm.NewEVM(evmContextNext, specStateNext, w.chainConfig, vm.Config{}), + } + specEnvNext.evm.SetInterrupt(&w.interruptBlockBuilding) + specEnvNext.tcount = 0 + + // --- Reset txpool and fill transactions for next block --- + specTxPoolStateNext, err := w.chain.StateAtWithFlatDiff(rootN, flatDiff) + if err != nil { + log.Error("Pipelined SRC: failed to create txpool state for next block", "err", err) + } else { + w.eth.TxPool().ResetSpeculativeState(finalSpecHeader, specTxPoolStateNext) + } + + w.interruptBlockBuilding.Store(false) + var specInterruptNext atomic.Int32 + fillStart := time.Now() + w.fillTransactions(&specInterruptNext, specEnvNext) //nolint:errcheck + execElapsed := time.Since(fillStart) + + // --- Check EIP-2935 abort for next block --- + nextEIP2935Abort := false + if w.chainConfig.IsPrague(specHeaderNext.Number) { + dangerousSlot := common.BigToHash(new(big.Int).SetUint64(nextBlockNumber % params.HistoryServeWindow)) + if specStateNext.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { + 
log.Warn("Pipelined SRC: EIP-2935 slot accessed in next block", + "block", nextNextBlockNumber, "slot", dangerousSlot) + nextEIP2935Abort = true + pipelineEIP2935AbortsCounter.Inc(1) + } + } + + // --- Wait for SRC of current speculative block --- + srcWaitStart := time.Now() + rootSpec, witnessSpec, err := w.chain.WaitForSRC() + srcWaitElapsed := time.Since(srcWaitStart) + srcTotalElapsed := time.Since(srcSpawnTime) + pipelineSRCTimer.Update(srcTotalElapsed) + if err != nil { + log.Error("Pipelined SRC: SRC failed", "block", nextBlockNumber, "err", err) + pipelineSpeculativeAbortsCounter.Inc(1) + break + } + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: SRC completed", + "block", nextBlockNumber, "srcTotal", srcTotalElapsed, + "srcWait", srcWaitElapsed, "execOverlap", execElapsed) + } + + // --- Assemble current speculative block --- + blockSpec, receiptsSpec, err := borEngine.AssembleBlock(w.chain, finalSpecHeader, specState, &types.Body{ + Transactions: specEnv.txs, + }, specEnv.receipts, rootSpec, specStateSyncData) + if err != nil { + log.Error("Pipelined SRC: AssembleBlock failed", "block", nextBlockNumber, "err", err) + break + } + + // Update pendingWorkBlock BEFORE inline write so that newWorkLoop skips + // the ChainHeadEvent for this block. pendingWorkBlock = nextBlockNumber + 1 + // means "we're working on nextBlockNumber+1, so skip ChainHeadEvent for nextBlockNumber". + w.pendingWorkBlock.Store(nextBlockNumber + 1) + + // --- Wait for the block's target timestamp before sealing --- + // Since Prepare() was called without sleeping, we wait here instead. + // This is AFTER the abort check — if the block was aborted, we skip + // this wait entirely (zero wasted time). 
+ if delay := time.Until(finalSpecHeader.GetActualTime()); delay > 0 { + select { + case <-time.After(delay): + case <-w.exitCh: + return // defer clears pendingWorkBlock + } + } + + // --- Inline seal + write (bypass taskLoop/resultLoop) --- + // Uses emitHeadEvent=false to avoid deadlock: mainLoop is blocked here, + // and chainHeadFeed.Send would block if newWorkLoop's channel fills up. + sealedBlock, err := w.inlineSealAndWrite(blockSpec, receiptsSpec, specState, witnessSpec) + if err != nil { + log.Error("Pipelined SRC: inline seal+write failed", "block", nextBlockNumber, "err", err) + break + } + pipelineSpeculativeBlocksCounter.Inc(1) + + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: block sealed (inline)", "number", sealedBlock.Number(), + "txs", len(sealedBlock.Transactions()), "root", rootSpec) + } + + // --- Shift variables for next iteration --- + blockNNumber = nextBlockNumber + nextBlockNumber = nextNextBlockNumber + rootN = rootSpec + realBlockNHash = sealedBlock.Hash() + specHeader = specHeaderNext + specState = specStateNext + specEnv = specEnvNext + coinbase = coinbaseNext + eip2935Abort = nextEIP2935Abort + curBlockhashAccessed = nextBlockhashAccessed + } +} + +// fallbackToSequential computes the state root inline and assembles block N +// without a background SRC goroutine. This avoids trie DB races between +// background and inline commits. 
+func (w *worker) fallbackToSequential(req *speculativeWorkReq) { + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: falling back to sequential execution") + } + pipelineSpeculativeAbortsCounter.Inc(1) + + borEngine, ok := w.engine.(*bor.Bor) + if !ok { + return + } + + root := req.blockNEnv.state.IntermediateRoot(w.chainConfig.IsEIP158(req.blockNEnv.header.Number)) + + block, receipts, err := borEngine.AssembleBlock(w.chain, req.blockNEnv.header, req.blockNEnv.state, &types.Body{ + Transactions: req.blockNEnv.txs, + }, req.blockNEnv.receipts, root, req.stateSyncData) + if err != nil { + log.Error("Pipelined SRC: AssembleBlock failed during fallback", "err", err) + return + } + + select { + case w.taskCh <- &task{receipts: receipts, state: req.blockNEnv.state, block: block, createdAt: time.Now()}: + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: fallback block sealed", "number", block.Number(), "root", root) + } + case <-w.exitCh: + } +} + +// sealBlockViaTaskCh spawns SRC (if needed), waits for the root, assembles the +// block, and sends it through the normal taskCh → taskLoop → Seal → resultLoop +// path. Used for the last block in a pipeline run so that resultLoop emits +// ChainHeadEvent and normal block production resumes immediately. 
+func (w *worker) sealBlockViaTaskCh( + borEngine *bor.Bor, + finalHeader *types.Header, + statedb *state.StateDB, + txs []*types.Transaction, + receipts []*types.Receipt, + stateSyncData []*types.StateSyncData, + rootN common.Hash, + flatDiff *state.FlatDiff, + spawnSRC bool, // false if SRC goroutine is already running +) { + if spawnSRC { + tmpBlock := types.NewBlockWithHeader(finalHeader) + w.chain.SpawnSRCGoroutine(tmpBlock, rootN, flatDiff) + w.chain.SetLastFlatDiff(flatDiff, finalHeader.Hash()) + } + pipelineSpeculativeBlocksCounter.Inc(1) + + rootSpec, witnessSpec, err := w.chain.WaitForSRC() + if err != nil { + log.Error("Pipelined SRC: SRC failed", "block", finalHeader.Number, "err", err) + return + } + + block, blockReceipts, err := borEngine.AssembleBlock(w.chain, finalHeader, statedb, &types.Body{ + Transactions: txs, + }, receipts, rootSpec, stateSyncData) + if err != nil { + log.Error("Pipelined SRC: AssembleBlock failed", "block", finalHeader.Number, "err", err) + return + } + + // Wait for the block's target timestamp before sending to taskCh. + // Since Prepare() was called without sleeping, we wait here instead. + if delay := time.Until(finalHeader.GetActualTime()); delay > 0 { + select { + case <-time.After(delay): + case <-w.exitCh: + return + } + } + + select { + case w.taskCh <- &task{receipts: blockReceipts, state: statedb, block: block, createdAt: time.Now(), pipelined: true, witnessBytes: witnessSpec}: + if w.config.PipelinedSRCLogs { + log.Info("Pipelined SRC: block sealed", "number", block.Number(), + "txs", len(block.Transactions()), "root", rootSpec) + } + case <-w.exitCh: + } +} + +// inlineSealAndWrite seals a pipelined block using a private channel (bypassing +// taskLoop/resultLoop) and writes it directly to the chain. This avoids the +// race condition where rapid submissions to the unbuffered taskCh cause delays +// and duplicate blocks. 
+// +// Uses emitHeadEvent=false to avoid a deadlock: mainLoop is blocked in +// commitSpeculativeWork, so chainHeadFeed.Send would eventually block when +// newWorkLoop's channel fills up. +func (w *worker) inlineSealAndWrite(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB, witnessBytes []byte) (*types.Block, error) { + // Seal the block via a private channel — reuses Seal() without contention + // on the shared w.resultCh. For primary producers on Bhilai+, delay=0. + sealCh := make(chan *consensus.NewSealedBlockEvent, 1) + stopCh := make(chan struct{}) + + if err := w.engine.Seal(w.chain, block, nil, sealCh, stopCh); err != nil { + return nil, fmt.Errorf("seal failed: %w", err) + } + + var sealedBlock *types.Block + select { + case ev := <-sealCh: + if ev == nil || ev.Block == nil { + return nil, errors.New("nil sealed block from Seal") + } + sealedBlock = ev.Block + case <-time.After(5 * time.Second): + close(stopCh) + return nil, errors.New("inline seal timed out") + case <-w.exitCh: + close(stopCh) + return nil, errors.New("worker stopped during inline seal") + } + + hash := sealedBlock.Hash() + + // Fix up receipt block hashes (same as resultLoop) + sealedReceipts := make([]*types.Receipt, len(receipts)) + var logs []*types.Log + + for i, r := range receipts { + receipt := new(types.Receipt) + sealedReceipts[i] = receipt + *receipt = *r + + receipt.BlockHash = hash + receipt.BlockNumber = sealedBlock.Number() + receipt.TransactionIndex = uint(i) + + receipt.Logs = make([]*types.Log, len(r.Logs)) + for j, l := range r.Logs { + logCopy := new(types.Log) + receipt.Logs[j] = logCopy + *logCopy = *l + logCopy.BlockHash = hash + } + + logs = append(logs, receipt.Logs...) + } + + // Write to chain WITHOUT emitting ChainHeadEvent (emitHeadEvent=false). 
+ _, err := w.chain.WriteBlockAndSetHeadPipelined(sealedBlock, sealedReceipts, logs, statedb, false, witnessBytes) + if err != nil { + return nil, fmt.Errorf("write to chain failed: %w", err) + } + + log.Info("Successfully sealed new block", "number", sealedBlock.Number(), + "sealhash", w.engine.SealHash(sealedBlock.Header()), "hash", hash, + "elapsed", "inline") + + // Broadcast the block to peers + w.mux.Post(core.NewMinedBlockEvent{Block: sealedBlock, SealedAt: time.Now()}) + + sealedBlocksCounter.Inc(1) + if sealedBlock.Transactions().Len() == 0 { + sealedEmptyBlocksCounter.Inc(1) + } + w.clearPending(sealedBlock.NumberU64()) + + return sealedBlock, nil +} diff --git a/miner/speculative_chain_reader.go b/miner/speculative_chain_reader.go new file mode 100644 index 0000000000..5bf05199d2 --- /dev/null +++ b/miner/speculative_chain_reader.go @@ -0,0 +1,115 @@ +package miner + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// speculativeChainReader wraps a real ChainHeaderReader and intercepts +// hash-based lookups for a pending block whose hash is not yet known +// (because its state root is still being computed by the SRC goroutine). +// +// During pipelined SRC, block N+1's Prepare() needs to look up block N's +// header — but block N hasn't been written to the chain DB yet. The wrapper +// maps a deterministic placeholder hash to block N's provisional header +// (complete except for Root), allowing Prepare() and snapshot walks to proceed. +// +// The snapshot walk (bor.go:686) starts from header.ParentHash. For the +// speculative header, that's the placeholder hash. The wrapper returns +// pendingParentHeader for that lookup. Subsequent walk steps use +// pendingParentHeader.ParentHash (= hash(block_{N-1})), which is in the +// real chain DB, so the walk continues normally. 
+type speculativeChainReader struct { + inner consensus.ChainHeaderReader + pendingParentHeader *types.Header // block N's header (complete except Root) + placeholderHash common.Hash // the placeholder used as block N+1's ParentHash +} + +// newSpeculativeChainReader creates a wrapper that intercepts lookups for +// the pending parent block. +// +// pendingParentHeader must have all fields set except Root. The caller must +// ensure that pendingParentHeader.ParentHash points to a block that IS in +// the chain DB (block N-1). +// +// placeholderHash is a deterministic sentinel used as ParentHash in the +// speculative block N+1 header. It must NOT collide with any real block hash. +func newSpeculativeChainReader( + inner consensus.ChainHeaderReader, + pendingParentHeader *types.Header, + placeholderHash common.Hash, +) *speculativeChainReader { + return &speculativeChainReader{ + inner: inner, + pendingParentHeader: pendingParentHeader, + placeholderHash: placeholderHash, + } +} + +func (s *speculativeChainReader) Config() *params.ChainConfig { + return s.inner.Config() +} + +func (s *speculativeChainReader) CurrentHeader() *types.Header { + return s.inner.CurrentHeader() +} + +func (s *speculativeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { + if hash == s.placeholderHash && number == s.pendingParentHeader.Number.Uint64() { + return s.pendingParentHeader + } + return s.inner.GetHeader(hash, number) +} + +func (s *speculativeChainReader) GetHeaderByNumber(number uint64) *types.Header { + if number == s.pendingParentHeader.Number.Uint64() { + return s.pendingParentHeader + } + return s.inner.GetHeaderByNumber(number) +} + +func (s *speculativeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { + if hash == s.placeholderHash { + return s.pendingParentHeader + } + return s.inner.GetHeaderByHash(hash) +} + +func (s *speculativeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { + if hash == s.placeholderHash && number == 
s.pendingParentHeader.Number.Uint64() { + // Return the parent's TD. This is an approximation — the real TD + // would include block N's difficulty, but Bor's Prepare() does not + // use TD from GetTd. Seal() uses it for broadcast, but that happens + // after the real header is assembled. + return s.inner.GetTd(s.pendingParentHeader.ParentHash, s.pendingParentHeader.Number.Uint64()-1) + } + return s.inner.GetTd(hash, number) +} + +// speculativeChainContext wraps speculativeChainReader and adds the Engine() +// method, satisfying core.ChainContext. This is needed because +// NewEVMBlockContext takes a ChainContext. +type speculativeChainContext struct { + *speculativeChainReader + engine consensus.Engine +} + +// newSpeculativeChainContext creates a ChainContext backed by the speculative +// reader and the given consensus engine. +func newSpeculativeChainContext( + reader *speculativeChainReader, + engine consensus.Engine, +) *speculativeChainContext { + return &speculativeChainContext{ + speculativeChainReader: reader, + engine: engine, + } +} + +func (s *speculativeChainContext) Engine() consensus.Engine { + return s.engine +} diff --git a/miner/speculative_chain_reader_test.go b/miner/speculative_chain_reader_test.go new file mode 100644 index 0000000000..1dd0a1cf8b --- /dev/null +++ b/miner/speculative_chain_reader_test.go @@ -0,0 +1,204 @@ +package miner + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// mockChainHeaderReader implements consensus.ChainHeaderReader for testing. 
+type mockChainHeaderReader struct { + headers map[common.Hash]*types.Header + byNum map[uint64]*types.Header +} + +func newMockChainHeaderReader() *mockChainHeaderReader { + return &mockChainHeaderReader{ + headers: make(map[common.Hash]*types.Header), + byNum: make(map[uint64]*types.Header), + } +} + +func (m *mockChainHeaderReader) addHeader(h *types.Header) { + m.headers[h.Hash()] = h + m.byNum[h.Number.Uint64()] = h +} + +func (m *mockChainHeaderReader) Config() *params.ChainConfig { return params.TestChainConfig } +func (m *mockChainHeaderReader) CurrentHeader() *types.Header { return nil } +func (m *mockChainHeaderReader) GetTd(common.Hash, uint64) *big.Int { return big.NewInt(1) } + +func (m *mockChainHeaderReader) GetHeader(hash common.Hash, number uint64) *types.Header { + h, ok := m.headers[hash] + if ok && h.Number.Uint64() == number { + return h + } + return nil +} + +func (m *mockChainHeaderReader) GetHeaderByNumber(number uint64) *types.Header { + return m.byNum[number] +} + +func (m *mockChainHeaderReader) GetHeaderByHash(hash common.Hash) *types.Header { + return m.headers[hash] +} + +func TestSpeculativeChainReader_InterceptsPlaceholder(t *testing.T) { + inner := newMockChainHeaderReader() + + // Build a simple chain: block 8 (committed), block 9 (pending) + header8 := &types.Header{Number: big.NewInt(8), Extra: []byte("block8")} + inner.addHeader(header8) + + // Block 9 is pending — not in the chain DB + pendingHeader9 := &types.Header{ + Number: big.NewInt(9), + ParentHash: header8.Hash(), + Extra: []byte("block9-pending"), + } + + placeholder := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder) + + // GetHeader with placeholder hash and number 9 should return pending header + got := reader.GetHeader(placeholder, 9) + if got == nil { + t.Fatal("GetHeader(placeholder, 9) returned nil") + } + if got.Number.Uint64() != 9 { + 
t.Errorf("expected block 9, got %d", got.Number.Uint64()) + } + if string(got.Extra) != "block9-pending" { + t.Errorf("expected pending header extra, got %s", string(got.Extra)) + } + + // GetHeaderByHash with placeholder should return pending header + got = reader.GetHeaderByHash(placeholder) + if got == nil { + t.Fatal("GetHeaderByHash(placeholder) returned nil") + } + if got.Number.Uint64() != 9 { + t.Errorf("expected block 9, got %d", got.Number.Uint64()) + } + + // GetHeaderByNumber(9) should return pending header + got = reader.GetHeaderByNumber(9) + if got == nil { + t.Fatal("GetHeaderByNumber(9) returned nil") + } + if string(got.Extra) != "block9-pending" { + t.Errorf("expected pending header, got %s", string(got.Extra)) + } +} + +func TestSpeculativeChainReader_DelegatesNonPlaceholder(t *testing.T) { + inner := newMockChainHeaderReader() + + header7 := &types.Header{Number: big.NewInt(7), Extra: []byte("block7")} + header8 := &types.Header{Number: big.NewInt(8), Extra: []byte("block8")} + inner.addHeader(header7) + inner.addHeader(header8) + + pendingHeader9 := &types.Header{ + Number: big.NewInt(9), + ParentHash: header8.Hash(), + } + + placeholder := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder) + + // Looking up block 8 by its real hash should delegate to inner + got := reader.GetHeader(header8.Hash(), 8) + if got == nil { + t.Fatal("GetHeader(block8Hash, 8) returned nil") + } + if string(got.Extra) != "block8" { + t.Errorf("expected block8 header, got %s", string(got.Extra)) + } + + // GetHeaderByNumber(7) should delegate + got = reader.GetHeaderByNumber(7) + if got == nil { + t.Fatal("GetHeaderByNumber(7) returned nil") + } + if string(got.Extra) != "block7" { + t.Errorf("expected block7 header, got %s", string(got.Extra)) + } + + // Unknown hash should return nil + got = reader.GetHeader(common.HexToHash("0x1234"), 99) + if got != nil { 
+ t.Error("expected nil for unknown hash") + } +} + +func TestSpeculativeChainReader_WalkThroughPending(t *testing.T) { + // Simulate the snapshot walk: start at pending block 9, walk to block 8 (in chain) + inner := newMockChainHeaderReader() + + header7 := &types.Header{Number: big.NewInt(7), Extra: []byte("block7")} + header8 := &types.Header{Number: big.NewInt(8), ParentHash: header7.Hash(), Extra: []byte("block8")} + inner.addHeader(header7) + inner.addHeader(header8) + + pendingHeader9 := &types.Header{ + Number: big.NewInt(9), + ParentHash: header8.Hash(), + Extra: []byte("block9-pending"), + } + + placeholder := common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000") + reader := newSpeculativeChainReader(inner, pendingHeader9, placeholder) + + // Step 1: look up block 9 via placeholder → returns pending header + h9 := reader.GetHeader(placeholder, 9) + if h9 == nil { + t.Fatal("step 1: pending header not found") + } + + // Step 2: walk to block 8 using h9.ParentHash (= header8.Hash(), a real hash) + h8 := reader.GetHeader(h9.ParentHash, 8) + if h8 == nil { + t.Fatal("step 2: block 8 not found via ParentHash walk") + } + if string(h8.Extra) != "block8" { + t.Errorf("step 2: expected block8, got %s", string(h8.Extra)) + } + + // Step 3: walk to block 7 using h8.ParentHash + h7 := reader.GetHeader(h8.ParentHash, 7) + if h7 == nil { + t.Fatal("step 3: block 7 not found via ParentHash walk") + } + if string(h7.Extra) != "block7" { + t.Errorf("step 3: expected block7, got %s", string(h7.Extra)) + } +} + +func TestSpeculativeChainReader_Config(t *testing.T) { + inner := newMockChainHeaderReader() + pendingHeader := &types.Header{Number: big.NewInt(5)} + reader := newSpeculativeChainReader(inner, pendingHeader, common.Hash{}) + + if reader.Config() != params.TestChainConfig { + t.Error("Config() should delegate to inner") + } +} + +func TestSpeculativeChainContext_Engine(t *testing.T) { + inner := newMockChainHeaderReader() + 
pendingHeader := &types.Header{Number: big.NewInt(5)} + reader := newSpeculativeChainReader(inner, pendingHeader, common.Hash{}) + + var mockEngine consensus.Engine // nil for testing + ctx := newSpeculativeChainContext(reader, mockEngine) + + if ctx.Engine() != mockEngine { + t.Error("Engine() should return the provided engine") + } +} diff --git a/miner/worker.go b/miner/worker.go index ece318d1d9..a46c716cda 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -263,6 +263,8 @@ type task struct { createdAt time.Time productionElapsed time.Duration // elapsed from after prepareWork to task submission (excludes sealing wait); used for workerMgaspsTimer and workerBlockExecutionTimer intermediateRootTime time.Duration // time spent in IntermediateRoot inside FinalizeAndAssemble; subtracted when computing workerBlockExecutionTimer + pipelined bool // If true, state was already committed by SRC goroutine — skip CommitWithUpdate in writeBlockWithState + witnessBytes []byte // RLP-encoded witness from SRC goroutine (for pipelined blocks) } // txFits reports whether the transaction fits into the block size limit. 
@@ -411,6 +413,9 @@ type worker struct { noempty atomic.Bool makeWitness bool + + // Pipelined SRC: speculative work channel for block N+1 execution + speculativeWorkCh chan *speculativeWorkReq } //nolint:staticcheck @@ -441,6 +446,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus blockTime: config.BlockTime, slowTxTracker: newSlowTxTopTracker(), makeWitness: makeWitness, + speculativeWorkCh: make(chan *speculativeWorkReq, 1), } worker.noempty.Store(true) // Subscribe for transaction insertion events (whether from network or resurrects) @@ -839,6 +845,9 @@ func (w *worker) mainLoop() { w.commitWork(req.interrupt, req.noempty, req.timestamp) } + case req := <-w.speculativeWorkCh: + w.commitSpeculativeWork(req) + case req := <-w.getWorkCh: req.result <- w.generateWork(req.params, false) @@ -1101,9 +1110,15 @@ func (w *worker) resultLoop() { } // Commit block and state to database. + // For pipelined blocks, state was already committed by the SRC goroutine — + // use WriteBlockAndSetHeadPipelined to skip the redundant CommitWithUpdate. writeStart := time.Now() - _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true) writeElapsed := time.Since(writeStart) + if task.pipelined { + _, err = w.chain.WriteBlockAndSetHeadPipelined(block, receipts, logs, task.state, true, task.witnessBytes) + } else { + _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true) + } writeBlockAndSetHeadTimer.Update(writeElapsed) if err != nil { @@ -1167,26 +1182,7 @@ func (w *worker) makeEnv(header *types.Header, coinbase common.Address, witness return nil, fmt.Errorf("parent block not found") } var err error - if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsDelayedSRC(header.Number) { - // Under delayed SRC, the actual pre-state for executing block N is - // root_{N-1} = GetPostStateRoot(parent.ParentHash). 
- // G_{N-1} has already finished (it was the sync point during parent's - // validation), so this lookup is immediate — no blocking. - // G_N (computing root_N from FlatDiff_N) is still running concurrently. - // We open state at root_{N-1} + FlatDiff_N overlay, which gives a - // complete view of block N's post-execution state without waiting for G_N. - baseRoot := w.chain.GetPostStateRoot(parent.ParentHash) - if baseRoot == (common.Hash{}) { - return nil, fmt.Errorf("delayed state root unavailable for grandparent %s", parent.ParentHash) - } - flatDiff := w.chain.GetLastFlatDiff() - if flatDiff == nil { - return nil, fmt.Errorf("no flat diff available for delayed SRC block building") - } - state, err = w.chain.StateAtWithFlatDiff(baseRoot, flatDiff) - } else { - state, err = w.chain.StateAt(parent.Root) - } + state, err = w.chain.StateAt(parent.Root) if err != nil { return nil, err } @@ -1947,8 +1943,15 @@ func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int } // Clear the pending work block number when commitWork completes (success or failure). + // If the pipelined path was taken, pendingWorkBlock was set to N+1 by + // buildAndCommitBlock — don't overwrite it back to 0 in that case. + currentBlockNum := w.chain.CurrentBlock().Number.Uint64() defer func() { - w.pendingWorkBlock.Store(0) + // Only clear if the pipeline didn't advance pendingWorkBlock beyond + // what commitWork originally set it to. + if w.pendingWorkBlock.Load() <= currentBlockNum+1 { + w.pendingWorkBlock.Store(0) + } }() // Set the coinbase if the worker is running or it's required @@ -1974,21 +1977,11 @@ func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int timestamp: uint64(timestamp), coinbase: coinbase, parentHash: parent.Hash(), + statedb: state, prefetchReader: prefetchReader, processReader: processReader, prefetchedTxHashes: &sync.Map{}, } - // Default to state (correct for pre-fork and activation boundary). 
- // Under delayed SRC, parent.Root = root_{N-1} and misses block N's mutations; - // overlay flatDiff_N to get the correct pre-state when it is available. - genParams.statedb = state - if w.chainConfig.Bor != nil && w.chainConfig.Bor.IsDelayedSRC(new(big.Int).Add(parent.Number, big.NewInt(1))) { - if flatDiff := w.chain.GetLastFlatDiff(); flatDiff != nil { - if s, ferr := w.chain.StateAtWithFlatDiff(parent.Root, flatDiff); ferr == nil { - genParams.statedb = s - } - } - } var interruptPrefetch atomic.Bool newBlockNumber := new(big.Int).Add(parent.Number, common.Big1) @@ -2083,7 +2076,16 @@ func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genP return } // Submit the generated block for consensus sealing. - _ = w.commit(work.copy(), w.fullTaskHook, true, start, genParams) + // If pipelining is eligible, use commitPipelined to overlap SRC with N+1 execution. + if w.isPipelineEligible(work.header.Number.Uint64()) { + // Set pendingWorkBlock to N+1 so that when ChainHeadEvent fires for + // block N, newWorkLoop's de-duplication check sees that N+1 is already + // being worked on and skips the redundant commitWork. + w.pendingWorkBlock.Store(work.header.Number.Uint64() + 1) + _ = w.commitPipelined(work, start) + } else { + _ = w.commit(work.copy(), w.fullTaskHook, true, start, genParams) + } // Swap out the old work with the new one, terminating any leftover // prefetcher processes in the mean time and starting a new one. 
diff --git a/params/config.go b/params/config.go index 9397f08afd..b03fdbd325 100644 --- a/params/config.go +++ b/params/config.go @@ -954,7 +954,6 @@ type BorConfig struct { LisovoBlock *big.Int `json:"lisovoBlock"` // Lisovo switch block (nil = no fork, 0 = already on lisovo) LisovoProBlock *big.Int `json:"lisovoProBlock"` // LisovoPro switch block (nil = no fork, 0 = already on lisovoPro) GiuglianoBlock *big.Int `json:"giuglianoBlock"` // Giugliano switch block (nil = no fork, 0 = already on giugliano) - DelayedSRCBlock *big.Int `json:"delayedSRCBlock"` // DelayedSRC switch block (nil = no fork, 0 = already on delayedSRC) } // String implements the stringer interface, returning the consensus engine details. @@ -1030,10 +1029,6 @@ func (c *BorConfig) IsGiugliano(number *big.Int) bool { return isBlockForked(c.GiuglianoBlock, number) } -func (c *BorConfig) IsDelayedSRC(number *big.Int) bool { - return isBlockForked(c.DelayedSRCBlock, number) -} - // GetTargetGasPercentage returns the target gas percentage for gas limit calculation. // After Lisovo hard fork, this value can be configured via CLI flags (stored in BorConfig at runtime). // It validates the configured value and falls back to defaults if invalid or nil. @@ -1880,7 +1875,6 @@ type Rules struct { IsMadhugiriPro bool IsLisovo bool IsLisovoPro bool - IsDelayedSRC bool } // Rules ensures c's ChainID is not nil. 
@@ -1916,6 +1910,5 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, _ uint64) Rules { IsMadhugiriPro: c.Bor != nil && c.Bor.IsMadhugiriPro(num), IsLisovo: c.Bor != nil && c.Bor.IsLisovo(num), IsLisovoPro: c.Bor != nil && c.Bor.IsLisovoPro(num), - IsDelayedSRC: c.Bor != nil && c.Bor.IsDelayedSRC(num), } } diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index d91e4f267a..62279765eb 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -36,7 +36,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -2934,375 +2933,163 @@ func getMockedSpannerWithSpanRotation(t *testing.T, validator1, validator2 commo return spanner } -// TestDelayedStateRoot verifies the Delayed SRC protocol across the hard fork -// boundary. Before the fork, block[N].Header.Root is the actual post-execution -// state root of block N. After the fork, block[N].Header.Root stores the -// post-execution state root of block N-1 (the parent), computed concurrently -// by a background goroutine. -func TestDelayedStateRoot(t *testing.T) { +// TestPipelinedSRC_BasicBlockProduction verifies that a single miner with +// pipelined SRC enabled can produce multiple consecutive blocks correctly. +// This exercises the full pipeline: commitPipelined → FlatDiff extraction → +// background SRC goroutine → speculative N+1 execution → block assembly → seal. 
+func TestPipelinedSRC_BasicBlockProduction(t *testing.T) { t.Parallel() log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + fdlimit.Raise(2048) - const delayedSRCBlock = 5 - - updateGenesis := func(gen *core.Genesis) { - gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock) - // Large sprint to avoid hitting sprint boundaries that invoke StateSyncEvents. - gen.Config.Bor.Sprint = map[string]uint64{"0": 64} + faucets := make([]*ecdsa.PrivateKey, 128) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() } - init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), updateGenesis) - chain := init.ethereum.BlockChain() - engine := init.ethereum.Engine() - _bor := engine.(*bor.Bor) - defer _bor.Close() - - span0 := createMockSpan(addr, chain.Config().ChainID.String()) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - h := createMockHeimdall(ctrl, &span0, &span0) - _bor.SetHeimdallClient(h) - - validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators - spanner := getMockedSpanner(t, validators) - _bor.SetSpanner(spanner) - - // Build and insert 7 blocks: blocks 1-4 are pre-fork, blocks 5-7 are post-fork. - // insertNewBlock calls t.Fatalf on error, so a ValidateState failure here means - // the goroutine computed the wrong root or the protocol invariant was violated. - const numBlocks = 7 - blocks := make([]*types.Block, numBlocks+1) - blocks[0] = init.genesis.ToBlock() - - for i := 1; i <= numBlocks; i++ { - blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil) - insertNewBlock(t, chain, blocks[i]) - } - - // Pre-fork invariant: GetPostStateRoot(block_N) == block_N.Header.Root, - // because header.Root IS the block's own post-execution state root before the fork. 
- for i := 1; i < delayedSRCBlock; i++ { - got := chain.GetPostStateRoot(blocks[i].Hash()) - require.NotEqual(t, common.Hash{}, got, "pre-fork block %d: delayed root should not be zero", i) - require.Equal(t, blocks[i].Header().Root, got, - "pre-fork block %d: GetPostStateRoot should match header.Root", i) - } - - // Post-fork invariant: block[N].Header.Root == GetPostStateRoot(block[N-1]). - // For N == delayedSRCBlock this also covers the activation boundary where block[N-1] - // is still pre-fork (its delayed root equals its own header.Root). - for i := delayedSRCBlock; i <= numBlocks; i++ { - parentDelayedRoot := chain.GetPostStateRoot(blocks[i-1].Hash()) - require.NotEqual(t, common.Hash{}, parentDelayedRoot, - "block %d parent: delayed root should not be zero", i) - require.Equal(t, parentDelayedRoot, blocks[i].Header().Root, - "post-fork block %d: header.Root should equal GetPostStateRoot(parent)", i) - } - - // The last inserted block's delayed state root is computed by a background goroutine - // and stored in pendingSRC (no child block has been inserted to carry it in its - // header.Root). GetPostStateRoot waits for that goroutine and returns - // its result directly. - lastRoot := chain.GetPostStateRoot(blocks[numBlocks].Hash()) - require.NotEqual(t, common.Hash{}, lastRoot, - "last post-fork block: delayed root from in-flight goroutine should not be zero") -} - -// TestDelayedStateRootImport extends TestDelayedStateRoot to verify that the -// stateless witness for each post-fork block is correctly built and persisted -// by the background SRC goroutine. After block[N+1] is inserted, G_N has -// finished (ValidateState(N+1) is the sync point inside processBlock), so the -// witness for block N must already be in the database. 
-func TestDelayedStateRootImport(t *testing.T) { - t.Parallel() - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16) + genesis.Config.Bor.Period = map[string]uint64{"0": 2} + genesis.Config.Bor.Sprint = map[string]uint64{"0": 16} + genesis.Config.Bor.RioBlock = big.NewInt(0) // Enable Rio so snapshot uses spanByBlockNumber (no ecrecover needed) - const delayedSRCBlock = 5 + // Start a single miner with pipelined SRC enabled + stack, ethBackend, err := InitMinerWithPipelinedSRC(genesis, keys[0], true) + require.NoError(t, err) + defer stack.Close() - updateGenesis := func(gen *core.Genesis) { - gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock) - gen.Config.Bor.Sprint = map[string]uint64{"0": 64} + for stack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) } - init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), updateGenesis) - chain := init.ethereum.BlockChain() - engine := init.ethereum.Engine() - _bor := engine.(*bor.Bor) - defer _bor.Close() - - span0 := createMockSpan(addr, chain.Config().ChainID.String()) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - h := createMockHeimdall(ctrl, &span0, &span0) - _bor.SetHeimdallClient(h) - - validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators - spanner := getMockedSpanner(t, validators) - _bor.SetSpanner(spanner) + // Start mining + err = ethBackend.StartMining() + require.NoError(t, err) - // Build and insert 9 blocks: blocks 1-4 are pre-fork, blocks 5-9 are post-fork. 
- const numBlocks = 9 - blocks := make([]*types.Block, numBlocks+1) - blocks[0] = init.genesis.ToBlock() - - for i := 1; i <= numBlocks; i++ { - blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil) - insertNewBlock(t, chain, blocks[i]) - - // After inserting block[i], the sync point inside processBlock has already - // waited for G_{i-1} to finish. Therefore the witness for block[i-1] must - // be in the database — but only for post-fork blocks (i-1 >= delayedSRCBlock). - if i > delayedSRCBlock { - prevHash := blocks[i-1].Hash() - witnessBytes := chain.GetWitness(prevHash) - require.NotNil(t, witnessBytes, - "witness for block %d should be in DB after inserting block %d", i-1, i) - - w, err := stateless.GetWitnessFromRlp(witnessBytes) - require.NoError(t, err, "witness for block %d: RLP decode failed", i-1) - - // Under delayed SRC the goroutine embeds parentRoot (= root_{i-2}) as - // w.Header().Root. block[i-1].Header().Root is also root_{i-2} by the - // protocol invariant (post-fork header stores parent's actual state root). - require.Equal(t, blocks[i-1].Header().Root, w.Header().Root, - "block %d witness: Header.Root should equal block's header.Root (pre-state root)", i-1) + // Wait for the miner to produce at least 10 blocks + targetBlock := uint64(10) + deadline := time.After(60 * time.Second) + for { + select { + case <-deadline: + currentNum := ethBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Fatalf("Timed out waiting for block %d, current block: %d", targetBlock, currentNum) + default: + time.Sleep(500 * time.Millisecond) + if ethBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock { + goto done + } } } +done: - // Wait for G_{numBlocks} (the last goroutine) to finish. 
- lastRoot := chain.GetPostStateRoot(blocks[numBlocks].Hash()) - require.NotEqual(t, common.Hash{}, lastRoot, - "last post-fork block: delayed root from in-flight goroutine should not be zero") - - // With G_{numBlocks} done, the witness for the last block is now in the database. - lastWitnessBytes := chain.GetWitness(blocks[numBlocks].Hash()) - require.NotNil(t, lastWitnessBytes, - "witness for last block should be in DB after goroutine completes") + chain := ethBackend.BlockChain() + currentNum := chain.CurrentBlock().Number.Uint64() + t.Logf("Miner produced %d blocks with pipelined SRC", currentNum) + + // Verify chain integrity: each block's parent hash matches the previous block's hash + for i := uint64(1); i <= currentNum; i++ { + block := chain.GetBlockByNumber(i) + require.NotNil(t, block, "block %d not found", i) + + if i > 0 { + parent := chain.GetBlockByNumber(i - 1) + require.NotNil(t, parent, "parent block %d not found", i-1) + require.Equal(t, parent.Hash(), block.ParentHash(), + "block %d ParentHash mismatch: expected %x, got %x", i, parent.Hash(), block.ParentHash()) + } - lastWitness, err := stateless.GetWitnessFromRlp(lastWitnessBytes) - require.NoError(t, err, "witness for last block: RLP decode failed") - require.Equal(t, blocks[numBlocks].Header().Root, lastWitness.Header().Root, - "last block witness: Header.Root should equal block's header.Root") + // Verify state root is valid (can open state at this root) + _, err := chain.StateAt(block.Root()) + require.NoError(t, err, "cannot open state at block %d root %x", i, block.Root()) + } } -// TestDelayedStateRootMiner verifies the Delayed SRC protocol on the block -// production (miner) path. writeBlockAndSetHead defers CommitWithUpdate to a -// background goroutine and stores the resulting FlatDiff so the miner can open -// the next block's state immediately via NewWithFlatBase without waiting for -// the goroutine to commit the trie. 
-func TestDelayedStateRootMiner(t *testing.T) { +// TestPipelinedSRC_WithTransactions verifies that the pipelined SRC miner +// correctly includes transactions in blocks. +func TestPipelinedSRC_WithTransactions(t *testing.T) { t.Parallel() log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + fdlimit.Raise(2048) - const delayedSRCBlock = 3 - const targetBlock = 7 + faucets := make([]*ecdsa.PrivateKey, 128) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() + } - // Build a genesis with DelayedSRCBlock=3 and a large sprint to avoid - // hitting sprint boundaries that trigger Heimdall StateSyncEvents calls. - genesis := InitGenesis(t, nil, "./testdata/genesis.json", 64) - genesis.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock) + genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16) + genesis.Config.Bor.Period = map[string]uint64{"0": 2} + genesis.Config.Bor.Sprint = map[string]uint64{"0": 16} + genesis.Config.Bor.RioBlock = big.NewInt(0) // Enable Rio for pipelined SRC - stack, ethBackend, err := InitMiner(genesis, key, true) + stack, ethBackend, err := InitMinerWithPipelinedSRC(genesis, keys[0], true) require.NoError(t, err) defer stack.Close() - chain := ethBackend.BlockChain() - - // Subscribe to both feeds before mining starts so we don't miss any events. 
- headCh := make(chan core.ChainHeadEvent, 20) - headSub := chain.SubscribeChainHeadEvent(headCh) - defer headSub.Unsubscribe() + for stack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } - witnessCh := make(chan core.WitnessReadyEvent, 20) - witnessSub := chain.SubscribeWitnessReadyEvent(witnessCh) - defer witnessSub.Unsubscribe() + err = ethBackend.StartMining() + require.NoError(t, err) - require.NoError(t, ethBackend.StartMining()) + // Wait for a few blocks first + for ethBackend.BlockChain().CurrentBlock().Number.Uint64() < 2 { + time.Sleep(500 * time.Millisecond) + } - // Collect ChainHeadEvents until we reach targetBlock; also drain - // WitnessReadyEvents that arrive concurrently. - witnessByBlock := make(map[uint64]*stateless.Witness) + // Submit transactions + txpool := ethBackend.TxPool() + senderKey := pkey1 + recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey) + signer := types.LatestSignerForChainID(genesis.Config.ChainID) - timeout := time.After(120 * time.Second) -collectLoop: - for { - select { - case ev := <-headCh: - if ev.Header.Number.Uint64() >= targetBlock { - break collectLoop - } - case ev := <-witnessCh: - witnessByBlock[ev.Block.NumberU64()] = ev.Witness - case <-timeout: - t.Fatal("timeout waiting for miner to produce blocks") - } + nonce := txpool.Nonce(crypto.PubkeyToAddress(senderKey.PublicKey)) + txCount := 10 + + for i := 0; i < txCount; i++ { + tx := types.NewTransaction( + nonce+uint64(i), + recipientAddr, + big.NewInt(1000), + 21000, + big.NewInt(30000000000), + nil, + ) + signedTx, err := types.SignTx(tx, signer, senderKey) + require.NoError(t, err) + errs := txpool.Add([]*types.Transaction{signedTx}, true) + require.Nil(t, errs[0], "failed to add tx %d", i) } - // Drain any events already queued in the channels (non-blocking). 
-drainLoop: + // Wait for transactions to be included in blocks + deadline := time.After(60 * time.Second) for { select { - case <-headCh: - case ev := <-witnessCh: - witnessByBlock[ev.Block.NumberU64()] = ev.Witness + case <-deadline: + t.Fatal("Timed out waiting for transactions to be included") default: - break drainLoop + time.Sleep(500 * time.Millisecond) + // Check if all transactions have been mined + currentNonce := txpool.Nonce(crypto.PubkeyToAddress(senderKey.PublicKey)) + if currentNonce >= nonce+uint64(txCount) { + goto txsDone + } } } +txsDone: - // Wait briefly for witnesses that G_N fires slightly after the corresponding - // ChainHeadEvent (the goroutine for block N finishes before ChainHeadEvent for - // block N+1, so witnesses for blocks < targetBlock should already be queued). - witnessTimer := time.NewTimer(5 * time.Second) - defer witnessTimer.Stop() -waitWitness: - for { - select { - case ev := <-witnessCh: - witnessByBlock[ev.Block.NumberU64()] = ev.Witness - case <-witnessTimer.C: - break waitWitness + chain := ethBackend.BlockChain() + currentNum := chain.CurrentBlock().Number.Uint64() + t.Logf("All %d transactions included by block %d", txCount, currentNum) + + // Verify we can find the transactions in the blocks + totalTxs := 0 + for i := uint64(1); i <= currentNum; i++ { + block := chain.GetBlockByNumber(i) + if block != nil { + totalTxs += len(block.Transactions()) } } - - // Pre-fork invariant: GetPostStateRoot(block_N) == block_N.Header.Root. - for i := uint64(1); i < delayedSRCBlock; i++ { - h := chain.GetHeaderByNumber(i) - require.NotNil(t, h, "pre-fork block %d not found", i) - got := chain.GetPostStateRoot(h.Hash()) - require.NotEqual(t, common.Hash{}, got, "pre-fork block %d: delayed root should not be zero", i) - require.Equal(t, h.Root, got, "pre-fork block %d: delayed root should equal header.Root", i) - } - - // Post-fork header root invariant: block[N].Root == GetPostStateRoot(block[N-1]). 
- for i := uint64(delayedSRCBlock); i <= targetBlock; i++ { - h := chain.GetHeaderByNumber(i) - require.NotNil(t, h, "post-fork block %d not found", i) - ph := chain.GetHeaderByNumber(i - 1) - require.NotNil(t, ph, "parent block %d not found", i-1) - - parentDelayedRoot := chain.GetPostStateRoot(ph.Hash()) - require.NotEqual(t, common.Hash{}, parentDelayedRoot, - "block %d parent: delayed root should not be zero", i) - require.Equal(t, parentDelayedRoot, h.Root, - "post-fork block %d: header.Root should equal GetPostStateRoot(parent)", i) - } - - // GetLastFlatDiff must be non-nil: writeBlockAndSetHead stores the FlatDiff - // from each post-fork sealed block so the miner can build the next block - // immediately without waiting for the SRC goroutine. - flatDiff := chain.GetLastFlatDiff() - require.NotNil(t, flatDiff, "GetLastFlatDiff() should be non-nil after post-fork mining") - - // WitnessReadyEvent must have been received for each post-fork block up to - // targetBlock-1. For block targetBlock the goroutine may still be running - // (it finishes before the next ChainHeadEvent, which we did not wait for). - for i := uint64(delayedSRCBlock); i < targetBlock; i++ { - w, ok := witnessByBlock[i] - require.True(t, ok, "WitnessReadyEvent not received for post-fork block %d", i) - require.NotNil(t, w, "witness for block %d should not be nil", i) - - h := chain.GetHeaderByNumber(i) - require.NotNil(t, h, "block %d header not found", i) - // The goroutine embeds parentRoot as w.Header().Root, and - // block[N].Header().Root is also parentRoot under delayed SRC. - require.Equal(t, h.Root, w.Header().Root, - "block %d witness: Header.Root should equal block's header.Root", i) - } -} - -// TestDelayedStateRootCrashRecovery simulates a crash where the SRC goroutine's -// persisted post-state root is lost. 
On reopening the blockchain, the startup -// recovery re-executes the head block, restoring the FlatDiff and spawning the -// SRC goroutine so PostExecutionStateAt returns correct state. -func TestDelayedStateRootCrashRecovery(t *testing.T) { - t.Parallel() - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) - - const delayedSRCBlock = 3 - - init := buildEthereumInstance(t, rawdb.NewMemoryDatabase(), func(gen *core.Genesis) { - gen.Config.Bor.DelayedSRCBlock = big.NewInt(delayedSRCBlock) - gen.Config.Bor.Sprint = map[string]uint64{"0": 64} - }) - - chain := init.ethereum.BlockChain() - engine := init.ethereum.Engine() - _bor := engine.(*bor.Bor) - defer _bor.Close() - - span0 := createMockSpan(addr, chain.Config().ChainID.String()) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - h := createMockHeimdall(ctrl, &span0, &span0) - _bor.SetHeimdallClient(h) - - validators := borSpan.ConvertHeimdallValSetToBorValSet(span0.ValidatorSet).Validators - spanner := getMockedSpanner(t, validators) - _bor.SetSpanner(spanner) - - // Build and insert blocks past the fork boundary. - const numBlocks = 7 - blocks := make([]*types.Block, numBlocks+1) - blocks[0] = init.genesis.ToBlock() - for i := 1; i <= numBlocks; i++ { - blocks[i] = buildNextBlock(t, _bor, chain, blocks[i-1], nil, init.genesis.Config.Bor, nil, validators, false, nil, nil) - insertNewBlock(t, chain, blocks[i]) - } - - // Wait for the last SRC goroutine to finish and record its root. - headHash := chain.CurrentBlock().Hash() - expectedRoot := chain.GetPostStateRoot(headHash) - require.NotEqual(t, common.Hash{}, expectedRoot, "post-state root should be computed") - - // Record the post-execution state for comparison after recovery. 
- preState, err := chain.PostExecutionStateAt(chain.CurrentBlock()) - require.NoError(t, err) - checkAddr := common.HexToAddress("0x0000000000000000000000000000000000001000") - expectedBalance := preState.GetBalance(checkAddr) - - // Grab a reference to the underlying DB before stopping. - db := init.ethereum.ChainDb() - - // Stop the chain cleanly (journals trie state). - chain.Stop() - - // Simulate crash: delete the persisted post-state root for the head block - // so that GetPostStateRoot returns empty on the next startup. - key := append(rawdb.PostStateRootPrefix, headHash.Bytes()...) - require.NoError(t, db.Delete(key)) - - // Also delete the child block's reference (there is no child block for the - // head, but verify ReadPostStateRoot returns empty now). - got := rawdb.ReadPostStateRoot(db, headHash) - require.Equal(t, common.Hash{}, got, "post-state root should be deleted from DB") - - // Reopen the blockchain on the same DB. The startup recovery should detect - // the missing post-state root and re-execute the head block. - chain2, err := core.NewBlockChain(db, init.genesis, engine, core.DefaultConfig()) - require.NoError(t, err) - defer chain2.Stop() - - // Verify the head block is unchanged. - require.Equal(t, headHash, chain2.CurrentBlock().Hash(), "head block should be the same after reopen") - - // Verify PostExecutionStateAt returns correct state (via the recovered FlatDiff). - postState, err := chain2.PostExecutionStateAt(chain2.CurrentBlock()) - require.NoError(t, err, "PostExecutionStateAt should succeed after recovery") - require.Equal(t, expectedBalance, postState.GetBalance(checkAddr), - "recovered state should match pre-crash state") - - // Verify GetPostStateRoot works (the SRC goroutine spawned by recovery - // should compute the root; wait for it). 
- recoveredRoot := chain2.GetPostStateRoot(headHash) - require.Equal(t, expectedRoot, recoveredRoot, - "recovered post-state root should match original") - - // Verify the root was persisted by the recovery goroutine. - persistedRoot := rawdb.ReadPostStateRoot(db, headHash) - require.Equal(t, expectedRoot, persistedRoot, - "post-state root should be re-persisted after recovery") + require.GreaterOrEqual(t, totalTxs, txCount, + "expected at least %d transactions across all blocks, got %d", txCount, totalTxs) } diff --git a/tests/bor/helper.go b/tests/bor/helper.go index 81df9ab851..1c94456b87 100644 --- a/tests/bor/helper.go +++ b/tests/bor/helper.go @@ -752,3 +752,70 @@ func InitMinerWithOptions(genesis *core.Genesis, privKey *ecdsa.PrivateKey, with return stack, ethBackend, err } + +// InitMinerWithPipelinedSRC creates a miner node with pipelined SRC enabled. +func InitMinerWithPipelinedSRC(genesis *core.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool) (*node.Node, *eth.Ethereum, error) { + datadir, err := os.MkdirTemp("", "InitMiner-"+uuid.New().String()) + if err != nil { + return nil, nil, err + } + + config := &node.Config{ + Name: "geth", + Version: params.Version, + DataDir: datadir, + P2P: p2p.Config{ + ListenAddr: "0.0.0.0:0", + NoDiscovery: true, + MaxPeers: 25, + }, + UseLightweightKDF: true, + } + stack, err := node.New(config) + if err != nil { + return nil, nil, err + } + + ethBackend, err := eth.New(stack, ðconfig.Config{ + Genesis: genesis, + NetworkId: genesis.Config.ChainID.Uint64(), + SyncMode: downloader.FullSync, + DatabaseCache: 256, + DatabaseHandles: 256, + TxPool: legacypool.DefaultConfig, + GPO: ethconfig.Defaults.GPO, + Miner: miner.Config{ + Etherbase: crypto.PubkeyToAddress(privKey.PublicKey), + GasCeil: genesis.GasLimit * 11 / 10, + GasPrice: big.NewInt(1), + Recommit: time.Second, + CommitInterruptFlag: true, + EnablePipelinedSRC: true, + PipelinedSRCLogs: true, + }, + WithoutHeimdall: withoutHeimdall, + }) + if err != nil { 
+ return nil, nil, err + } + + keydir2 := stack.KeyStoreDir() + n2, p2 := keystore.StandardScryptN, keystore.StandardScryptP + kStore2 := keystore.NewKeyStore(keydir2, n2, p2) + + _, err = kStore2.ImportECDSA(privKey, "") + if err != nil { + return nil, nil, err + } + + acc2 := kStore2.Accounts()[0] + err = kStore2.Unlock(acc2, "") + if err != nil { + return nil, nil, err + } + + ethBackend.AccountManager().AddBackend(kStore2) + + err = stack.Start() + return stack, ethBackend, err +} From ceca519ebee6f4ba523d6626d9a209aa2201fd55 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Wed, 1 Apr 2026 13:59:29 +0530 Subject: [PATCH 2/9] miner: run speculative fillTransactions concurrently with SRC and removed the post tx execution buffer time --- miner/pipeline.go | 127 +++++++++++++++++++++++++++++++--------------- miner/worker.go | 9 ++-- 2 files changed, 93 insertions(+), 43 deletions(-) diff --git a/miner/pipeline.go b/miner/pipeline.go index 255e7b1530..2fa23e95c0 100644 --- a/miner/pipeline.go +++ b/miner/pipeline.go @@ -287,34 +287,48 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { w.eth.TxPool().ResetSpeculativeState(blockNHeader, specTxPoolState) } - // --- Fill transactions for N+1 --- - // Reset the block building interrupt flag — it may have been set by block N's - // timeout timer. If we don't clear it, fillTransactions → Pending() sees the - // flag and returns an empty map, resulting in txs=0. 
- w.interruptBlockBuilding.Store(false) - - var specInterrupt atomic.Int32 - w.fillTransactions(&specInterrupt, specEnv) //nolint:errcheck - - // --- Check abort conditions --- - eip2935Abort := false - if w.chainConfig.IsPrague(specHeader.Number) { - dangerousSlot := common.BigToHash(new(big.Int).SetUint64(blockNNumber % params.HistoryServeWindow)) - if specState.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { - log.Warn("Pipelined SRC: discarding speculative N+1 — EIP-2935 slot accessed", - "block", nextBlockNumber, "slot", dangerousSlot) - eip2935Abort = true - pipelineEIP2935AbortsCounter.Inc(1) + // --- Fill transactions for N+1 (in goroutine) --- + // fillTransactions runs concurrently with SRC(N) so that sealing block N + // is not delayed by filling block N+1's transactions. + initialFillDone := make(chan struct{}) + var eip2935Abort bool + + go func() { + defer close(initialFillDone) + + specStopFn := createInterruptTimer( + specHeader.Number.Uint64(), + specHeader.GetActualTime(), + &w.interruptBlockBuilding, + &w.interruptFlagSetAt, + true, // pipelinedSRC — no 500ms buffer + ) + + var specInterrupt atomic.Int32 + w.fillTransactions(&specInterrupt, specEnv) //nolint:errcheck + specStopFn() + + // Check abort conditions (needs fill to be done). + if w.chainConfig.IsPrague(specHeader.Number) { + dangerousSlot := common.BigToHash(new(big.Int).SetUint64(blockNNumber % params.HistoryServeWindow)) + if specState.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { + log.Warn("Pipelined SRC: discarding speculative N+1 — EIP-2935 slot accessed", + "block", nextBlockNumber, "slot", dangerousSlot) + eip2935Abort = true + pipelineEIP2935AbortsCounter.Inc(1) + } } - } + }() // --- Wait for SRC(N) to complete --- + // No longer blocked by fillTransactions — block N is sealed as soon as SRC finishes. 
srcStart := time.Now() root, witnessN, err := w.chain.WaitForSRC() pipelineSRCTimer.Update(time.Since(srcStart)) if err != nil { log.Error("Pipelined SRC: SRC(N) failed", "block", blockNNumber, "err", err) pipelineSpeculativeAbortsCounter.Inc(1) + <-initialFillDone // wait for goroutine before returning return } @@ -371,6 +385,10 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { realBlockNHash := chainHead.Hash() rootN := root // state root of the last written block + // Wait for the initial fillTransactions goroutine to finish before entering + // the loop — the loop's first iteration checks abort conditions from the fill. + <-initialFillDone + // --- CONTINUOUS PIPELINE LOOP --- // State at this point: // - Block N is written to chain, realBlockNHash is known @@ -556,7 +574,9 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { specEnvNext.evm.SetInterrupt(&w.interruptBlockBuilding) specEnvNext.tcount = 0 - // --- Reset txpool and fill transactions for next block --- + // --- Reset txpool and fill transactions for next block (in goroutine) --- + // fillTransactions runs concurrently with SRC so that sealing block N + // is not delayed by filling block N+1's transactions. 
specTxPoolStateNext, err := w.chain.StateAtWithFlatDiff(rootN, flatDiff) if err != nil { log.Error("Pipelined SRC: failed to create txpool state for next block", "err", err) @@ -564,25 +584,43 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { w.eth.TxPool().ResetSpeculativeState(finalSpecHeader, specTxPoolStateNext) } - w.interruptBlockBuilding.Store(false) - var specInterruptNext atomic.Int32 - fillStart := time.Now() - w.fillTransactions(&specInterruptNext, specEnvNext) //nolint:errcheck - execElapsed := time.Since(fillStart) - - // --- Check EIP-2935 abort for next block --- - nextEIP2935Abort := false - if w.chainConfig.IsPrague(specHeaderNext.Number) { - dangerousSlot := common.BigToHash(new(big.Int).SetUint64(nextBlockNumber % params.HistoryServeWindow)) - if specStateNext.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { - log.Warn("Pipelined SRC: EIP-2935 slot accessed in next block", - "block", nextNextBlockNumber, "slot", dangerousSlot) - nextEIP2935Abort = true - pipelineEIP2935AbortsCounter.Inc(1) + fillDone := make(chan struct{}) + var nextEIP2935Abort bool + var fillElapsed time.Duration + + go func() { + defer close(fillDone) + + specStopFnNext := createInterruptTimer( + specHeaderNext.Number.Uint64(), + specHeaderNext.GetActualTime(), + &w.interruptBlockBuilding, + &w.interruptFlagSetAt, + true, // pipelinedSRC — no 500ms buffer + ) + + var specInterruptNext atomic.Int32 + fillStart := time.Now() + w.fillTransactions(&specInterruptNext, specEnvNext) //nolint:errcheck + specStopFnNext() + fillElapsed = time.Since(fillStart) + + // Check EIP-2935 abort for next block (needs fill to be done + // so WasStorageSlotRead can inspect accessed slots). 
+ if w.chainConfig.IsPrague(specHeaderNext.Number) { + dangerousSlot := common.BigToHash(new(big.Int).SetUint64(nextBlockNumber % params.HistoryServeWindow)) + if specStateNext.WasStorageSlotRead(params.HistoryStorageAddress, dangerousSlot) { + log.Warn("Pipelined SRC: EIP-2935 slot accessed in next block", + "block", nextNextBlockNumber, "slot", dangerousSlot) + nextEIP2935Abort = true + pipelineEIP2935AbortsCounter.Inc(1) + } } - } + }() // --- Wait for SRC of current speculative block --- + // No longer blocked by fillTransactions — SRC result is collected as + // soon as the goroutine completes, allowing immediate sealing. srcWaitStart := time.Now() rootSpec, witnessSpec, err := w.chain.WaitForSRC() srcWaitElapsed := time.Since(srcWaitStart) @@ -591,12 +629,12 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { if err != nil { log.Error("Pipelined SRC: SRC failed", "block", nextBlockNumber, "err", err) pipelineSpeculativeAbortsCounter.Inc(1) + <-fillDone // wait for goroutine before breaking break } if w.config.PipelinedSRCLogs { log.Info("Pipelined SRC: SRC completed", - "block", nextBlockNumber, "srcTotal", srcTotalElapsed, - "srcWait", srcWaitElapsed, "execOverlap", execElapsed) + "block", nextBlockNumber, "srcWait", srcWaitElapsed) } // --- Assemble current speculative block --- @@ -605,6 +643,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { }, specEnv.receipts, rootSpec, specStateSyncData) if err != nil { log.Error("Pipelined SRC: AssembleBlock failed", "block", nextBlockNumber, "err", err) + <-fillDone // wait for goroutine before breaking break } @@ -621,7 +660,8 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { select { case <-time.After(delay): case <-w.exitCh: - return // defer clears pendingWorkBlock + <-fillDone // wait for goroutine before returning + return // defer clears pendingWorkBlock } } @@ -631,13 +671,20 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { 
sealedBlock, err := w.inlineSealAndWrite(blockSpec, receiptsSpec, specState, witnessSpec) if err != nil { log.Error("Pipelined SRC: inline seal+write failed", "block", nextBlockNumber, "err", err) + <-fillDone // wait for goroutine before breaking break } + + // Wait for fillTransactions goroutine to finish before next iteration. + // The abort conditions (EIP-2935, BLOCKHASH) are checked at the top of + // the next loop iteration, which requires fill to be complete. + <-fillDone pipelineSpeculativeBlocksCounter.Inc(1) if w.config.PipelinedSRCLogs { log.Info("Pipelined SRC: block sealed (inline)", "number", sealedBlock.Number(), - "txs", len(sealedBlock.Transactions()), "root", rootSpec) + "txs", len(sealedBlock.Transactions()), "root", rootSpec, + "fillBlock", nextNextBlockNumber, "fillElapsed", fillElapsed) } // --- Shift variables for next iteration --- diff --git a/miner/worker.go b/miner/worker.go index a46c716cda..51c1661c41 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -890,6 +890,7 @@ func (w *worker) mainLoop() { w.current.header.GetActualTime(), &w.interruptBlockBuilding, &w.interruptFlagSetAt, + w.config.EnablePipelinedSRC, ) } @@ -2026,6 +2027,7 @@ func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genP work.header.GetActualTime(), &w.interruptBlockBuilding, &w.interruptFlagSetAt, + w.config.EnablePipelinedSRC, ) } @@ -2226,11 +2228,12 @@ func (w *worker) prefetchFromPool(parent *types.Header, throwaway *state.StateDB // createInterruptTimer creates and starts a timer based on the header's timestamp for block building // and toggles the flag when the timer expires. 
-func createInterruptTimer(number uint64, actualTimestamp time.Time, interruptBlockBuilding *atomic.Bool, interruptFlagSetAt *atomic.Int64) func() { +func createInterruptTimer(number uint64, actualTimestamp time.Time, interruptBlockBuilding *atomic.Bool, interruptFlagSetAt *atomic.Int64, pipelinedSRC bool) func() { delay := time.Until(actualTimestamp) - // Reduce the timeout by 500ms to give some buffer for state root computation - if delay > 1*time.Second { + // Reserve 500ms for state root computation — unless pipelined SRC is enabled, + // in which case SRC runs in the background and fillTransactions gets the full block time. + if !pipelinedSRC && delay > 1*time.Second { delay -= 500 * time.Millisecond } From 3dcdf8049a200aaa51732795bfa9023826e06b86 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Wed, 1 Apr 2026 20:29:23 +0530 Subject: [PATCH 3/9] miner: async DB write, concurrent fill, and interrupt timer improvements --- core/blockchain_reader.go | 7 +++ miner/pipeline.go | 94 +++++++++++++++++++++++++++++---------- 2 files changed, 77 insertions(+), 24 deletions(-) diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 58a8a49e13..509356a311 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -182,6 +182,13 @@ func (bc *BlockChain) HasWitness(hash common.Hash) bool { return bc.witnessStore.HasWitness(hash) } +// CacheWitness adds a witness to the in-memory cache without writing to the +// persistent store. Used by pipelined SRC to make witnesses available to the +// WIT protocol immediately after broadcast, before the async DB write completes. +func (bc *BlockChain) CacheWitness(hash common.Hash, witness []byte) { + bc.witnessCache.Add(hash, witness) +} + // WriteWitness writes the witness to the witness store and updates the cache. 
func (bc *BlockChain) WriteWitness(hash common.Hash, witness []byte) { bc.witnessStore.WriteWitness(hash, witness) diff --git a/miner/pipeline.go b/miner/pipeline.go index 2fa23e95c0..d2b1943216 100644 --- a/miner/pipeline.go +++ b/miner/pipeline.go @@ -396,6 +396,8 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { // - rootN is block N's committed state root // - eip2935Abort and blockhashNAccessed track N+1's abort conditions curBlockhashAccessed := &blockhashNAccessed + var prevDBWriteDone chan struct{} // tracks the previous iteration's async DB write + var lastSealedHeader *types.Header // header of the last inline-sealed block (for grandparent lookup) for { // --- Check abort conditions for current speculative block --- @@ -438,6 +440,15 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { break } + // --- Wait for previous async DB write before finalize --- + // FinalizeForPipeline may call into state sync / span commit code that + // reads block headers and state from the chain DB. If the previous + // inline-sealed block hasn't been persisted yet, those lookups fail. + if prevDBWriteDone != nil { + <-prevDBWriteDone + prevDBWriteDone = nil + } + // --- Finalize current speculative block --- finalSpecHeader := types.CopyHeader(specHeader) finalSpecHeader.ParentHash = realBlockNHash @@ -526,7 +537,14 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { specStateNext.StartPrefetcher("miner-speculative", nil, nil) // --- Build SpeculativeGetHashFn for next block --- - grandparentHeader := w.chain.GetHeaderByNumber(blockNNumber) + // Use lastSealedHeader if available (the async DB write may not have + // persisted it yet), otherwise fall back to the chain DB. 
+ var grandparentHeader *types.Header + if lastSealedHeader != nil && lastSealedHeader.Number.Uint64() == blockNNumber { + grandparentHeader = lastSealedHeader + } else { + grandparentHeader = w.chain.GetHeaderByNumber(blockNNumber) + } if grandparentHeader == nil { log.Error("Pipelined SRC: grandparent header not found for next block", "number", blockNNumber) w.sealBlockViaTaskCh(borEngine, finalSpecHeader, specState, specEnv.txs, @@ -661,16 +679,19 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { case <-time.After(delay): case <-w.exitCh: <-fillDone // wait for goroutine before returning - return // defer clears pendingWorkBlock + if prevDBWriteDone != nil { + <-prevDBWriteDone + } + return // defer clears pendingWorkBlock } } - // --- Inline seal + write (bypass taskLoop/resultLoop) --- - // Uses emitHeadEvent=false to avoid deadlock: mainLoop is blocked here, - // and chainHeadFeed.Send would block if newWorkLoop's channel fills up. - sealedBlock, err := w.inlineSealAndWrite(blockSpec, receiptsSpec, specState, witnessSpec) + // --- Inline seal + broadcast (bypass taskLoop/resultLoop) --- + // prevDBWriteDone was already awaited before FinalizeForPipeline above. + // The DB write runs asynchronously — the pipeline proceeds without waiting. + sealedBlock, dbWriteDone, err := w.inlineSealAndBroadcast(blockSpec, receiptsSpec, specState, witnessSpec) if err != nil { - log.Error("Pipelined SRC: inline seal+write failed", "block", nextBlockNumber, "err", err) + log.Error("Pipelined SRC: inline seal failed", "block", nextBlockNumber, "err", err) <-fillDone // wait for goroutine before breaking break } @@ -679,6 +700,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { // The abort conditions (EIP-2935, BLOCKHASH) are checked at the top of // the next loop iteration, which requires fill to be complete. 
<-fillDone + prevDBWriteDone = dbWriteDone pipelineSpeculativeBlocksCounter.Inc(1) if w.config.PipelinedSRCLogs { @@ -688,6 +710,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { } // --- Shift variables for next iteration --- + lastSealedHeader = sealedBlock.Header() blockNNumber = nextBlockNumber nextBlockNumber = nextNextBlockNumber rootN = rootSpec @@ -699,6 +722,11 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { eip2935Abort = nextEIP2935Abort curBlockhashAccessed = nextBlockhashAccessed } + + // Wait for the last async DB write to complete before exiting. + if prevDBWriteDone != nil { + <-prevDBWriteDone + } } // fallbackToSequential computes the state root inline and assembles block N @@ -790,37 +818,41 @@ func (w *worker) sealBlockViaTaskCh( } } -// inlineSealAndWrite seals a pipelined block using a private channel (bypassing -// taskLoop/resultLoop) and writes it directly to the chain. This avoids the -// race condition where rapid submissions to the unbuffered taskCh cause delays -// and duplicate blocks. +// inlineSealAndBroadcast seals a pipelined block using a private channel +// (bypassing taskLoop/resultLoop), broadcasts it to peers immediately, and +// writes to the chain DB asynchronously. This avoids blocking the pipeline +// on the DB write — the next iteration can start as soon as the block is sealed. +// +// Returns the sealed block and a channel that closes when the async DB write +// completes. The caller must wait on writeDone before the node can serve the +// block data from DB, but the pipeline can proceed immediately. // // Uses emitHeadEvent=false to avoid a deadlock: mainLoop is blocked in // commitSpeculativeWork, so chainHeadFeed.Send would eventually block when // newWorkLoop's channel fills up. 
-func (w *worker) inlineSealAndWrite(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB, witnessBytes []byte) (*types.Block, error) { +func (w *worker) inlineSealAndBroadcast(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB, witnessBytes []byte) (*types.Block, chan struct{}, error) { // Seal the block via a private channel — reuses Seal() without contention // on the shared w.resultCh. For primary producers on Bhilai+, delay=0. sealCh := make(chan *consensus.NewSealedBlockEvent, 1) stopCh := make(chan struct{}) if err := w.engine.Seal(w.chain, block, nil, sealCh, stopCh); err != nil { - return nil, fmt.Errorf("seal failed: %w", err) + return nil, nil, fmt.Errorf("seal failed: %w", err) } var sealedBlock *types.Block select { case ev := <-sealCh: if ev == nil || ev.Block == nil { - return nil, errors.New("nil sealed block from Seal") + return nil, nil, errors.New("nil sealed block from Seal") } sealedBlock = ev.Block case <-time.After(5 * time.Second): close(stopCh) - return nil, errors.New("inline seal timed out") + return nil, nil, errors.New("inline seal timed out") case <-w.exitCh: close(stopCh) - return nil, errors.New("worker stopped during inline seal") + return nil, nil, errors.New("worker stopped during inline seal") } hash := sealedBlock.Hash() @@ -849,17 +881,19 @@ func (w *worker) inlineSealAndWrite(block *types.Block, receipts []*types.Receip logs = append(logs, receipt.Logs...) } - // Write to chain WITHOUT emitting ChainHeadEvent (emitHeadEvent=false). 
- _, err := w.chain.WriteBlockAndSetHeadPipelined(sealedBlock, sealedReceipts, logs, statedb, false, witnessBytes) - if err != nil { - return nil, fmt.Errorf("write to chain failed: %w", err) - } - log.Info("Successfully sealed new block", "number", sealedBlock.Number(), "sealhash", w.engine.SealHash(sealedBlock.Header()), "hash", hash, "elapsed", "inline") - // Broadcast the block to peers + // Cache the witness so the WIT protocol can serve it to stateless peers + // immediately, without waiting for the async DB write. + if len(witnessBytes) > 0 { + w.chain.CacheWitness(hash, witnessBytes) + } + + // Broadcast to peers BEFORE writing to DB — the block is fully valid and + // sealed, so peers can start processing it immediately. The DB write is + // not needed for broadcast. w.mux.Post(core.NewMinedBlockEvent{Block: sealedBlock, SealedAt: time.Now()}) sealedBlocksCounter.Inc(1) @@ -868,5 +902,17 @@ func (w *worker) inlineSealAndWrite(block *types.Block, receipts []*types.Receip } w.clearPending(sealedBlock.NumberU64()) - return sealedBlock, nil + // Write to chain DB asynchronously — the pipeline can proceed with the + // next iteration using sealedBlock.Hash() directly, without waiting for + // the DB write to complete. 
+ writeDone := make(chan struct{}) + go func() { + defer close(writeDone) + _, err := w.chain.WriteBlockAndSetHeadPipelined(sealedBlock, sealedReceipts, logs, statedb, false, witnessBytes) + if err != nil { + log.Error("Pipelined SRC: async DB write failed", "block", sealedBlock.Number(), "err", err) + } + }() + + return sealedBlock, writeDone, nil } From 07345ad7dc438d772bf77faae61ba80b5507c53f Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Wed, 1 Apr 2026 21:38:41 +0530 Subject: [PATCH 4/9] lint fix --- core/evm.go | 3 +-- miner/speculative_chain_reader_test.go | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/core/evm.go b/core/evm.go index de46fac729..32d1760695 100644 --- a/core/evm.go +++ b/core/evm.go @@ -172,8 +172,7 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash // // srcDone is called at most once and must return hash(block_N) after SRC(N) // completes. It may block. -func SpeculativeGetHashFn(blockN1Header *types.Header, chain ChainContext, - pendingBlockN uint64, srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func(uint64) common.Hash { +func SpeculativeGetHashFn(blockN1Header *types.Header, chain ChainContext, pendingBlockN uint64, srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func(uint64) common.Hash { blockN1Hash := blockN1Header.Hash() diff --git a/miner/speculative_chain_reader_test.go b/miner/speculative_chain_reader_test.go index 1dd0a1cf8b..57dae5ba07 100644 --- a/miner/speculative_chain_reader_test.go +++ b/miner/speculative_chain_reader_test.go @@ -28,8 +28,8 @@ func (m *mockChainHeaderReader) addHeader(h *types.Header) { m.byNum[h.Number.Uint64()] = h } -func (m *mockChainHeaderReader) Config() *params.ChainConfig { return params.TestChainConfig } -func (m *mockChainHeaderReader) CurrentHeader() *types.Header { return nil } +func (m *mockChainHeaderReader) Config() *params.ChainConfig { return params.TestChainConfig } +func (m *mockChainHeaderReader) 
CurrentHeader() *types.Header { return nil } func (m *mockChainHeaderReader) GetTd(common.Hash, uint64) *big.Int { return big.NewInt(1) } func (m *mockChainHeaderReader) GetHeader(hash common.Hash, number uint64) *types.Header { From a86db5009997fa0c80e5e1eb8852ae48cfd76ca9 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Thu, 2 Apr 2026 10:32:26 +0530 Subject: [PATCH 5/9] addressed comments and fix test, lint --- core/blockchain.go | 42 +++++++++++++++++++--------------- core/evm.go | 1 - core/stateless/witness.go | 29 +++++++++++++++++++---- core/stateless/witness_test.go | 14 ++++++------ miner/pipeline.go | 27 +++++++++++++++++----- miner/worker.go | 2 +- tests/bor/bor_test.go | 7 ++++++ 7 files changed, 84 insertions(+), 38 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index e3ebe86177..9784d3b487 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -450,9 +450,9 @@ type BlockChain struct { // The miner uses it together with the grandparent's committed root to open // a StateDB via NewWithFlatBase, allowing block N+1 execution to start // before the SRC goroutine finishes. 
- lastFlatDiff *state.FlatDiff - lastFlatDiffBlockHash common.Hash - lastFlatDiffMu sync.RWMutex + lastFlatDiff *state.FlatDiff + lastFlatDiffBlockNum uint64 + lastFlatDiffMu sync.RWMutex } // NewBlockChain returns a fully initialised block chain using information @@ -2761,7 +2761,7 @@ func (bc *BlockChain) insertChainStatelessParallel(chain types.Blocks, witnesses if witnesses[i].HeaderReader() != nil { headerReader = witnesses[i].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader); err != nil { + if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader, block.Header()); err != nil { stopHeaders() return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) } @@ -2925,7 +2925,7 @@ func (bc *BlockChain) insertChainStatelessSequential(chain types.Blocks, witness if witnesses[i].HeaderReader() != nil { headerReader = witnesses[i].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader); err != nil { + if err := stateless.ValidateWitnessPreState(witnesses[i], headerReader, block.Header()); err != nil { return int(processed.Load()), fmt.Errorf("post-import witness validation failed for block %d: %w", block.NumberU64(), err) } } @@ -3228,7 +3228,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if witnesses[it.processed()-1].HeaderReader() != nil { headerReader = witnesses[it.processed()-1].HeaderReader() } - if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], headerReader); err != nil { + if err := stateless.ValidateWitnessPreState(witnesses[it.processed()-1], headerReader, block.Header()); err != nil { log.Error("Witness validation failed during chain insertion", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) bc.reportBlock(block, &ProcessResult{}, err) followupInterrupt.Store(true) @@ -4368,12 +4368,14 @@ func (bc *BlockChain) 
WriteBlockAndSetHeadPipelined(block *types.Block, receipts // behind the actual post-execution state). func (bc *BlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB, error) { // Fast path: if we have the FlatDiff for this block, use it as an overlay. + // Matching by block number (not hash) because the hash may not be final + // at the time SetLastFlatDiff is called (Root and seal signature are added later). bc.lastFlatDiffMu.RLock() flatDiff := bc.lastFlatDiff - flatDiffHash := bc.lastFlatDiffBlockHash + flatDiffBlockNum := bc.lastFlatDiffBlockNum bc.lastFlatDiffMu.RUnlock() - if flatDiff != nil && flatDiffHash == header.Hash() { + if flatDiff != nil && flatDiffBlockNum == header.Number.Uint64() { return state.NewWithFlatBase(header.Root, bc.statedb, flatDiff) } @@ -4402,6 +4404,12 @@ func (bc *BlockChain) SpawnSRCGoroutine(block *types.Block, parentRoot common.Ha go func() { defer bc.wg.Done() defer pending.wg.Done() + defer func() { + if r := recover(); r != nil { + log.Error("Pipelined SRC: panic in SRC goroutine", "block", block.NumberU64(), "err", r) + pending.err = fmt.Errorf("SRC goroutine panicked: %v", r) + } + }() tmpDB, err := state.New(parentRoot, bc.statedb) if err != nil { @@ -4510,18 +4518,14 @@ func (bc *BlockChain) GetLastFlatDiff() *state.FlatDiff { return bc.lastFlatDiff } -// GetLastFlatDiffBlockHash returns the block hash associated with the cached FlatDiff. -func (bc *BlockChain) GetLastFlatDiffBlockHash() common.Hash { - bc.lastFlatDiffMu.RLock() - defer bc.lastFlatDiffMu.RUnlock() - return bc.lastFlatDiffBlockHash -} - -// SetLastFlatDiff stores the FlatDiff and its source block hash. -func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockHash common.Hash) { +// SetLastFlatDiff stores the FlatDiff and the block number it belongs to. 
+// The block number is used by PostExecutionStateAt to match the FlatDiff +// to the correct block (hash matching is unreliable because Root and seal +// signature are not available when FlatDiff is captured). +func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockNum uint64) { bc.lastFlatDiffMu.Lock() bc.lastFlatDiff = diff - bc.lastFlatDiffBlockHash = blockHash + bc.lastFlatDiffBlockNum = blockNum bc.lastFlatDiffMu.Unlock() } @@ -4547,7 +4551,7 @@ func (bc *BlockChain) ProcessBlockWithWitnesses(block *types.Block, witness *sta } else { headerReader = bc } - if err := stateless.ValidateWitnessPreState(witness, headerReader); err != nil { + if err := stateless.ValidateWitnessPreState(witness, headerReader, block.Header()); err != nil { log.Error("Witness validation failed during stateless processing", "blockNumber", block.Number(), "blockHash", block.Hash(), "err", err) return nil, nil, fmt.Errorf("witness validation failed: %w", err) } diff --git a/core/evm.go b/core/evm.go index 32d1760695..bbab20df29 100644 --- a/core/evm.go +++ b/core/evm.go @@ -173,7 +173,6 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash // srcDone is called at most once and must return hash(block_N) after SRC(N) // completes. It may block. func SpeculativeGetHashFn(blockN1Header *types.Header, chain ChainContext, pendingBlockN uint64, srcDone func() common.Hash, blockhashNAccessed *atomic.Bool) func(uint64) common.Hash { - blockN1Hash := blockN1Header.Hash() // olderFn handles blocks N-2 and below via the standard chain walk. diff --git a/core/stateless/witness.go b/core/stateless/witness.go index 57f01e85d3..019374b002 100644 --- a/core/stateless/witness.go +++ b/core/stateless/witness.go @@ -35,8 +35,11 @@ type HeaderReader interface { GetHeader(hash common.Hash, number uint64) *types.Header } -// ValidateWitnessPreState validates that the witness pre-state root matches the parent block's state root. 
-func ValidateWitnessPreState(witness *Witness, headerReader HeaderReader) error { +// ValidateWitnessPreState validates that the witness pre-state root matches +// the parent block's state root. The expectedBlock header is the block being +// imported — the witness context must match it (ParentHash and Number) to +// prevent a malicious peer from substituting a witness for a different block. +func ValidateWitnessPreState(witness *Witness, headerReader HeaderReader, expectedBlock *types.Header) error { if witness == nil { return fmt.Errorf("witness is nil") } @@ -52,6 +55,19 @@ func ValidateWitnessPreState(witness *Witness, headerReader HeaderReader) error return fmt.Errorf("witness context header is nil") } + // Verify the witness is for the expected block — a malicious peer could + // craft a witness with a different ParentHash to bypass the pre-state check. + if expectedBlock != nil { + if contextHeader.ParentHash != expectedBlock.ParentHash { + return fmt.Errorf("witness ParentHash mismatch: witness=%x, expected=%x, blockNumber=%d", + contextHeader.ParentHash, expectedBlock.ParentHash, expectedBlock.Number.Uint64()) + } + if contextHeader.Number.Uint64() != expectedBlock.Number.Uint64() { + return fmt.Errorf("witness block number mismatch: witness=%d, expected=%d", + contextHeader.Number.Uint64(), expectedBlock.Number.Uint64()) + } + } + // Get the parent block header from the chain. parentHeader := headerReader.GetHeader(contextHeader.ParentHash, contextHeader.Number.Uint64()-1) if parentHeader == nil { @@ -96,9 +112,14 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) { } headers = append(headers, parent) } - // Create the witness with a reconstructed gutted out block + // Create the witness with a copy of the context header to prevent + // callers from mutating Root/ReceiptHash after witness creation. 
+ ctx := types.CopyHeader(context) + ctx.Root = common.Hash{} + ctx.ReceiptHash = common.Hash{} + return &Witness{ - context: context, + context: ctx, Headers: headers, Codes: make(map[string]struct{}), State: make(map[string]struct{}), diff --git a/core/stateless/witness_test.go b/core/stateless/witness_test.go index 6d662020fd..f1330a66d7 100644 --- a/core/stateless/witness_test.go +++ b/core/stateless/witness_test.go @@ -42,7 +42,7 @@ func TestValidateWitnessPreState_Success(t *testing.T) { } // Test validation - should succeed. - err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err != nil { t.Errorf("Expected validation to succeed, but got error: %v", err) } @@ -88,7 +88,7 @@ func TestValidateWitnessPreState_StateMismatch(t *testing.T) { } // Test validation - should fail. - err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err == nil { t.Error("Expected validation to fail due to state root mismatch, but it succeeded") } @@ -106,7 +106,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { // Test case 1: Nil witness. 
t.Run("NilWitness", func(t *testing.T) { - err := ValidateWitnessPreState(nil, mockReader) + err := ValidateWitnessPreState(nil, mockReader, nil) if err == nil { t.Error("Expected validation to fail for nil witness") } @@ -124,7 +124,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { State: make(map[string]struct{}), } - err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err == nil { t.Error("Expected validation to fail for witness with no headers") } @@ -147,7 +147,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { State: make(map[string]struct{}), } - err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err == nil { t.Error("Expected validation to fail for witness with nil context header") } @@ -177,7 +177,7 @@ func TestValidateWitnessPreState_EdgeCases(t *testing.T) { } // Don't add parent header to mock reader - it won't be found. - err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err == nil { t.Error("Expected validation to fail when parent header is not found") } @@ -234,7 +234,7 @@ func TestValidateWitnessPreState_MultipleHeaders(t *testing.T) { } // Test validation - should succeed (only first header matters for validation). 
- err := ValidateWitnessPreState(witness, mockReader) + err := ValidateWitnessPreState(witness, mockReader, nil) if err != nil { t.Errorf("Expected validation to succeed with multiple headers, but got error: %v", err) } diff --git a/miner/pipeline.go b/miner/pipeline.go index d2b1943216..ae4f4be9e3 100644 --- a/miner/pipeline.go +++ b/miner/pipeline.go @@ -121,7 +121,7 @@ func (w *worker) commitPipelined(env *environment, start time.Time) error { } parentRoot := parent.Root - w.chain.SetLastFlatDiff(flatDiff, env.header.Hash()) + w.chain.SetLastFlatDiff(flatDiff, env.header.Number.Uint64()) // Note: this counts block N as "entering the pipeline." If Prepare() fails // and fallbackToSequential produces the block inline, the counter is slightly // inflated — the block was produced sequentially, not speculatively. @@ -216,6 +216,9 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { specState, err := w.chain.StateAtWithFlatDiff(req.parentRoot, req.flatDiff) if err != nil { log.Error("Pipelined SRC: failed to open speculative state", "err", err) + // SRC goroutine is already running — wait for it to finish before + // fallbackToSequential does IntermediateRoot on the same parent root. + w.chain.WaitForSRC() //nolint:errcheck w.fallbackToSequential(req) return } @@ -225,6 +228,9 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { blockN1Header := w.chain.GetHeader(blockNHeader.ParentHash, blockNNumber-1) if blockN1Header == nil { log.Error("Pipelined SRC: grandparent header not found") + // SRC goroutine is already running — wait for it to finish before + // fallbackToSequential does IntermediateRoot on the same parent root. + w.chain.WaitForSRC() //nolint:errcheck w.fallbackToSequential(req) return } @@ -291,6 +297,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { // fillTransactions runs concurrently with SRC(N) so that sealing block N // is not delayed by filling block N+1's transactions. 
initialFillDone := make(chan struct{}) + defer func() { <-initialFillDone }() // ensure goroutine is drained on all return paths var eip2935Abort bool go func() { @@ -328,12 +335,15 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { if err != nil { log.Error("Pipelined SRC: SRC(N) failed", "block", blockNNumber, "err", err) pipelineSpeculativeAbortsCounter.Inc(1) - <-initialFillDone // wait for goroutine before returning return } // --- Assemble and seal block N --- - borEngine, _ := w.engine.(*bor.Bor) + borEngine, ok := w.engine.(*bor.Bor) + if !ok { + log.Error("Pipelined SRC: engine is not Bor") + return + } finalHeaderN := types.CopyHeader(blockNHeader) finalHeaderN.Root = root @@ -377,7 +387,11 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { // Get the REAL block N hash from the chain — this is the signed hash // written by resultLoop after Seal() modified header.Extra. chainHead := w.chain.CurrentBlock() - if chainHead == nil || chainHead.Number.Uint64() != blockNNum { + if chainHead == nil { + log.Error("Pipelined SRC: chain head is nil after waiting", "expected", blockNNum) + return + } + if chainHead.Number.Uint64() != blockNNum { log.Error("Pipelined SRC: chain head mismatch after waiting", "expected", blockNNum, "got", chainHead.Number.Uint64()) return @@ -387,6 +401,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { // Wait for the initial fillTransactions goroutine to finish before entering // the loop — the loop's first iteration checks abort conditions from the fill. + // (The defer also drains this, but we need the results here, not just cleanup.) 
<-initialFillDone // --- CONTINUOUS PIPELINE LOOP --- @@ -518,7 +533,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { srcSpawnTime := time.Now() tmpBlockCur := types.NewBlockWithHeader(finalSpecHeader) w.chain.SpawnSRCGoroutine(tmpBlockCur, rootN, flatDiff) - w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Hash()) + w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Number.Uint64()) if w.config.PipelinedSRCLogs { log.Info("Pipelined SRC: spawned SRC, starting speculative exec", "srcBlock", nextBlockNumber, "specExecBlock", nextNextBlockNumber) @@ -780,7 +795,7 @@ func (w *worker) sealBlockViaTaskCh( if spawnSRC { tmpBlock := types.NewBlockWithHeader(finalHeader) w.chain.SpawnSRCGoroutine(tmpBlock, rootN, flatDiff) - w.chain.SetLastFlatDiff(flatDiff, finalHeader.Hash()) + w.chain.SetLastFlatDiff(flatDiff, finalHeader.Number.Uint64()) } pipelineSpeculativeBlocksCounter.Inc(1) diff --git a/miner/worker.go b/miner/worker.go index 51c1661c41..3258b851d9 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1114,12 +1114,12 @@ func (w *worker) resultLoop() { // For pipelined blocks, state was already committed by the SRC goroutine — // use WriteBlockAndSetHeadPipelined to skip the redundant CommitWithUpdate. 
writeStart := time.Now() - writeElapsed := time.Since(writeStart) if task.pipelined { _, err = w.chain.WriteBlockAndSetHeadPipelined(block, receipts, logs, task.state, true, task.witnessBytes) } else { _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true) } + writeElapsed := time.Since(writeStart) writeBlockAndSetHeadTimer.Update(writeElapsed) if err != nil { diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 62279765eb..8def74981e 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -3082,6 +3082,13 @@ txsDone: currentNum := chain.CurrentBlock().Number.Uint64() t.Logf("All %d transactions included by block %d", txCount, currentNum) + // Wait for async DB writes to complete — pipelined SRC writes blocks + // asynchronously, so GetBlockByNumber may not find them immediately. + // Also, the speculative fill may have advanced the nonce before the block + // containing the txs is sealed, so re-read currentNum after waiting. + time.Sleep(2 * time.Second) + currentNum = chain.CurrentBlock().Number.Uint64() + // Verify we can find the transactions in the blocks totalTxs := 0 for i := uint64(1); i <= currentNum; i++ { From 8d5ed1b48a0427dd9855c0684c6eb7b1f2a914bf Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Thu, 2 Apr 2026 12:40:16 +0530 Subject: [PATCH 6/9] core/stateless: (fix unit test) fix NewWitness zeroing breaking witness manager hash matching --- core/stateless/witness.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/stateless/witness.go b/core/stateless/witness.go index 019374b002..175c9694b3 100644 --- a/core/stateless/witness.go +++ b/core/stateless/witness.go @@ -113,10 +113,12 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) { headers = append(headers, parent) } // Create the witness with a copy of the context header to prevent - // callers from mutating Root/ReceiptHash after witness creation. 
+ // callers from mutating the header after witness creation. + // Note: Root and ReceiptHash are NOT zeroed here — they are zeroed at the + // point of stateless execution (ProcessBlockWithWitnesses) where they are + // recomputed. Zeroing here would break the witness manager's hash matching + // (handleBroadcast uses witness.Header().Hash() to look up pending blocks). ctx := types.CopyHeader(context) - ctx.Root = common.Hash{} - ctx.ReceiptHash = common.Hash{} return &Witness{ context: ctx, From 0e2da863f06d2e3190841238f5740529b1d96c14 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Thu, 9 Apr 2026 16:05:42 +0530 Subject: [PATCH 7/9] core, consensus/bor, eth, triedb: pipelined state root computation for block import Overlap SRC(N) with execution of block N+1 on importing/RPC nodes. After executing block N, defer IntermediateRoot + CommitWithUpdate to a background SRC goroutine and immediately proceed to block N+1 using a FlatDiff overlay for state reads. Cross-call persistence allows the SRC to run across insertChain boundaries. 
Key changes: - Pipeline path in insertChainWithWitnesses with ValidateStateCheap - FlatDiff overlay in StateAt, StateAtWithReaders, PostExecutionStateAt - Path DB reader chained fallback for concurrent layer flattening - Trie-only reader for SRC witness generation (no flat reader bypass) - WIT handler waits for pipelined witness before returning empty - WitnessReadyEvent for announcing witnesses to stateless peers - PropagateReadsTo in checkAndCommitSpan for witness completeness - Feature gated: --pipeline.enable-import-src --- consensus/bor/bor.go | 7 + core/block_validator.go | 31 ++ core/blockchain.go | 431 ++++++++++++++++++++++++-- core/blockchain_reader.go | 129 +++++++- core/blockchain_test.go | 458 +++++++++++++++++++++++++++- core/events.go | 9 + core/state/database.go | 13 + core/state/statedb.go | 16 + core/state/statedb_pipeline_test.go | 210 +++++++++++++ core/types.go | 5 + docs/cli/default_config.toml | 4 + docs/cli/server.md | 6 + eth/api_debug.go | 4 +- eth/backend.go | 4 +- eth/ethconfig/config.go | 4 + eth/handler.go | 43 ++- eth/handler_wit.go | 29 +- internal/cli/server/config.go | 19 ++ internal/cli/server/flags.go | 14 + miner/pipeline.go | 6 +- tests/bor/bor_test.go | 308 +++++++++++++++++++ tests/bor/helper.go | 69 +++++ triedb/pathdb/reader.go | 33 +- 23 files changed, 1804 insertions(+), 48 deletions(-) diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 63a38bd902..9f3fbfb358 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -1677,6 +1677,13 @@ func (c *Bor) checkAndCommitSpan( tempState.IntermediateRoot(false) + // Propagate addresses accessed during GetCurrentSpan back to the original + // state so they appear in the FlatDiff ReadSet. Without this, the pipelined + // SRC goroutine's witness won't capture their trie proof nodes (the copy's + // reads aren't tracked on the original), causing stateless execution to fail + // with missing trie nodes for the validator contract. 
+ tempState.PropagateReadsTo(state.Inner()) + if c.needToCommitSpan(span, headerNumber) { return c.FetchAndCommitSpan(ctx, span.Id+1, state, header, chain) } diff --git a/core/block_validator.go b/core/block_validator.go index e17fb4f6b7..22962695f3 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -127,6 +127,37 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { return nil } +// ValidateStateCheap validates the cheap (non-trie) post-state checks: gas used, +// bloom filter, receipt root, and requests hash. It does NOT compute the state +// root (IntermediateRoot), which is the expensive operation. Used by the pipelined +// import path where IntermediateRoot is deferred to a background SRC goroutine. +func (v *BlockValidator) ValidateStateCheap(block *types.Block, statedb *state.StateDB, res *ProcessResult) error { + if res == nil { + return errors.New("nil ProcessResult value") + } + header := block.Header() + if block.GasUsed() != res.GasUsed { + return fmt.Errorf("%w (remote: %d local: %d)", ErrGasUsedMismatch, block.GasUsed(), res.GasUsed) + } + rbloom := types.MergeBloom(res.Receipts) + if rbloom != header.Bloom { + return fmt.Errorf("%w (remote: %x local: %x)", ErrBloomMismatch, header.Bloom, rbloom) + } + receiptSha := types.DeriveSha(res.Receipts, trie.NewStackTrie(nil)) + if receiptSha != header.ReceiptHash { + return fmt.Errorf("%w (remote: %x local: %x)", ErrReceiptRootMismatch, header.ReceiptHash, receiptSha) + } + if header.RequestsHash != nil { + reqhash := types.CalcRequestsHash(res.Requests) + if reqhash != *header.RequestsHash { + return fmt.Errorf("%w (remote: %x local: %x)", ErrRequestsHashMismatch, *header.RequestsHash, reqhash) + } + } else if res.Requests != nil { + return errors.New("block has requests before prague fork") + } + return nil +} + // ValidateState validates the various changes that happen after a state transition, // such as amount of used gas, the receipt roots and the state root itself. 
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error { diff --git a/core/blockchain.go b/core/blockchain.go index 9784d3b487..1ae1f0eab5 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -145,6 +145,12 @@ var ( blockBatchWriteTimer = metrics.NewRegisteredTimer("chain/batch/write", nil) // time to flush the block batch to disk (blockBatch.Write) — spikes indicate DB compaction stalls stateCommitTimer = metrics.NewRegisteredTimer("chain/state/commit", nil) // time for statedb.CommitWithUpdate — in pathdb mode, spikes indicate diff layer flushes + // Pipelined import SRC metrics + pipelineImportBlocksCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/blocks", nil) + pipelineImportSRCTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/src", nil) + pipelineImportCollectTimer = metrics.NewRegisteredTimer("chain/imports/pipelined/collect", nil) + pipelineImportFallbackCounter = metrics.NewRegisteredCounter("chain/imports/pipelined/fallback", nil) + errInsertionInterrupted = errors.New("insertion is interrupted") errChainStopped = errors.New("blockchain is stopped") errInvalidOldChain = errors.New("invalid old chain") @@ -257,6 +263,21 @@ type BlockChainConfig struct { // MilestoneFetcher returns the latest milestone end block from Heimdall. MilestoneFetcher func(ctx context.Context) (uint64, error) + + // EnablePipelinedImportSRC enables pipelined state root computation during + // block import: overlap SRC(N) with tx execution of block N+1. + EnablePipelinedImportSRC bool + + // PipelinedImportSRCLogs enables verbose logging for the import pipeline. + PipelinedImportSRCLogs bool +} + +// PipelineImportOpts configures ProcessBlock for pipelined import mode. +// When non-nil, ProcessBlock opens state at CommittedParentRoot (with optional +// FlatDiff overlay) and uses ValidateStateCheap instead of full ValidateState. 
+type PipelineImportOpts struct { + CommittedParentRoot common.Hash // Last committed trie root (grandparent when FlatDiff is set) + FlatDiff *state.FlatDiff // Previous block's state overlay (nil for first block in pipeline) } // DefaultConfig returns the default config. @@ -355,6 +376,24 @@ type pendingSRCState struct { err error } +// pendingImportSRCState stores the state of a block whose SRC goroutine has +// been spawned. Block metadata is written to DB immediately; the state commit +// runs in the background. An auto-collection goroutine waits for SRC to finish +// and immediately writes the witness + handles trie GC, so collection doesn't +// depend on the arrival of the next block. +type pendingImportSRCState struct { + block *types.Block + flatDiff *state.FlatDiff + committedRoot common.Hash // last committed trie root when SRC was spawned + procTime time.Duration // for gcproc accumulation + + // collectedCh is closed when auto-collection completes (verify root, + // write witness, trie GC). Callers block on <-collectedCh. + collectedCh chan struct{} + collectedRoot common.Hash // verified root (set before closing collectedCh) + collectedErr error // non-nil if SRC failed or root mismatch +} + // BlockChain represents the canonical chain given a database with a genesis // block. The Blockchain manages chain imports, reverts, chain reorganisations. // @@ -389,6 +428,7 @@ type BlockChain struct { chainHeadFeed event.Feed logsFeed event.Feed blockProcFeed event.Feed + witnessReadyFeed event.Feed blockProcCounter int32 scope event.SubscriptionScope genesisBlock *types.Block @@ -446,13 +486,20 @@ type BlockChain struct { pendingSRC *pendingSRCState pendingSRCMu sync.Mutex + // pendingImportSRC tracks a block whose SRC goroutine is in-flight during + // pipelined import. Persists across insertChain calls. + pendingImportSRC *pendingImportSRCState + pendingImportSRCMu sync.Mutex + // lastFlatDiff holds the FlatDiff from the most recently committed block. 
// The miner uses it together with the grandparent's committed root to open // a StateDB via NewWithFlatBase, allowing block N+1 execution to start // before the SRC goroutine finishes. - lastFlatDiff *state.FlatDiff - lastFlatDiffBlockNum uint64 - lastFlatDiffMu sync.RWMutex + lastFlatDiff *state.FlatDiff + lastFlatDiffBlockNum uint64 + lastFlatDiffParentRoot common.Hash // committed root that the FlatDiff is based on + lastFlatDiffBlockRoot common.Hash // the block's own state root (from header) + lastFlatDiffMu sync.RWMutex } // NewBlockChain returns a fully initialised block chain using information @@ -732,7 +779,7 @@ func NewParallelBlockChain(db ethdb.Database, genesis *Genesis, engine consensus return bc, nil } -func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) { +func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, witness *stateless.Witness, followupInterrupt *atomic.Bool, pipeOpts *PipelineImportOpts) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) { // Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -757,24 +804,38 @@ func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, wit }() } - parentRoot := parent.Root - prefetch, process, err := bc.statedb.ReadersWithCacheStats(parentRoot) + // When pipelined import is active, the parent root may not be committed to the + // trie DB yet (SRC goroutine still running). Use the last committed root for + // readers and overlay the previous block's FlatDiff for correct reads. 
+ readerRoot := parent.Root + if pipeOpts != nil { + readerRoot = pipeOpts.CommittedParentRoot + } + + prefetch, process, err := bc.statedb.ReadersWithCacheStats(readerRoot) if err != nil { return nil, nil, 0, nil, 0, err } - throwaway, err := state.NewWithReader(parentRoot, bc.statedb, prefetch) + throwaway, err := state.NewWithReader(readerRoot, bc.statedb, prefetch) if err != nil { return nil, nil, 0, nil, 0, err } - statedb, err := state.NewWithReader(parentRoot, bc.statedb, process) + statedb, err := state.NewWithReader(readerRoot, bc.statedb, process) if err != nil { return nil, nil, 0, nil, 0, err } - parallelStatedb, err := state.NewWithReader(parentRoot, bc.statedb, process) + parallelStatedb, err := state.NewWithReader(readerRoot, bc.statedb, process) if err != nil { return nil, nil, 0, nil, 0, err } + // Apply FlatDiff overlay so reads see the previous block's post-state. + if pipeOpts != nil && pipeOpts.FlatDiff != nil { + throwaway.SetFlatDiffRef(pipeOpts.FlatDiff) + statedb.SetFlatDiffRef(pipeOpts.FlatDiff) + parallelStatedb.SetFlatDiffRef(pipeOpts.FlatDiff) + } + // Upload the statistics of reader at the end defer func() { stats := prefetch.GetStats() @@ -847,7 +908,11 @@ func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, wit blockExecutionParallelTimer.UpdateSince(pstart) if err == nil { vstart := time.Now() - err = bc.validator.ValidateState(block, parallelStatedb, res, false) + if pipeOpts != nil { + err = bc.validator.ValidateStateCheap(block, parallelStatedb, res) + } else { + err = bc.validator.ValidateState(block, parallelStatedb, res, false) + } vtime = time.Since(vstart) } if res == nil { @@ -867,7 +932,11 @@ func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header, wit blockExecutionSerialTimer.UpdateSince(pstart) if err == nil { vstart := time.Now() - err = bc.validator.ValidateState(block, statedb, res, false) + if pipeOpts != nil { + err = bc.validator.ValidateStateCheap(block, statedb, res) + 
} else { + err = bc.validator.ValidateState(block, statedb, res, false) + } vtime = time.Since(vstart) } if res == nil { @@ -1677,6 +1746,11 @@ func (bc *BlockChain) stopWithoutSaving() { if bc.stateSizer != nil { bc.stateSizer.Stop() } + // Flush any pending import SRC before waiting for goroutines. + if err := bc.flushPendingImportSRC(); err != nil { + log.Error("Failed to flush pending import SRC during shutdown", "err", err) + } + // Now wait for all chain modifications to end and persistent goroutines to exit. // // Note: Close waits for the mutex to become available, i.e. any running chain @@ -3190,11 +3264,50 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } - statedb, err := state.New(parent.Root, bc.statedb) - if err != nil { - return nil, it.index, err + + // --- Pipelined import: check for pending SRC from previous block --- + pipelineActive := bc.cfg.EnablePipelinedImportSRC && setHead && !bc.cfg.Stateless + var pipeOpts *PipelineImportOpts + + if pipelineActive { + if bc.cfg.PipelinedImportSRCLogs { + log.Info("Pipelined import: started processing block", + "block", block.NumberU64(), "txs", len(block.Transactions())) + } + + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRCMu.Unlock() + + if pending != nil { + if block.ParentHash() == pending.block.Hash() { + // This block continues from the pending one — use FlatDiff overlay + pipeOpts = &PipelineImportOpts{ + CommittedParentRoot: pending.committedRoot, + FlatDiff: pending.flatDiff, + } + } else { + // Block doesn't follow pending (reorg/gap) — flush first + if err := bc.flushPendingImportSRC(); err != nil { + log.Error("Pipelined import: flush failed on mismatch", "err", err) + } + } + } + + // No pending state — first block in pipeline. 
Still enter the + // pipeline so the SRC goroutine persists for the next insertChain + // call, enabling cross-call overlap with block N+1. + if pipeOpts == nil { + pipeOpts = &PipelineImportOpts{ + CommittedParentRoot: parent.Root, + } + } } + // Note: ProcessBlock opens its own statedbs internally. The statedb + // created here in the original code was only used for activeState tracking. + // With pipelined import, ProcessBlock handles all state opening. + // If we are past Byzantium, enable prefetching to pull in trie node paths // while processing transactions. Before Byzantium the prefetcher is mostly // useless due to the intermediate root hashing after each transaction. @@ -3208,12 +3321,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, return nil, it.index, err } } - // Bor: We start the prefetcher in process block function called below - // and not here as we copy state for block-stm in that function. Also, - // we don't want to start duplicate prefetchers per block. 
- // statedb.StartPrefetcher("chain", witness) } - activeState = statedb var followupInterrupt atomic.Bool @@ -3249,7 +3357,7 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, } } - receipts, logs, usedGas, statedb, vtime, err := bc.ProcessBlock(block, parent, witness, &followupInterrupt) + receipts, logs, usedGas, statedb, vtime, err := bc.ProcessBlock(block, parent, witness, &followupInterrupt, pipeOpts) bc.statedb.TrieDB().SetReadBackend(nil) bc.statedb.EnableSnapInReader() activeState = statedb @@ -3257,9 +3365,158 @@ func (bc *BlockChain) insertChainWithWitnesses(chain types.Blocks, setHead bool, if err != nil { bc.reportBlock(block, &ProcessResult{Receipts: receipts}, err) followupInterrupt.Store(true) + // Flush any pending import SRC before returning on error + if pipelineActive { + _ = bc.flushPendingImportSRC() + } return nil, it.index, err } + // --- Pipelined import: extract FlatDiff, collect previous SRC, write metadata, spawn SRC --- + if pipelineActive { + flatDiff := statedb.CommitSnapshot(bc.chainConfig.IsEIP158(block.Number())) + + // Collect previous pending SRC (verify root + trie GC) + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRCMu.Unlock() + + var committedRoot common.Hash + if pending != nil { + if bc.cfg.PipelinedImportSRCLogs { + log.Info("Pipelined import: collecting previous SRC", + "block", block.NumberU64(), "pendingBlock", pending.block.NumberU64()) + } + collectStart := time.Now() + var collectErr error + committedRoot, collectErr = bc.collectPendingImportSRC() + pipelineImportCollectTimer.UpdateSince(collectStart) + if collectErr != nil { + followupInterrupt.Store(true) + return nil, it.index - 1, collectErr + } + } else { + // First block in pipeline — parent root is already committed + committedRoot = parent.Root + } + + // BOR state sync feed + bc.stateSyncMu.RLock() + for _, data := range bc.GetStateSync() { + bc.stateSyncFeed.Send(StateSyncEvent{Data: 
data}) + } + bc.stateSyncMu.RUnlock() + + proctime := time.Since(start) + + // Store FlatDiff BEFORE writing metadata. writeBlockAndSetHeadPipelined + // emits ChainEvent which triggers subscribers that read state. FlatDiff + // must be available so PostExecutionStateAt works for those reads. + bc.SetLastFlatDiff(flatDiff, block.NumberU64(), committedRoot, block.Root()) + + // Write block metadata to DB immediately (so sync protocol sees it). + // State commit is deferred to the SRC goroutine. emitHeadEvent=false + // because the deferred ChainHeadEvent at end of insertChain handles it. + _, writeErr := bc.writeBlockAndSetHeadPipelined( + block, receipts, logs, statedb, false, nil) + if writeErr != nil { + followupInterrupt.Store(true) + return nil, it.index, writeErr + } + + // Spawn SRC goroutine for current block + tmpBlock := types.NewBlockWithHeader(block.Header()).WithBody(*block.Body()) + bc.SpawnSRCGoroutine(tmpBlock, committedRoot, flatDiff) + + // Store as pending — persists across insertChain calls + newPending := &pendingImportSRCState{ + block: block, + flatDiff: flatDiff, + committedRoot: committedRoot, + procTime: proctime, + collectedCh: make(chan struct{}), + } + bc.pendingImportSRCMu.Lock() + bc.pendingImportSRC = newPending + bc.pendingImportSRCMu.Unlock() + + // Spawn auto-collection goroutine: waits for SRC to finish, then + // immediately verifies root, writes witness, and handles trie GC. + // This way collection doesn't depend on the next block's arrival. 
+ srcStart := time.Now() + bc.wg.Add(1) + go func(p *pendingImportSRCState, srcStart time.Time) { + defer bc.wg.Done() + defer close(p.collectedCh) + + root, witnessBytes, err := bc.WaitForSRC() + pipelineImportSRCTimer.UpdateSince(srcStart) + if err != nil { + log.Error("Pipelined import: SRC goroutine failed", + "block", p.block.NumberU64(), "err", err) + p.collectedErr = err + return + } + + if root != p.block.Root() { + p.collectedErr = fmt.Errorf("pipelined import: root mismatch (expected: %x got: %x) block: %d", + p.block.Root(), root, p.block.NumberU64()) + log.Error("Pipelined import: root mismatch, reverting chain head", + "block", p.block.NumberU64(), "expected", p.block.Root(), "got", root) + bc.reportBlock(p.block, nil, p.collectedErr) + if parentBlock := bc.GetBlock(p.block.ParentHash(), p.block.NumberU64()-1); parentBlock != nil { + bc.writeHeadBlock(parentBlock) + } + return + } + + p.collectedRoot = root + + if bc.cfg.PipelinedImportSRCLogs { + log.Info("Pipelined import: SRC verified", + "block", p.block.NumberU64(), "root", root) + } + + // Write witness and announce availability to peers + if len(witnessBytes) > 0 { + bc.WriteWitness(p.block.Hash(), witnessBytes) + bc.witnessReadyFeed.Send(WitnessReadyEvent{ + BlockHash: p.block.Hash(), + BlockNumber: p.block.NumberU64(), + }) + } + + // Trie GC + bc.handleImportTrieGC(root, p.block.NumberU64(), p.procTime) + + pipelineImportBlocksCounter.Inc(1) + }(newPending, srcStart) + + if bc.cfg.PipelinedImportSRCLogs { + log.Info("Pipelined import: spawned SRC", + "block", block.NumberU64(), "committedRoot", committedRoot, + "txs", len(block.Transactions())) + } + + followupInterrupt.Store(true) + + // Update stats and report + stats.processed++ + stats.usedGas += usedGas + lastCanon = block + + var snapDiffItems, snapBufItems common.StorageSize + if bc.snaps != nil { + snapDiffItems, snapBufItems = bc.snaps.Size() + } + trieDiffNodes, trieBufNodes, _ := bc.triedb.Size() + stats.report(chain, it.index, 
snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead, false) + + continue // Skip normal write path + } + + // --- Normal (non-pipelined) write path --- + // BOR state sync feed related changes bc.stateSyncMu.RLock() for _, data := range bc.GetStateSync() { @@ -4246,12 +4503,23 @@ func (bc *BlockChain) SubscribeChain2HeadEvent(ch chan<- Chain2HeadEvent) event. // The state commit is handled separately by the SRC goroutine that already // called CommitWithUpdate. This avoids the "layer stale" error that occurs // when two CommitWithUpdate calls diverge from the same parent root. +// WriteBlockAndSetHeadPipelined is the public variant that acquires the chain mutex. +// Used by the miner pipeline (resultLoop) where the mutex is not already held. func (bc *BlockChain) WriteBlockAndSetHeadPipelined(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool, witnessBytes []byte) (WriteStatus, error) { if !bc.chainmu.TryLock() { return NonStatTy, errChainStopped } defer bc.chainmu.Unlock() + return bc.writeBlockAndSetHeadPipelined(block, receipts, logs, statedb, emitHeadEvent, witnessBytes) +} + +// writeBlockAndSetHeadPipelined is the internal implementation. It writes block +// data (header, body, receipts) to the database and sets it as the chain head, +// WITHOUT committing trie state. The state commit is handled by the SRC goroutine. +// This function does NOT acquire the chain mutex — the caller must ensure +// proper synchronization (e.g., called from insertChainWithWitnesses). 
+func (bc *BlockChain) writeBlockAndSetHeadPipelined(block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB, emitHeadEvent bool, witnessBytes []byte) (WriteStatus, error) { // Write block data without state commit ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) if ptd == nil { @@ -4373,10 +4641,14 @@ func (bc *BlockChain) PostExecutionStateAt(header *types.Header) (*state.StateDB bc.lastFlatDiffMu.RLock() flatDiff := bc.lastFlatDiff flatDiffBlockNum := bc.lastFlatDiffBlockNum + flatDiffParentRoot := bc.lastFlatDiffParentRoot bc.lastFlatDiffMu.RUnlock() if flatDiff != nil && flatDiffBlockNum == header.Number.Uint64() { - return state.NewWithFlatBase(header.Root, bc.statedb, flatDiff) + // Open at the parent's committed root (which IS in the trie DB) and + // overlay the FlatDiff. We cannot use header.Root because it may not + // be committed yet (pipelined import SRC still running). + return state.NewWithFlatBase(flatDiffParentRoot, bc.statedb, flatDiff) } // Slow path: use the committed state root directly. @@ -4411,7 +4683,11 @@ func (bc *BlockChain) SpawnSRCGoroutine(block *types.Block, parentRoot common.Ha } }() - tmpDB, err := state.New(parentRoot, bc.statedb) + // Use NewTrieOnly to force all reads through the MPT (no flat/snapshot + // readers). This ensures every account and storage read walks the trie, + // capturing proof-path nodes in the witness. With normal readers, the + // flat reader short-circuits the trie and proof paths are never captured. 
+ tmpDB, err := state.NewTrieOnly(parentRoot, bc.statedb) if err != nil { log.Error("Pipelined SRC: failed to open tmpDB", "parentRoot", parentRoot, "err", err) pending.err = err @@ -4482,6 +4758,10 @@ func (bc *BlockChain) SpawnSRCGoroutine(block *types.Block, parentRoot common.Ha log.Error("Pipelined SRC: failed to encode witness", "block", block.NumberU64(), "err", err) } else { pending.witness = witBuf.Bytes() + // Cache the witness immediately so GetWitness can serve it + // before the auto-collection goroutine writes it to DB. + // For imported blocks the hash is already final (block is sealed). + bc.witnessCache.Add(block.Hash(), pending.witness) } } @@ -4509,6 +4789,109 @@ func (bc *BlockChain) WaitForSRC() (common.Hash, []byte, error) { return pending.root, pending.witness, nil } +// flushPendingImportSRC collects the pending import SRC goroutine (if any), +// verifies the root, writes the block to DB, handles trie GC, and clears +// the pending state. Called on shutdown, reorg, and when an incoming block +// doesn't continue from the pending block. +// flushPendingImportSRC waits for the auto-collection goroutine to finish +// and clears the pending state. Called on shutdown and when an incoming block +// doesn't follow the pending one (reorg/gap). +func (bc *BlockChain) flushPendingImportSRC() error { + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRC = nil + bc.pendingImportSRCMu.Unlock() + + if pending == nil { + return nil + } + + pipelineImportFallbackCounter.Inc(1) + + // Wait for auto-collection to finish (it handles verify, witness, trie GC) + <-pending.collectedCh + return pending.collectedErr +} + +// collectPendingImportSRC collects the pending import SRC goroutine, writes +// the previous block, and returns the new committed root. Unlike flush, this +// does NOT clear pendingImportSRC (the caller replaces it with the new block). 
+// collectPendingImportSRC waits for the auto-collection goroutine to finish +// and returns the committed root. The actual work (verify root, write witness, +// trie GC) is done by the auto-collection goroutine spawned alongside the SRC. +func (bc *BlockChain) collectPendingImportSRC() (common.Hash, error) { + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRCMu.Unlock() + + if pending == nil { + return common.Hash{}, errors.New("no pending import SRC") + } + + // Wait for auto-collection goroutine to finish + <-pending.collectedCh + + if pending.collectedErr != nil { + return common.Hash{}, pending.collectedErr + } + return pending.collectedRoot, nil +} + +// handleImportTrieGC performs trie garbage collection after a pipelined import +// SRC has committed the state. Replicates writeBlockWithState's GC logic. +func (bc *BlockChain) handleImportTrieGC(root common.Hash, blockNum uint64, procTime time.Duration) { + bc.gcproc += procTime + + if bc.triedb.Scheme() == rawdb.PathScheme { + return + } + if bc.cfg.ArchiveMode { + _ = bc.triedb.Commit(root, false) + return + } + + bc.triedb.Reference(root, common.Hash{}) + bc.triegc.Push(root, -int64(blockNum)) + + triesInMemory := bc.cfg.GetTriesInMemory() + if blockNum <= triesInMemory { + return + } + + _, nodes, imgs := bc.triedb.Size() + limit := common.StorageSize(bc.cfg.TrieDirtyLimit) * 1024 * 1024 + if nodes > limit || imgs > 4*1024*1024 { + _ = bc.triedb.Cap(limit - ethdb.IdealBatchSize) + } + + chosen := blockNum - triesInMemory + flushInterval := time.Duration(bc.flushInterval.Load()) + if bc.gcproc > flushInterval { + header := bc.GetHeaderByNumber(chosen) + if header == nil { + log.Warn("Reorg in progress, trie commit postponed", "number", chosen) + } else { + if chosen < bc.lastWrite+triesInMemory && bc.gcproc >= 2*flushInterval { + log.Info("State in memory for too long, committing", + "time", bc.gcproc, "allowance", flushInterval, + "optimum", 
float64(chosen-bc.lastWrite)/float64(triesInMemory)) + } + _ = bc.triedb.Commit(header.Root, true) + bc.lastWrite = chosen + bc.gcproc = 0 + } + } + + for !bc.triegc.Empty() { + r, number := bc.triegc.Pop() + if uint64(-number) > chosen { + bc.triegc.Push(r, number) + break + } + bc.triedb.Dereference(r) + } +} + // GetLastFlatDiff returns the FlatDiff captured from the most recently committed // block. The miner uses this to open a NewWithFlatBase StateDB without waiting // for the current SRC goroutine to finish. @@ -4522,10 +4905,12 @@ func (bc *BlockChain) GetLastFlatDiff() *state.FlatDiff { // The block number is used by PostExecutionStateAt to match the FlatDiff // to the correct block (hash matching is unreliable because Root and seal // signature are not available when FlatDiff is captured). -func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockNum uint64) { +func (bc *BlockChain) SetLastFlatDiff(diff *state.FlatDiff, blockNum uint64, parentRoot common.Hash, blockRoot common.Hash) { bc.lastFlatDiffMu.Lock() bc.lastFlatDiff = diff bc.lastFlatDiffBlockNum = blockNum + bc.lastFlatDiffParentRoot = parentRoot + bc.lastFlatDiffBlockRoot = blockRoot bc.lastFlatDiffMu.Unlock() } diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 509356a311..72a17e0f60 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -21,6 +21,8 @@ import ( "fmt" "math/big" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" @@ -158,7 +160,9 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { } // GetWitness retrieves a witness in RLP encoding from the database by hash, -// caching it if found. +// caching it if found. If the witness is not yet available but the pipelined +// import SRC goroutine is generating it for this block, GetWitness blocks +// until the SRC completes and the witness is written. 
func (bc *BlockChain) GetWitness(hash common.Hash) []byte { // Short circuit if the witness is already in the cache, retrieve otherwise if cached, ok := bc.witnessCache.Get(hash); ok { @@ -166,6 +170,11 @@ func (bc *BlockChain) GetWitness(hash common.Hash) []byte { } witness := bc.witnessStore.ReadWitness(hash) + if len(witness) == 0 { + // Witness not in DB yet — check if the pipelined import SRC goroutine + // is currently generating it. If so, wait for it to finish. + witness = bc.waitForPipelinedWitness(hash) + } if len(witness) == 0 { return nil } @@ -174,6 +183,58 @@ func (bc *BlockChain) GetWitness(hash common.Hash) []byte { return witness } +// waitForPipelinedWitness waits for a witness that is being generated by +// the pipelined import SRC goroutine. It handles two cases: +// +// 1. The requested block IS the current pendingImportSRC — block on its +// collectedCh until the SRC finishes and the witness is written. +// +// 2. The requested block is in the current import batch but hasn't been +// processed yet (or SRC just completed) — poll the witness cache briefly +// since the batch processes blocks rapidly (~2ms each). +// +// Returns nil if the witness doesn't appear within the timeout. +func (bc *BlockChain) waitForPipelinedWitness(hash common.Hash) []byte { + if !bc.cfg.EnablePipelinedImportSRC { + return nil + } + + // Fast path: check if this is the current pending SRC block. + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRCMu.Unlock() + + if pending != nil && pending.block.Hash() == hash { + <-pending.collectedCh + if w, ok := bc.witnessCache.Get(hash); ok { + return w + } + return bc.witnessStore.ReadWitness(hash) + } + + // Slow path: the block might be in the current import batch but not yet + // processed, or the SRC just completed and the witness is being written. + // Poll the witness cache with a short interval. 
The import pipeline + // processes blocks at ~2ms each and caches the witness immediately when + // SRC completes, so the wait is typically very short. + deadline := time.NewTimer(2 * time.Second) + defer deadline.Stop() + + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if w, ok := bc.witnessCache.Get(hash); ok { + return w + } + case <-deadline.C: + return nil + } + } +} + // HasWitness checks if a witness is present in the cache or database. func (bc *BlockChain) HasWitness(hash common.Hash) bool { if bc.witnessCache.Contains(hash) { @@ -473,9 +534,22 @@ func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { } // HasState checks if state trie is fully present in the database or not. +// For pipelined import, also returns true if the hash matches a pending +// import SRC block whose state will be committed momentarily. func (bc *BlockChain) HasState(hash common.Hash) bool { _, err := bc.statedb.OpenTrie(hash) - return err == nil + if err == nil { + return true + } + // Check if the state is being committed by a pipelined import SRC goroutine. + // The block metadata is already in DB; the state commit is in-flight. + bc.pendingImportSRCMu.Lock() + pending := bc.pendingImportSRC + bc.pendingImportSRCMu.Unlock() + if pending != nil && pending.block.Root() == hash { + return true + } + return false } // HasBlockAndState checks if a block and associated state trie is fully present @@ -517,6 +591,24 @@ func (bc *BlockChain) State() (*state.StateDB, error) { // StateAt returns a new mutable state based on a particular point in time. func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { + // Fast path: if this is the latest pipelined import block whose SRC hasn't + // committed yet, use FlatDiff overlay. This allows eth_call, eth_estimateGas, + // and other state readers to work during the brief window between metadata + // write and SRC completion. 
+ bc.lastFlatDiffMu.RLock() + flatDiff := bc.lastFlatDiff + flatDiffBlockRoot := bc.lastFlatDiffBlockRoot + flatDiffParentRoot := bc.lastFlatDiffParentRoot + bc.lastFlatDiffMu.RUnlock() + + if flatDiff != nil && root == flatDiffBlockRoot { + sdb, err := state.NewWithFlatBase(flatDiffParentRoot, bc.statedb, flatDiff) + if err != nil { + return state.New(root, bc.statedb) + } + return sdb, nil + } + return state.New(root, bc.statedb) } @@ -526,19 +618,40 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { // is for actual transaction processing. This enables independent cache hit/miss tracking // for both phases of block production. func (bc *BlockChain) StateAtWithReaders(root common.Hash) (*state.StateDB, *state.StateDB, state.ReaderWithStats, state.ReaderWithStats, error) { - prefetchReader, processReader, err := bc.statedb.ReadersWithCacheStats(root) + // If the root matches the latest pipelined import block (whose SRC hasn't + // committed yet), open readers at the committed parent root and apply the + // FlatDiff overlay. This allows the miner to build pending blocks even when + // the chain head's state root is not yet committed to the trie DB. 
+ readerRoot := root + bc.lastFlatDiffMu.RLock() + flatDiff := bc.lastFlatDiff + flatDiffBlockRoot := bc.lastFlatDiffBlockRoot + flatDiffParentRoot := bc.lastFlatDiffParentRoot + bc.lastFlatDiffMu.RUnlock() + + if flatDiff != nil && root == flatDiffBlockRoot { + readerRoot = flatDiffParentRoot + } + + prefetchReader, processReader, err := bc.statedb.ReadersWithCacheStats(readerRoot) if err != nil { return nil, nil, nil, nil, err } - statedb, err := state.NewWithReader(root, bc.statedb, processReader) + statedb, err := state.NewWithReader(readerRoot, bc.statedb, processReader) if err != nil { return nil, nil, nil, nil, err } - throwaway, err := state.NewWithReader(root, bc.statedb, prefetchReader) + throwaway, err := state.NewWithReader(readerRoot, bc.statedb, prefetchReader) if err != nil { return nil, nil, nil, nil, err } + // Apply FlatDiff overlay so the miner sees the latest block's post-state. + if flatDiff != nil && root == flatDiffBlockRoot { + statedb.SetFlatDiffRef(flatDiff) + throwaway.SetFlatDiffRef(flatDiff) + } + return statedb, throwaway, prefetchReader, processReader, nil } @@ -666,6 +779,12 @@ func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript return bc.scope.Track(bc.logsFeed.Subscribe(ch)) } +// SubscribeWitnessReadyEvent registers a subscription for witness availability +// events from the pipelined import SRC goroutine. +func (bc *BlockChain) SubscribeWitnessReadyEvent(ch chan<- WitnessReadyEvent) event.Subscription { + return bc.scope.Track(bc.witnessReadyFeed.Subscribe(ch)) +} + // SubscribeBlockProcessingEvent registers a subscription of bool where true means // block processing has started while false means it has stopped. 
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription { diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 1a8c73bc0f..145dc0d990 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -185,7 +185,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { if err != nil { return err } - receipts, logs, usedGas, statedb, _, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header(), nil, nil) + receipts, logs, usedGas, statedb, _, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header(), nil, nil, nil) res := &ProcessResult{ Receipts: receipts, Logs: logs, @@ -6427,3 +6427,459 @@ func TestWriteBlockMetrics(t *testing.T) { t.Error("stateCommitTimer mean duration should be non-negative") } } + +// --------------------------------------------------------------------------- +// Pipelined Import SRC Tests +// --------------------------------------------------------------------------- + +// pipelinedConfig returns a BlockChainConfig with pipelined import SRC enabled. +func pipelinedConfig(scheme string) *BlockChainConfig { + cfg := DefaultConfig().WithStateScheme(scheme) + cfg.EnablePipelinedImportSRC = true + cfg.PipelinedImportSRCLogs = true + return cfg +} + +// TestPipelinedImportSRC_MultipleBlocks generates 10 blocks with transactions and +// inserts them into two chains — one with pipelined SRC enabled and one without. +// The state roots of every canonical block must match between both chains. 
+func TestPipelinedImportSRC_MultipleBlocks(t *testing.T) { + testPipelinedImportSRC_MultipleBlocks(t, rawdb.HashScheme) + testPipelinedImportSRC_MultipleBlocks(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_MultipleBlocks(t *testing.T, scheme string) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{addr: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + // Generate 10 blocks with a simple transfer in each. + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 10, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), + signer, key, + ) + gen.AddTx(tx) + }) + + // Chain with pipeline enabled. + pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create pipeline chain: %v", err) + } + defer pipeChain.Stop() + + // Reference chain without pipeline. + refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme)) + if err != nil { + t.Fatalf("failed to create reference chain: %v", err) + } + defer refChain.Stop() + + if _, err := pipeChain.InsertChain(blocks, false); err != nil { + t.Fatalf("pipeline chain: failed to insert blocks: %v", err) + } + if _, err := refChain.InsertChain(blocks, false); err != nil { + t.Fatalf("reference chain: failed to insert blocks: %v", err) + } + + // Both chains must agree on head. 
+ if pipeChain.CurrentBlock().Number.Uint64() != 10 { + t.Fatalf("pipeline chain head = %d, want 10", pipeChain.CurrentBlock().Number.Uint64()) + } + if refChain.CurrentBlock().Number.Uint64() != 10 { + t.Fatalf("reference chain head = %d, want 10", refChain.CurrentBlock().Number.Uint64()) + } + + // All canonical blocks must have matching state roots. + for i := uint64(1); i <= 10; i++ { + pipeBlock := pipeChain.GetBlockByNumber(i) + refBlock := refChain.GetBlockByNumber(i) + if pipeBlock == nil || refBlock == nil { + t.Fatalf("block %d: missing on pipeline(%v) or reference(%v)", i, pipeBlock == nil, refBlock == nil) + } + if pipeBlock.Root() != refBlock.Root() { + t.Errorf("block %d: state root mismatch pipeline=%s reference=%s", i, pipeBlock.Root(), refBlock.Root()) + } + if pipeBlock.Hash() != refBlock.Hash() { + t.Errorf("block %d: block hash mismatch pipeline=%s reference=%s", i, pipeBlock.Hash(), refBlock.Hash()) + } + } +} + +// TestPipelinedImportSRC_SingleBlock inserts a single block with pipeline enabled +// and verifies correctness of the state. 
+func TestPipelinedImportSRC_SingleBlock(t *testing.T) { + testPipelinedImportSRC_SingleBlock(t, rawdb.HashScheme) + testPipelinedImportSRC_SingleBlock(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_SingleBlock(t *testing.T, scheme string) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{addr: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), + signer, key, + ) + gen.AddTx(tx) + }) + + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create chain: %v", err) + } + defer chain.Stop() + + if _, err := chain.InsertChain(blocks, false); err != nil { + t.Fatalf("failed to insert block: %v", err) + } + + if chain.CurrentBlock().Number.Uint64() != 1 { + t.Fatalf("head = %d, want 1", chain.CurrentBlock().Number.Uint64()) + } + + statedb, err := chain.StateAt(blocks[0].Root()) + if err != nil { + t.Fatalf("StateAt failed: %v", err) + } + + // Recipient should have received 1000 wei. 
+ bal := statedb.GetBalance(recipient) + if bal.IsZero() { + t.Error("recipient balance should be non-zero after transfer") + } +} + +// TestPipelinedImportSRC_CrossCallPersistence inserts blocks across two separate +// InsertChain calls with pipelined SRC and verifies that state persists correctly +// between calls (the pending SRC from the first batch is flushed before the +// second batch begins). +func TestPipelinedImportSRC_CrossCallPersistence(t *testing.T) { + testPipelinedImportSRC_CrossCallPersistence(t, rawdb.HashScheme) + testPipelinedImportSRC_CrossCallPersistence(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_CrossCallPersistence(t *testing.T, scheme string) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{addr: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 6, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), + signer, key, + ) + gen.AddTx(tx) + }) + + // Pipeline chain: split insertion across two calls. 
+ pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create pipeline chain: %v", err) + } + defer pipeChain.Stop() + + if _, err := pipeChain.InsertChain(blocks[:3], false); err != nil { + t.Fatalf("pipeline: first batch insert failed: %v", err) + } + if _, err := pipeChain.InsertChain(blocks[3:], false); err != nil { + t.Fatalf("pipeline: second batch insert failed: %v", err) + } + + // Reference chain: single call. + refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme)) + if err != nil { + t.Fatalf("failed to create reference chain: %v", err) + } + defer refChain.Stop() + + if _, err := refChain.InsertChain(blocks, false); err != nil { + t.Fatalf("reference: insert failed: %v", err) + } + + if pipeChain.CurrentBlock().Number.Uint64() != 6 { + t.Fatalf("pipeline head = %d, want 6", pipeChain.CurrentBlock().Number.Uint64()) + } + + for i := uint64(1); i <= 6; i++ { + pipeBlock := pipeChain.GetBlockByNumber(i) + refBlock := refChain.GetBlockByNumber(i) + if pipeBlock == nil || refBlock == nil { + t.Fatalf("block %d missing", i) + } + if pipeBlock.Root() != refBlock.Root() { + t.Errorf("block %d: state root mismatch pipeline=%s reference=%s", i, pipeBlock.Root(), refBlock.Root()) + } + } +} + +// TestPipelinedImportSRC_Reorg inserts a main chain and then a longer fork to +// trigger a reorg. Verifies that the fork becomes canonical and all state roots +// are valid after the reorg. 
+func TestPipelinedImportSRC_Reorg(t *testing.T) { + testPipelinedImportSRC_Reorg(t, rawdb.HashScheme) + testPipelinedImportSRC_Reorg(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_Reorg(t *testing.T, scheme string) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{ + addr1: {Balance: funds}, + addr2: {Balance: funds}, + }, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + // Main chain: 5 blocks, transfers from addr1. + _, mainBlocks, _ := GenerateChainWithGenesis(gspec, engine, 5, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr1), common.HexToAddress("0x1111"), big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), + signer, key1, + ) + gen.AddTx(tx) + }) + + // Fork chain: 7 blocks branching from genesis, using addr2 so it creates + // different state. Longer chain so it becomes canonical. + _, forkBlocks, _ := GenerateChainWithGenesis(gspec, engine, 7, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr2), common.HexToAddress("0x2222"), big.NewInt(2000), params.TxGas, gen.header.BaseFee, nil), + signer, key2, + ) + gen.AddTx(tx) + }) + + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create chain: %v", err) + } + defer chain.Stop() + + // Insert main chain. 
+ if _, err := chain.InsertChain(mainBlocks, false); err != nil { + t.Fatalf("main chain insert failed: %v", err) + } + if chain.CurrentBlock().Number.Uint64() != 5 { + t.Fatalf("after main: head = %d, want 5", chain.CurrentBlock().Number.Uint64()) + } + + // Insert fork chain — should trigger reorg since it's longer. + if _, err := chain.InsertChain(forkBlocks, false); err != nil { + t.Fatalf("fork chain insert failed: %v", err) + } + if chain.CurrentBlock().Number.Uint64() != 7 { + t.Fatalf("after fork: head = %d, want 7", chain.CurrentBlock().Number.Uint64()) + } + + // Verify the fork is now canonical by checking block hashes. + for i := uint64(1); i <= 7; i++ { + canonical := chain.GetBlockByNumber(i) + if canonical == nil { + t.Fatalf("missing canonical block %d after reorg", i) + } + if canonical.Hash() != forkBlocks[i-1].Hash() { + t.Errorf("block %d: canonical hash %s != fork hash %s", i, canonical.Hash(), forkBlocks[i-1].Hash()) + } + } + + // Verify state is accessible for the canonical head. + statedb, err := chain.StateAt(chain.CurrentBlock().Root) + if err != nil { + t.Fatalf("StateAt head failed: %v", err) + } + // addr2 sent 2000 wei per block for 7 blocks => should have less than initial funds. + bal := statedb.GetBalance(addr2) + if bal.IsZero() { + t.Error("addr2 balance should be non-zero") + } +} + +// TestPipelinedImportSRC_StateAtDuringPipeline generates blocks that modify +// account balances and verifies that StateAt returns correct balances for each +// block's root after pipelined insertion. 
+func TestPipelinedImportSRC_StateAtDuringPipeline(t *testing.T) { + testPipelinedImportSRC_StateAtDuringPipeline(t, rawdb.HashScheme) + testPipelinedImportSRC_StateAtDuringPipeline(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_StateAtDuringPipeline(t *testing.T, scheme string) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + txValue = big.NewInt(10000) // 10000 wei per block + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{addr: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + numBlocks := 5 + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr), recipient, txValue, params.TxGas, gen.header.BaseFee, nil), + signer, key, + ) + gen.AddTx(tx) + }) + + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create chain: %v", err) + } + defer chain.Stop() + + if _, err := chain.InsertChain(blocks, false); err != nil { + t.Fatalf("failed to insert chain: %v", err) + } + + // Verify state at each block root shows monotonically increasing recipient balance. 
+ var prevBal *uint256.Int + for i := 0; i < numBlocks; i++ { + statedb, err := chain.StateAt(blocks[i].Root()) + if err != nil { + t.Fatalf("block %d: StateAt failed: %v", i+1, err) + } + bal := statedb.GetBalance(recipient) + if bal.IsZero() { + t.Errorf("block %d: recipient balance is zero, expected non-zero", i+1) + } + if prevBal != nil && bal.Cmp(prevBal) <= 0 { + t.Errorf("block %d: recipient balance %s should be greater than previous %s", i+1, bal, prevBal) + } + prevBal = bal.Clone() + } + + // Final balance should equal txValue * numBlocks. + expectedBal := new(big.Int).Mul(txValue, big.NewInt(int64(numBlocks))) + finalState, _ := chain.StateAt(blocks[numBlocks-1].Root()) + got := finalState.GetBalance(recipient).ToBig() + if got.Cmp(expectedBal) != 0 { + t.Errorf("final recipient balance: got %s, want %s", got, expectedBal) + } +} + +// TestPipelinedImportSRC_ValidateStateCheap verifies that blocks inserted with +// pipelined SRC pass all cheap validation checks (gas used, bloom filter, +// receipt root). This is implicitly tested by successful insertion, but this +// test explicitly verifies no errors by comparing against a reference chain. 
+func TestPipelinedImportSRC_ValidateStateCheap(t *testing.T) { + testPipelinedImportSRC_ValidateStateCheap(t, rawdb.HashScheme) + testPipelinedImportSRC_ValidateStateCheap(t, rawdb.PathScheme) +} + +func testPipelinedImportSRC_ValidateStateCheap(t *testing.T, scheme string) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + recipient = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + funds = new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: types.GenesisAlloc{addr: {Balance: funds}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + ) + + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, func(i int, gen *BlockGen) { + tx, _ := types.SignTx( + types.NewTransaction(gen.TxNonce(addr), recipient, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), + signer, key, + ) + gen.AddTx(tx) + }) + + // Insert with pipeline — any ValidateStateCheap failure would surface as + // an InsertChain error. + pipeChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, pipelinedConfig(scheme)) + if err != nil { + t.Fatalf("failed to create pipeline chain: %v", err) + } + defer pipeChain.Stop() + + n, err := pipeChain.InsertChain(blocks, false) + if err != nil { + t.Fatalf("pipeline InsertChain failed at block %d: %v", n, err) + } + + // Reference chain for comparison. 
+ refChain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, DefaultConfig().WithStateScheme(scheme)) + if err != nil { + t.Fatalf("failed to create reference chain: %v", err) + } + defer refChain.Stop() + + if _, err := refChain.InsertChain(blocks, false); err != nil { + t.Fatalf("reference InsertChain failed: %v", err) + } + + // Verify: every block has matching gas, bloom, receipt root, and state root. + for i := uint64(1); i <= 8; i++ { + pBlock := pipeChain.GetBlockByNumber(i) + rBlock := refChain.GetBlockByNumber(i) + if pBlock == nil || rBlock == nil { + t.Fatalf("block %d missing", i) + } + if pBlock.GasUsed() != rBlock.GasUsed() { + t.Errorf("block %d: gas used mismatch %d vs %d", i, pBlock.GasUsed(), rBlock.GasUsed()) + } + if pBlock.Bloom() != rBlock.Bloom() { + t.Errorf("block %d: bloom filter mismatch", i) + } + if pBlock.ReceiptHash() != rBlock.ReceiptHash() { + t.Errorf("block %d: receipt hash mismatch %s vs %s", i, pBlock.ReceiptHash(), rBlock.ReceiptHash()) + } + if pBlock.Root() != rBlock.Root() { + t.Errorf("block %d: state root mismatch %s vs %s", i, pBlock.Root(), rBlock.Root()) + } + } +} diff --git a/core/events.go b/core/events.go index fadecdedf7..19afd30300 100644 --- a/core/events.go +++ b/core/events.go @@ -19,6 +19,7 @@ package core import ( "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" ) @@ -49,6 +50,14 @@ type ChainSideEvent struct { Header *types.Header } +// WitnessReadyEvent is posted when a pipelined import SRC goroutine finishes +// and writes the witness to the database. The handler uses this to announce +// witness availability to peers via the WIT protocol. 
+type WitnessReadyEvent struct { + BlockHash common.Hash + BlockNumber uint64 +} + type ChainHeadEvent struct { Header *types.Header } diff --git a/core/state/database.go b/core/state/database.go index 53745b86e8..5fbc565617 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -200,6 +200,19 @@ func NewDatabaseForTesting() *CachingDB { return NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) } +// TrieOnlyReader returns a state reader that uses only the trie (MPT), skipping +// flat/snapshot readers. This ensures all account and storage reads walk the trie, +// which is required for witness building — the witness captures trie nodes during +// the walk. Without this, flat readers short-circuit the trie and proof paths are +// never captured. +func (db *CachingDB) TrieOnlyReader(stateRoot common.Hash) (Reader, error) { + tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache) + if err != nil { + return nil, err + } + return newReader(newCachingCodeReader(db.disk, db.codeCache, db.codeSizeCache), tr), nil +} + // Reader returns a state reader associated with the specified state root. func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) { var readers []StateReader diff --git a/core/state/statedb.go b/core/state/statedb.go index d08acd805a..2206226f33 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -197,6 +197,18 @@ func New(root common.Hash, db Database) (*StateDB, error) { return NewWithReader(root, db, reader) } +// NewTrieOnly creates a new state that uses only the trie reader (no flat/snapshot +// readers). This forces all account and storage reads to walk the MPT, which is +// required for witness building — the witness captures trie nodes during the walk. +// Used by the pipelined SRC goroutine to ensure the witness is complete. 
+func NewTrieOnly(root common.Hash, db *CachingDB) (*StateDB, error) { + reader, err := db.TrieOnlyReader(root) + if err != nil { + return nil, err + } + return NewWithReader(root, db, reader) +} + // NewWithReader creates a new state for the specified state root. Unlike New, // this function accepts an additional Reader which is bound to the given root. func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, error) { @@ -1037,6 +1049,10 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { s.setStateObject(obj) return obj } + // Account not in FlatDiff — check if it was destructed in FlatDiff. + if _, ok := s.flatDiffRef.Destructs[addr]; ok { + return nil + } } s.AccountLoaded++ diff --git a/core/state/statedb_pipeline_test.go b/core/state/statedb_pipeline_test.go index a6ee4d1552..c2eec48c42 100644 --- a/core/state/statedb_pipeline_test.go +++ b/core/state/statedb_pipeline_test.go @@ -4,10 +4,13 @@ import ( "testing" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/triedb" ) @@ -127,3 +130,210 @@ func TestCommitSnapshot_CapturesWrites(t *testing.T) { t.Errorf("expected slot value 0xbbbb, got %x", slots[slot]) } } + +func TestFlatDiffOverlay_DestructedAccountReturnsNil(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xdead01") + sdb.CreateAccount(addr) + sdb.SetBalance(addr, uint256.NewInt(999), 0) + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // FlatDiff marks account as destructed but does NOT add it to Accounts. 
+ diff := &FlatDiff{ + Accounts: make(map[common.Address]types.StateAccount), + Storage: make(map[common.Address]map[common.Hash]common.Hash), + Destructs: map[common.Address]struct{}{addr: {}}, + Code: make(map[common.Hash][]byte), + } + + overlayDB, err := NewWithFlatBase(root, db, diff) + require.NoError(t, err) + + require.False(t, overlayDB.Exist(addr), "destructed account should not exist") + require.True(t, overlayDB.GetBalance(addr).IsZero(), "destructed account balance should be zero") +} + +func TestFlatDiffOverlay_DestructAndResurrect(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xdead02") + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 5, 0) + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // FlatDiff has addr in BOTH Destructs and Accounts (destruct + resurrect with new nonce). + diff := &FlatDiff{ + Accounts: map[common.Address]types.StateAccount{ + addr: { + Nonce: 10, + Balance: uint256.NewInt(0), + Root: types.EmptyRootHash, + CodeHash: types.EmptyCodeHash.Bytes(), + }, + }, + Storage: make(map[common.Address]map[common.Hash]common.Hash), + Destructs: map[common.Address]struct{}{addr: {}}, + Code: make(map[common.Hash][]byte), + } + + overlayDB, err := NewWithFlatBase(root, db, diff) + require.NoError(t, err) + + // The account should be resurrected with the new nonce from FlatDiff.Accounts. + require.Equal(t, uint64(10), overlayDB.GetNonce(addr)) +} + +func TestTrieOnlyReader_SkipsFlatReaders(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xacc001") + sdb.CreateAccount(addr) + sdb.SetBalance(addr, uint256.NewInt(42), 0) + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Create StateDB via NewTrieOnly — reads go through trie, not flat/snapshot. 
+ trieDB, err := NewTrieOnly(root, db) + require.NoError(t, err) + + // Verify trie reader returns correct data. + require.Equal(t, uint256.NewInt(42), trieDB.GetBalance(addr)) + + // Attach a witness and modify the account via a fresh trie-only StateDB. + // After IntermediateRoot, the witness should capture trie nodes (non-empty + // State map). With flat readers the trie is never walked, so the witness + // would remain empty. + trieDB2, err := NewTrieOnly(root, db) + require.NoError(t, err) + + witness := &stateless.Witness{ + Headers: []*types.Header{{}}, + Codes: make(map[string]struct{}), + State: make(map[string]struct{}), + } + trieDB2.SetWitness(witness) + + // Modify the account so that IntermediateRoot walks the trie and collects + // witness nodes from the account trie. + trieDB2.SetBalance(addr, uint256.NewInt(99), 0) + trieDB2.IntermediateRoot(false) + + require.NotEmpty(t, witness.State, "witness should capture trie nodes when using trie-only reader") +} + +func TestNewTrieOnly_ReadsCorrectData(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr1 := common.HexToAddress("0xacc101") + addr2 := common.HexToAddress("0xacc102") + addr3 := common.HexToAddress("0xacc103") + + sdb.CreateAccount(addr1) + sdb.SetBalance(addr1, uint256.NewInt(100), 0) + sdb.SetNonce(addr1, 1, 0) + + sdb.CreateAccount(addr2) + sdb.SetBalance(addr2, uint256.NewInt(200), 0) + sdb.SetNonce(addr2, 5, 0) + sdb.SetCode(addr2, []byte{0x60, 0x00, 0x60, 0x00}, 0) + + sdb.CreateAccount(addr3) + sdb.SetBalance(addr3, uint256.NewInt(300), 0) + slot := common.HexToHash("0xaa01") + sdb.SetState(addr3, slot, common.HexToHash("0xbb01")) + + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Create via NewTrieOnly and verify all data. 
+ trieDB, err := NewTrieOnly(root, db) + require.NoError(t, err) + + require.Equal(t, uint256.NewInt(100), trieDB.GetBalance(addr1)) + require.Equal(t, uint64(1), trieDB.GetNonce(addr1)) + + require.Equal(t, uint256.NewInt(200), trieDB.GetBalance(addr2)) + require.Equal(t, uint64(5), trieDB.GetNonce(addr2)) + require.Equal(t, crypto.Keccak256Hash([]byte{0x60, 0x00, 0x60, 0x00}), trieDB.GetCodeHash(addr2)) + require.Equal(t, []byte{0x60, 0x00, 0x60, 0x00}, trieDB.GetCode(addr2)) + + require.Equal(t, uint256.NewInt(300), trieDB.GetBalance(addr3)) + require.Equal(t, common.HexToHash("0xbb01"), trieDB.GetState(addr3, slot)) +} + +func TestPropagateReadsTo_AccountsAndStorage(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr1 := common.HexToAddress("0xaa0001") + addr2 := common.HexToAddress("0xaa0002") + slot1 := common.HexToHash("0xcc0001") + slot2 := common.HexToHash("0xcc0002") + + sdb.CreateAccount(addr1) + sdb.SetBalance(addr1, uint256.NewInt(111), 0) + sdb.SetState(addr1, slot1, common.HexToHash("0xdd0001")) + sdb.SetState(addr1, slot2, common.HexToHash("0xdd0002")) + + sdb.CreateAccount(addr2) + sdb.SetBalance(addr2, uint256.NewInt(222), 0) + + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Create src and dst StateDBs at the same root. + src, err := New(root, db) + require.NoError(t, err) + dst, err := New(root, db) + require.NoError(t, err) + + // Read accounts and storage on src. + src.GetBalance(addr1) + src.GetBalance(addr2) + src.GetState(addr1, slot1) + src.GetState(addr1, slot2) + + // Propagate reads from src to dst. + src.PropagateReadsTo(dst) + + // dst should now have the accounts and storage in its stateObjects + // (populated by PropagateReadsTo calling GetBalance/GetState on dst). 
+ require.Equal(t, uint256.NewInt(111), dst.GetBalance(addr1)) + require.Equal(t, uint256.NewInt(222), dst.GetBalance(addr2)) + require.Equal(t, common.HexToHash("0xdd0001"), dst.GetState(addr1, slot1)) + require.Equal(t, common.HexToHash("0xdd0002"), dst.GetState(addr1, slot2)) +} + +func TestCommitSnapshot_CapturesDestructs(t *testing.T) { + db := NewDatabaseForTesting() + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xdestruct01") + sdb.CreateAccount(addr) + sdb.SetBalance(addr, uint256.NewInt(500), 0) + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Create a new StateDB at the committed root and self-destruct the account. + sdb2, err := New(root, db) + require.NoError(t, err) + + sdb2.SelfDestruct(addr) + diff := sdb2.CommitSnapshot(false) + + _, destructed := diff.Destructs[addr] + require.True(t, destructed, "self-destructed account should appear in diff.Destructs") +} diff --git a/core/types.go b/core/types.go index 43f1f87897..f9f4b691e3 100644 --- a/core/types.go +++ b/core/types.go @@ -35,6 +35,11 @@ type Validator interface { // ValidateState validates the given statedb and optionally the process result. ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error + + // ValidateStateCheap validates cheap post-state checks (gas, bloom, receipt root, + // requests) without computing the expensive IntermediateRoot. Used by the + // pipelined import path where IntermediateRoot is deferred to an SRC goroutine. + ValidateStateCheap(block *types.Block, state *state.StateDB, res *ProcessResult) error } // Prefetcher is an interface for pre-caching transaction signatures and state. 
diff --git a/docs/cli/default_config.toml b/docs/cli/default_config.toml index 0d959900c0..cb3197af50 100644 --- a/docs/cli/default_config.toml +++ b/docs/cli/default_config.toml @@ -265,3 +265,7 @@ devfakeauthor = false enable-preconfs = false enable-private-tx = false bp-rpc-endpoints = [] + +[pipeline] + enable-import-src = true + import-src-logs = true diff --git a/docs/cli/server.md b/docs/cli/server.md index 98a978b54e..bc62316a52 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -324,6 +324,12 @@ The ```bor server``` command runs the Bor client. - ```v5disc```: Enables the V5 discovery mechanism (default: true) +### Pipeline Options + +- ```pipeline.enable-import-src```: Enable pipelined state root computation during block import: overlap SRC(N) with block N+1 tx execution (default: true) + +- ```pipeline.import-src-logs```: Enable verbose logging for pipelined import SRC (default: true) + ### Sealer Options - ```allow-gas-tip-override```: Allows block producers to override the mining gas tip (default: false) diff --git a/eth/api_debug.go b/eth/api_debug.go index 7bcf149e07..4cd120a721 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -506,7 +506,7 @@ func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (*stateless.ExtWitness } parentBlock := bc.GetBlockByHash(block.ParentHash()) - _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil) + _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil, nil) if err != nil { return nil, err } @@ -527,7 +527,7 @@ func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (*stateless.ExtWit } parentBlock := bc.GetBlockByHash(block.ParentHash()) - _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil) + _, _, _, statedb, _, err := bc.ProcessBlock(parentBlock, block.Header(), nil, nil, nil) if err != nil { return nil, err } diff --git a/eth/backend.go b/eth/backend.go index fbd3f98eb3..4328bcc3eb 100644 --- 
a/eth/backend.go +++ b/eth/backend.go @@ -291,7 +291,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { EnableWitnessStats: config.EnableWitnessStats, EnableEVMSwitchDispatch: config.EnableEVMSwitchDispatch, }, - Stateless: config.SyncMode == downloader.StatelessSync, + EnablePipelinedImportSRC: config.EnablePipelinedImportSRC, + PipelinedImportSRCLogs: config.PipelinedImportSRCLogs, + Stateless: config.SyncMode == downloader.StatelessSync, // Enables file journaling for the trie database. The journal files will be stored // within the data directory. The corresponding paths will be either: // - DATADIR/triedb/merkle.journal diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index a1d42d7b57..0ddb5a9078 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -132,6 +132,10 @@ type Config struct { NoPruning bool // Whether to disable pruning and flush everything to disk NoPrefetch bool // Whether to disable prefetching and only load state on demand + // Pipelined import SRC: overlap SRC(N) with tx execution of block N+1 during import + EnablePipelinedImportSRC bool + PipelinedImportSRCLogs bool + // Deprecated: use 'TransactionHistory' instead. TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. 
diff --git a/eth/handler.go b/eth/handler.go index 65dbe7eb3a..195cc84614 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -164,13 +164,15 @@ type handler struct { // privateTxGetter to check if a transaction needs to be treated as private or not privateTxGetter relay.PrivateTxGetter - eventMux *event.TypeMux - txsCh chan core.NewTxsEvent - txsSub event.Subscription - stuckTxsCh chan core.StuckTxsEvent - stuckTxsSub event.Subscription - minedBlockSub *event.TypeMuxSubscription - blockRange *blockRangeState + eventMux *event.TypeMux + txsCh chan core.NewTxsEvent + txsSub event.Subscription + stuckTxsCh chan core.StuckTxsEvent + stuckTxsSub event.Subscription + minedBlockSub *event.TypeMuxSubscription + witnessReadyCh chan core.WitnessReadyEvent + witnessReadySub event.Subscription + blockRange *blockRangeState requiredBlocks map[uint64]common.Hash @@ -619,6 +621,12 @@ func (h *handler) Start(maxPeers int) { h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{}) go h.minedBroadcastLoop() + // announce witnesses from pipelined import SRC + h.witnessReadyCh = make(chan core.WitnessReadyEvent, 10) + h.witnessReadySub = h.chain.SubscribeWitnessReadyEvent(h.witnessReadyCh) + h.wg.Add(1) + go h.witnessReadyBroadcastLoop() + h.wg.Add(1) go h.chainSync.loop() @@ -640,6 +648,9 @@ func (h *handler) Stop() { h.stuckTxsSub.Unsubscribe() // quits stuckTxBroadcastLoop } h.minedBlockSub.Unsubscribe() + if h.witnessReadySub != nil { + h.witnessReadySub.Unsubscribe() + } h.blockRange.stop() // Quit chainSync and txsync64. @@ -837,6 +848,24 @@ func (h *handler) minedBroadcastLoop() { } } +// witnessReadyBroadcastLoop announces witness availability from the pipelined +// import SRC goroutine. Without this, the stateless node would have to poll +// for witnesses with 10-second retry intervals. 
+func (h *handler) witnessReadyBroadcastLoop() { + defer h.wg.Done() + + for { + select { + case ev := <-h.witnessReadyCh: + for _, peer := range h.peers.peersWithoutWitness(ev.BlockHash) { + peer.Peer.AsyncSendNewWitnessHash(ev.BlockHash, ev.BlockNumber) + } + case <-h.witnessReadySub.Err(): + return + } + } +} + // txBroadcastLoop announces new transactions to connected peers. func (h *handler) txBroadcastLoop() { defer h.wg.Done() diff --git a/eth/handler_wit.go b/eth/handler_wit.go index 2c4e19d471..7bd52b92cc 100644 --- a/eth/handler_wit.go +++ b/eth/handler_wit.go @@ -120,14 +120,30 @@ func (h *witHandler) handleGetWitness(peer *wit.Peer, req *wit.GetWitnessPacket) seen[witnessPage.Hash] = struct{}{} } - // witness sizes query + // witness sizes query — first check rawdb, then fall back to GetWitness + // which can wait for the pipelined import SRC goroutine to finish generating + // the witness. Without this fallback, the handler returns empty for witnesses + // that are cached (SRC done) but not yet written to the store (auto-collection + // pending). witnessSize := make(map[common.Hash]uint64, len(seen)) + prefetchedWitnesses := make(map[common.Hash][]byte, len(seen)) for witnessBlockHash := range seen { size := rawdb.ReadWitnessSize(h.Chain().DB(), witnessBlockHash) - if size == nil { - witnessSize[witnessBlockHash] = 0 - } else { + if size != nil { witnessSize[witnessBlockHash] = *size + } else if h.Chain().GetHeaderByHash(witnessBlockHash) != nil { + // Witness not in store yet but block exists — try GetWitness which + // checks the cache and waits for pipelined SRC if needed. The + // header check prevents a DoS where a peer requests witnesses for + // non-existent blocks, causing 2-second waits per hash. 
+ if w := h.Chain().GetWitness(witnessBlockHash); len(w) > 0 { + witnessSize[witnessBlockHash] = uint64(len(w)) + prefetchedWitnesses[witnessBlockHash] = w + } else { + witnessSize[witnessBlockHash] = 0 + } + } else { + witnessSize[witnessBlockHash] = 0 } } @@ -150,6 +166,11 @@ func (h *witHandler) handleGetWitness(peer *wit.Peer, req *wit.GetWitnessPacket) var witnessBytes []byte if cachedRLPBytes, exists := witnessCache[witnessPage.Hash]; exists { witnessBytes = cachedRLPBytes + } else if prefetched, exists := prefetchedWitnesses[witnessPage.Hash]; exists { + // Use the witness already fetched during the size check (avoids double wait) + witnessBytes = prefetched + witnessCache[witnessPage.Hash] = prefetched + totalCached += len(prefetched) } else { // Use GetWitness to benefit from the blockchain's witness cache queriedBytes := h.Chain().GetWitness(witnessPage.Hash) diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index c705d58704..56ccb8ab76 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -175,6 +175,9 @@ type Config struct { // Relay has transaction relay related settings Relay *RelayConfig `hcl:"relay,block" toml:"relay,block"` + + // Pipeline has pipelined SRC settings for block import + Pipeline *PipelineConfig `hcl:"pipeline,block" toml:"pipeline,block"` } type HistoryConfig struct { @@ -813,6 +816,16 @@ type RelayConfig struct { BlockProducerRpcEndpoints []string `hcl:"bp-rpc-endpoints,optional" toml:"bp-rpc-endpoints,optional"` } +// PipelineConfig has settings for pipelined state root computation during block import. 
+type PipelineConfig struct { + // EnableImportSRC enables pipelined state root computation during block import: + // overlap SRC(N) with tx execution of block N+1 + EnableImportSRC bool `hcl:"enable-import-src,optional" toml:"enable-import-src,optional"` + + // ImportSRCLogs enables verbose logging for pipelined import SRC + ImportSRCLogs bool `hcl:"import-src-logs,optional" toml:"import-src-logs,optional"` +} + func DefaultConfig() *Config { return &Config{ Chain: "mainnet", @@ -1075,6 +1088,10 @@ func DefaultConfig() *Config { EnablePrivateTx: false, BlockProducerRpcEndpoints: []string{}, }, + Pipeline: &PipelineConfig{ + EnableImportSRC: true, + ImportSRCLogs: true, + }, } } @@ -1558,6 +1575,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.TrieDirtyCache = calcPerc(c.Cache.PercGc) n.NoPrefetch = c.Cache.NoPrefetch n.Preimages = c.Cache.Preimages + n.EnablePipelinedImportSRC = c.Pipeline.EnableImportSRC + n.PipelinedImportSRCLogs = c.Pipeline.ImportSRCLogs // Note that even the values set by `history.transactions` will be written in the old flag until it's removed. 
n.TransactionHistory = c.Cache.TxLookupLimit n.TrieTimeout = c.Cache.TrieTimeout diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index e314a568a6..6be2069faf 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -654,6 +654,20 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { Default: c.cliConfig.Cache.TxLookupLimit, Group: "Cache", }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "pipeline.enable-import-src", + Usage: "Enable pipelined state root computation during block import: overlap SRC(N) with block N+1 tx execution", + Value: &c.cliConfig.Pipeline.EnableImportSRC, + Default: c.cliConfig.Pipeline.EnableImportSRC, + Group: "Pipeline", + }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "pipeline.import-src-logs", + Usage: "Enable verbose logging for pipelined import SRC", + Value: &c.cliConfig.Pipeline.ImportSRCLogs, + Default: c.cliConfig.Pipeline.ImportSRCLogs, + Group: "Pipeline", + }) f.IntFlag(&flagset.IntFlag{ Name: "fdlimit", Usage: "Raise the open file descriptor resource limit (default = system fd limit)", diff --git a/miner/pipeline.go b/miner/pipeline.go index ae4f4be9e3..0fb39d4048 100644 --- a/miner/pipeline.go +++ b/miner/pipeline.go @@ -121,7 +121,7 @@ func (w *worker) commitPipelined(env *environment, start time.Time) error { } parentRoot := parent.Root - w.chain.SetLastFlatDiff(flatDiff, env.header.Number.Uint64()) + w.chain.SetLastFlatDiff(flatDiff, env.header.Number.Uint64(), parentRoot, common.Hash{}) // Note: this counts block N as "entering the pipeline." If Prepare() fails // and fallbackToSequential produces the block inline, the counter is slightly // inflated — the block was produced sequentially, not speculatively. 
@@ -533,7 +533,7 @@ func (w *worker) commitSpeculativeWork(req *speculativeWorkReq) { srcSpawnTime := time.Now() tmpBlockCur := types.NewBlockWithHeader(finalSpecHeader) w.chain.SpawnSRCGoroutine(tmpBlockCur, rootN, flatDiff) - w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Number.Uint64()) + w.chain.SetLastFlatDiff(flatDiff, finalSpecHeader.Number.Uint64(), rootN, common.Hash{}) if w.config.PipelinedSRCLogs { log.Info("Pipelined SRC: spawned SRC, starting speculative exec", "srcBlock", nextBlockNumber, "specExecBlock", nextNextBlockNumber) @@ -795,7 +795,7 @@ func (w *worker) sealBlockViaTaskCh( if spawnSRC { tmpBlock := types.NewBlockWithHeader(finalHeader) w.chain.SpawnSRCGoroutine(tmpBlock, rootN, flatDiff) - w.chain.SetLastFlatDiff(flatDiff, finalHeader.Number.Uint64()) + w.chain.SetLastFlatDiff(flatDiff, finalHeader.Number.Uint64(), rootN, common.Hash{}) } pipelineSpeculativeBlocksCounter.Inc(1) diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 8def74981e..1e13efa2b2 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -3100,3 +3100,311 @@ txsDone: require.GreaterOrEqual(t, totalTxs, txCount, "expected at least %d transactions across all blocks, got %d", txCount, totalTxs) } + +// TestPipelinedImportSRC_BasicImport verifies that a non-mining node with +// pipelined import SRC enabled correctly syncs blocks from a block-producing +// peer. The importer computes state roots in the background (overlapping +// SRC(N) with tx execution of N+1) and should arrive at the same chain state +// as the BP. 
+func TestPipelinedImportSRC_BasicImport(t *testing.T) { + t.Parallel() + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + fdlimit.Raise(2048) + + faucets := make([]*ecdsa.PrivateKey, 128) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() + } + + genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16) + genesis.Config.Bor.Period = map[string]uint64{"0": 2} + genesis.Config.Bor.Sprint = map[string]uint64{"0": 16} + genesis.Config.Bor.RioBlock = big.NewInt(0) + + // Start a normal BP (no pipeline on mining side) + bpStack, bpBackend, err := InitMiner(genesis, keys[0], true) + require.NoError(t, err) + defer bpStack.Close() + + for bpStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Start a non-mining importer with pipelined import SRC + importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true) + require.NoError(t, err) + defer importerStack.Close() + + for importerStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Connect the two peers + importerStack.Server().AddPeer(bpStack.Server().Self()) + bpStack.Server().AddPeer(importerStack.Server().Self()) + + // Start mining on the BP + err = bpBackend.StartMining() + require.NoError(t, err) + + // Wait for the BP to produce at least 20 blocks + targetBlock := uint64(20) + deadline := time.After(120 * time.Second) + for { + select { + case <-deadline: + bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Fatalf("Timed out waiting for BP to reach block %d, current: %d", targetBlock, bpNum) + default: + time.Sleep(500 * time.Millisecond) + if bpBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock { + goto bpDone + } + } + } +bpDone: + + bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Logf("BP produced %d blocks, waiting for importer to sync", bpNum) + + // Wait for 
the importer to sync up to the target + deadline = time.After(120 * time.Second) + for { + select { + case <-deadline: + importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum) + default: + time.Sleep(500 * time.Millisecond) + if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock { + goto importerDone + } + } + } +importerDone: + + importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Logf("Importer synced to block %d", importerNum) + + // Allow async DB writes to flush + time.Sleep(2 * time.Second) + + // Use the minimum of both chains for comparison + bpNum = bpBackend.BlockChain().CurrentBlock().Number.Uint64() + importerNum = importerBackend.BlockChain().CurrentBlock().Number.Uint64() + compareUpTo := bpNum + if importerNum < compareUpTo { + compareUpTo = importerNum + } + + bpChain := bpBackend.BlockChain() + importerChain := importerBackend.BlockChain() + + for i := uint64(1); i <= compareUpTo; i++ { + bpBlock := bpChain.GetBlockByNumber(i) + require.NotNil(t, bpBlock, "BP missing block %d", i) + + importerBlock := importerChain.GetBlockByNumber(i) + require.NotNil(t, importerBlock, "importer missing block %d", i) + + // Block hashes must match + require.Equal(t, bpBlock.Hash(), importerBlock.Hash(), + "block %d hash mismatch: BP=%x importer=%x", i, bpBlock.Hash(), importerBlock.Hash()) + + // State roots must match + require.Equal(t, bpBlock.Root(), importerBlock.Root(), + "block %d state root mismatch: BP=%x importer=%x", i, bpBlock.Root(), importerBlock.Root()) + + // Verify the importer can open state at each block's root + _, err := importerChain.StateAt(importerBlock.Root()) + require.NoError(t, err, "importer cannot open state at block %d root %x", i, importerBlock.Root()) + } + + t.Logf("Verified %d blocks: hashes, state roots, and state accessibility all match", compareUpTo) +} + +// 
TestPipelinedImportSRC_WithTransactions verifies that a non-mining node with +// pipelined import SRC correctly imports blocks containing transactions. It +// checks that transaction receipts exist and that account balances match +// between the BP and the importer. +func TestPipelinedImportSRC_WithTransactions(t *testing.T) { + t.Parallel() + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + fdlimit.Raise(2048) + + faucets := make([]*ecdsa.PrivateKey, 128) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() + } + + genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16) + genesis.Config.Bor.Period = map[string]uint64{"0": 2} + genesis.Config.Bor.Sprint = map[string]uint64{"0": 16} + genesis.Config.Bor.RioBlock = big.NewInt(0) + + // Start BP without pipeline + bpStack, bpBackend, err := InitMiner(genesis, keys[0], true) + require.NoError(t, err) + defer bpStack.Close() + + for bpStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Start importer with pipelined import SRC + importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true) + require.NoError(t, err) + defer importerStack.Close() + + for importerStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Connect peers + importerStack.Server().AddPeer(bpStack.Server().Self()) + bpStack.Server().AddPeer(importerStack.Server().Self()) + + // Start mining + err = bpBackend.StartMining() + require.NoError(t, err) + + // Wait for a few blocks before submitting transactions + for bpBackend.BlockChain().CurrentBlock().Number.Uint64() < 2 { + time.Sleep(500 * time.Millisecond) + } + + // Submit ETH transfer transactions to the BP + txpool := bpBackend.TxPool() + senderKey := pkey1 + senderAddr := crypto.PubkeyToAddress(senderKey.PublicKey) + recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey) + signer := 
types.LatestSignerForChainID(genesis.Config.ChainID) + + nonce := txpool.Nonce(senderAddr) + txCount := 10 + transferAmount := big.NewInt(1000) + + for i := 0; i < txCount; i++ { + tx := types.NewTransaction( + nonce+uint64(i), + recipientAddr, + transferAmount, + 21000, + big.NewInt(30000000000), + nil, + ) + signedTx, err := types.SignTx(tx, signer, senderKey) + require.NoError(t, err) + errs := txpool.Add([]*types.Transaction{signedTx}, true) + require.Nil(t, errs[0], "failed to add tx %d", i) + } + + // Wait for all transactions to be mined on the BP + deadline := time.After(120 * time.Second) + for { + select { + case <-deadline: + t.Fatal("Timed out waiting for transactions to be mined on BP") + default: + time.Sleep(500 * time.Millisecond) + if txpool.Nonce(senderAddr) >= nonce+uint64(txCount) { + goto txsMined + } + } + } +txsMined: + + bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Logf("All %d transactions mined on BP by block %d", txCount, bpNum) + + // Wait for the importer to sync past the block containing the last tx + targetBlock := bpNum + deadline = time.After(120 * time.Second) + for { + select { + case <-deadline: + importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum) + default: + time.Sleep(500 * time.Millisecond) + if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock { + goto importerSynced + } + } + } +importerSynced: + + // Allow async DB writes to flush + time.Sleep(2 * time.Second) + + importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Logf("Importer synced to block %d", importerNum) + + bpChain := bpBackend.BlockChain() + importerChain := importerBackend.BlockChain() + + // Re-read current block numbers after the flush delay + bpNum = bpChain.CurrentBlock().Number.Uint64() + importerNum = importerChain.CurrentBlock().Number.Uint64() + compareUpTo 
:= bpNum + if importerNum < compareUpTo { + compareUpTo = importerNum + } + + // Verify blocks, state roots, and transaction counts match + totalBpTxs := 0 + totalImporterTxs := 0 + for i := uint64(1); i <= compareUpTo; i++ { + bpBlock := bpChain.GetBlockByNumber(i) + require.NotNil(t, bpBlock, "BP missing block %d", i) + + importerBlock := importerChain.GetBlockByNumber(i) + require.NotNil(t, importerBlock, "importer missing block %d", i) + + require.Equal(t, bpBlock.Hash(), importerBlock.Hash(), + "block %d hash mismatch", i) + require.Equal(t, bpBlock.Root(), importerBlock.Root(), + "block %d state root mismatch", i) + + // Transaction counts must match per block + require.Equal(t, len(bpBlock.Transactions()), len(importerBlock.Transactions()), + "block %d tx count mismatch: BP=%d importer=%d", i, + len(bpBlock.Transactions()), len(importerBlock.Transactions())) + + totalBpTxs += len(bpBlock.Transactions()) + totalImporterTxs += len(importerBlock.Transactions()) + + // Verify receipts exist on the importer for each transaction + for j, tx := range importerBlock.Transactions() { + receipt, _, _, _ := rawdb.ReadReceipt(importerBackend.ChainDb(), tx.Hash(), importerChain.Config()) + require.NotNil(t, receipt, "importer missing receipt for tx %d in block %d (hash=%x)", j, i, tx.Hash()) + } + } + + require.GreaterOrEqual(t, totalBpTxs, txCount, + "expected at least %d transactions across BP blocks, got %d", txCount, totalBpTxs) + require.Equal(t, totalBpTxs, totalImporterTxs, + "total tx count mismatch: BP=%d importer=%d", totalBpTxs, totalImporterTxs) + + // Verify account balances match between BP and importer at the latest + // common block — this confirms the pipelined SRC produced correct state. 
+ bpState, err := bpChain.StateAt(bpChain.GetBlockByNumber(compareUpTo).Root()) + require.NoError(t, err, "cannot open BP state at block %d", compareUpTo) + + importerState, err := importerChain.StateAt(importerChain.GetBlockByNumber(compareUpTo).Root()) + require.NoError(t, err, "cannot open importer state at block %d", compareUpTo) + + bpRecipientBal := bpState.GetBalance(recipientAddr) + importerRecipientBal := importerState.GetBalance(recipientAddr) + require.Equal(t, bpRecipientBal.String(), importerRecipientBal.String(), + "recipient balance mismatch at block %d: BP=%s importer=%s", + compareUpTo, bpRecipientBal, importerRecipientBal) + + bpSenderBal := bpState.GetBalance(senderAddr) + importerSenderBal := importerState.GetBalance(senderAddr) + require.Equal(t, bpSenderBal.String(), importerSenderBal.String(), + "sender balance mismatch at block %d: BP=%s importer=%s", + compareUpTo, bpSenderBal, importerSenderBal) + + t.Logf("Verified %d blocks with %d total transactions, balances match", compareUpTo, totalImporterTxs) +} diff --git a/tests/bor/helper.go b/tests/bor/helper.go index 1c94456b87..7353c3d38b 100644 --- a/tests/bor/helper.go +++ b/tests/bor/helper.go @@ -819,3 +819,72 @@ func InitMinerWithPipelinedSRC(genesis *core.Genesis, privKey *ecdsa.PrivateKey, err = stack.Start() return stack, ethBackend, err } + +// InitImporterWithPipelinedSRC creates a non-mining node with pipelined import +// SRC enabled. The node will import blocks from peers using the pipelined state +// root computation path. A validator key is still needed for the keystore (used +// for P2P identity / account manager) but the node does NOT start mining. 
+func InitImporterWithPipelinedSRC(genesis *core.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool) (*node.Node, *eth.Ethereum, error) { + datadir, err := os.MkdirTemp("", "InitImporter-"+uuid.New().String()) + if err != nil { + return nil, nil, err + } + + config := &node.Config{ + Name: "geth", + Version: params.Version, + DataDir: datadir, + P2P: p2p.Config{ + ListenAddr: "0.0.0.0:0", + NoDiscovery: true, + MaxPeers: 25, + }, + UseLightweightKDF: true, + } + stack, err := node.New(config) + if err != nil { + return nil, nil, err + } + + ethBackend, err := eth.New(stack, &ethconfig.Config{ + Genesis: genesis, + NetworkId: genesis.Config.ChainID.Uint64(), + SyncMode: downloader.FullSync, + DatabaseCache: 256, + DatabaseHandles: 256, + TxPool: legacypool.DefaultConfig, + GPO: ethconfig.Defaults.GPO, + Miner: miner.Config{ + Etherbase: crypto.PubkeyToAddress(privKey.PublicKey), + GasCeil: genesis.GasLimit * 11 / 10, + GasPrice: big.NewInt(1), + Recommit: time.Second, + }, + WithoutHeimdall: withoutHeimdall, + EnablePipelinedImportSRC: true, + PipelinedImportSRCLogs: true, + }) + if err != nil { + return nil, nil, err + } + + keydir := stack.KeyStoreDir() + n, p := keystore.StandardScryptN, keystore.StandardScryptP + kStore := keystore.NewKeyStore(keydir, n, p) + + _, err = kStore.ImportECDSA(privKey, "") + if err != nil { + return nil, nil, err + } + + acc := kStore.Accounts()[0] + err = kStore.Unlock(acc, "") + if err != nil { + return nil, nil, err + } + + ethBackend.AccountManager().AddBackend(kStore) + + err = stack.Start() + return stack, ethBackend, err +} diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go index 842ac0972e..818ee0f720 100644 --- a/triedb/pathdb/reader.go +++ b/triedb/pathdb/reader.go @@ -102,6 +102,9 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) { l, err := r.db.tree.lookupAccount(hash, r.state) if err != nil { + if 
errors.Is(err, errSnapshotStale) { + return r.accountFallback(hash) + } return nil, err } // If the located layer is stale, fall back to the slow path to retrieve @@ -114,7 +117,21 @@ func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) { // not affect the result unless the entry point layer is also stale. blob, err := l.account(hash, 0) if errors.Is(err, errSnapshotStale) { - return r.layer.account(hash, 0) + return r.accountFallback(hash) + } + return blob, err +} + +// accountFallback retrieves account data when the normal lookup path fails +// due to concurrent layer flattening (cap). It tries the reader's entry-point +// layer first (which is still in memory), then falls back to the current base +// disk layer. The base fallback is needed because persist() creates intermediate +// disk layers that are marked stale during recursive flattening — only the +// final base layer is guaranteed non-stale. +func (r *reader) accountFallback(hash common.Hash) ([]byte, error) { + blob, err := r.layer.account(hash, 0) + if errors.Is(err, errSnapshotStale) { + return r.db.tree.bottom().account(hash, 0) } return blob, err } @@ -151,6 +168,9 @@ func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) { func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) { l, err := r.db.tree.lookupStorage(accountHash, storageHash, r.state) if err != nil { + if errors.Is(err, errSnapshotStale) { + return r.storageFallback(accountHash, storageHash) + } return nil, err } // If the located layer is stale, fall back to the slow path to retrieve @@ -163,7 +183,16 @@ func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) { // not affect the result unless the entry point layer is also stale. 
blob, err := l.storage(accountHash, storageHash, 0) if errors.Is(err, errSnapshotStale) { - return r.layer.storage(accountHash, storageHash, 0) + return r.storageFallback(accountHash, storageHash) + } + return blob, err +} + +// storageFallback is the storage counterpart of accountFallback. +func (r *reader) storageFallback(accountHash, storageHash common.Hash) ([]byte, error) { + blob, err := r.layer.storage(accountHash, storageHash, 0) + if errors.Is(err, errSnapshotStale) { + return r.db.tree.bottom().storage(accountHash, storageHash, 0) } return blob, err } From b283227925bacab0a5ad3341c30c305802d23f15 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Fri, 10 Apr 2026 09:37:28 +0530 Subject: [PATCH 8/9] tests/bor: add pipelined import SRC self-destruct integration test Adds TestPipelinedImportSRC_SelfDestruct to verify that the FlatDiff Destructs check in getStateObject correctly handles self-destructed contracts during pipelined import. --- tests/bor/bor_test.go | 196 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 1e13efa2b2..e4b94297e7 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -3408,3 +3408,199 @@ importerSynced: t.Logf("Verified %d blocks with %d total transactions, balances match", compareUpTo, totalImporterTxs) } + +// TestPipelinedImportSRC_SelfDestruct verifies that a contract which +// self-destructs in its constructor is correctly handled by the FlatDiff +// overlay during pipelined import. Without the Destructs check in +// getStateObject, the importer would fall through to the trie reader and +// see stale pre-destruct state from the committed parent root. +// +// Post-Cancun (EIP-6780), SELFDESTRUCT only fully destroys an account when +// called in the same transaction that created the contract, so the test uses +// a constructor that immediately self-destructs. 
+func TestPipelinedImportSRC_SelfDestruct(t *testing.T) { + t.Parallel() + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + fdlimit.Raise(2048) + + faucets := make([]*ecdsa.PrivateKey, 128) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() + } + + genesis := InitGenesis(t, faucets, "./testdata/genesis_2val.json", 16) + genesis.Config.Bor.Period = map[string]uint64{"0": 2} + genesis.Config.Bor.Sprint = map[string]uint64{"0": 16} + genesis.Config.Bor.RioBlock = big.NewInt(0) + + // Start a normal BP (no pipeline on mining side) + bpStack, bpBackend, err := InitMiner(genesis, keys[0], true) + require.NoError(t, err) + defer bpStack.Close() + + for bpStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Start importer with pipelined import SRC + importerStack, importerBackend, err := InitImporterWithPipelinedSRC(genesis, keys[1], true) + require.NoError(t, err) + defer importerStack.Close() + + for importerStack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + + // Connect peers + importerStack.Server().AddPeer(bpStack.Server().Self()) + bpStack.Server().AddPeer(importerStack.Server().Self()) + + // Start mining + err = bpBackend.StartMining() + require.NoError(t, err) + + // Wait for a few blocks so we're past cancunBlock=3 + for bpBackend.BlockChain().CurrentBlock().Number.Uint64() < 5 { + time.Sleep(500 * time.Millisecond) + } + + // Deploy a contract whose constructor immediately self-destructs, + // sending its value back to CALLER. 
+ // Init code: CALLER (0x33) SELFDESTRUCT (0xFF) = 0x33FF + selfDestructInitCode := []byte{byte(vm.CALLER), byte(vm.SELFDESTRUCT)} + deployValue := big.NewInt(1_000_000_000_000_000_000) // 1 ETH + + txpool := bpBackend.TxPool() + senderKey := pkey1 + senderAddr := crypto.PubkeyToAddress(senderKey.PublicKey) + signer := types.LatestSignerForChainID(genesis.Config.ChainID) + + nonce := txpool.Nonce(senderAddr) + + // Predict the contract address + contractAddr := crypto.CreateAddress(senderAddr, nonce) + t.Logf("Deploying self-destruct contract at predicted address %s with nonce %d", contractAddr.Hex(), nonce) + + // Record sender balance before deployment + bpChain := bpBackend.BlockChain() + preState, err := bpChain.StateAt(bpChain.CurrentBlock().Root) + require.NoError(t, err) + senderBalBefore := preState.GetBalance(senderAddr) + t.Logf("Sender balance before deploy: %s", senderBalBefore.String()) + + // Create the deployment tx with value + deployTx, err := types.SignTx( + types.NewContractCreation(nonce, deployValue, 100_000, big.NewInt(30_000_000_000), selfDestructInitCode), + signer, senderKey, + ) + require.NoError(t, err) + + errs := txpool.Add([]*types.Transaction{deployTx}, true) + require.Nil(t, errs[0], "failed to add deploy tx") + + // Also send a normal transfer in the NEXT block to force pipeline overlap. + // This ensures block N+1 uses the FlatDiff from block N (which has the destruct). 
+ nonce++ + recipientAddr := crypto.PubkeyToAddress(pkey2.PublicKey) + transferTx, err := types.SignTx( + types.NewTransaction(nonce, recipientAddr, big.NewInt(1000), 21000, big.NewInt(30_000_000_000), nil), + signer, senderKey, + ) + require.NoError(t, err) + errs = txpool.Add([]*types.Transaction{transferTx}, true) + require.Nil(t, errs[0], "failed to add transfer tx") + + // Wait for both txs to be mined + deadline := time.After(120 * time.Second) + for { + select { + case <-deadline: + t.Fatal("Timed out waiting for transactions to be mined on BP") + default: + time.Sleep(500 * time.Millisecond) + if txpool.Nonce(senderAddr) >= nonce+1 { + goto txsMined + } + } + } +txsMined: + + bpNum := bpBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Logf("Transactions mined on BP by block %d", bpNum) + + // Wait for importer to sync + targetBlock := bpNum + deadline = time.After(120 * time.Second) + for { + select { + case <-deadline: + importerNum := importerBackend.BlockChain().CurrentBlock().Number.Uint64() + t.Fatalf("Timed out waiting for importer to reach block %d, current: %d", targetBlock, importerNum) + default: + time.Sleep(500 * time.Millisecond) + if importerBackend.BlockChain().CurrentBlock().Number.Uint64() >= targetBlock { + goto importerSynced + } + } + } +importerSynced: + + // Allow async DB writes to flush + time.Sleep(2 * time.Second) + + importerChain := importerBackend.BlockChain() + importerNum := importerChain.CurrentBlock().Number.Uint64() + t.Logf("Importer synced to block %d", importerNum) + + // Re-read BP chain head + bpNum = bpChain.CurrentBlock().Number.Uint64() + compareUpTo := bpNum + if importerNum < compareUpTo { + compareUpTo = importerNum + } + + // Verify block hashes and state roots match + for i := uint64(1); i <= compareUpTo; i++ { + bpBlock := bpChain.GetBlockByNumber(i) + require.NotNil(t, bpBlock, "BP missing block %d", i) + + importerBlock := importerChain.GetBlockByNumber(i) + require.NotNil(t, importerBlock, "importer 
missing block %d", i) + + require.Equal(t, bpBlock.Hash(), importerBlock.Hash(), + "block %d hash mismatch", i) + require.Equal(t, bpBlock.Root(), importerBlock.Root(), + "block %d state root mismatch", i) + } + + // Verify the self-destructed contract is gone on BOTH chains + bpState, err := bpChain.StateAt(bpChain.GetBlockByNumber(compareUpTo).Root()) + require.NoError(t, err) + importerState, err := importerChain.StateAt(importerChain.GetBlockByNumber(compareUpTo).Root()) + require.NoError(t, err) + + // Contract should have zero balance (ETH sent back to sender via SELFDESTRUCT) + bpContractBal := bpState.GetBalance(contractAddr) + importerContractBal := importerState.GetBalance(contractAddr) + require.True(t, bpContractBal.IsZero(), "BP: contract should have zero balance, got %s", bpContractBal) + require.True(t, importerContractBal.IsZero(), "importer: contract should have zero balance, got %s", importerContractBal) + + // Contract should have no code + bpCode := bpState.GetCode(contractAddr) + importerCode := importerState.GetCode(contractAddr) + require.Empty(t, bpCode, "BP: contract should have no code") + require.Empty(t, importerCode, "importer: contract should have no code") + + // Contract nonce should be zero (fully destroyed) + require.Equal(t, uint64(0), bpState.GetNonce(contractAddr), "BP: contract nonce should be 0") + require.Equal(t, uint64(0), importerState.GetNonce(contractAddr), "importer: contract nonce should be 0") + + // Sender balances must match between BP and importer + bpSenderBal := bpState.GetBalance(senderAddr) + importerSenderBal := importerState.GetBalance(senderAddr) + require.Equal(t, bpSenderBal.String(), importerSenderBal.String(), + "sender balance mismatch: BP=%s importer=%s", bpSenderBal, importerSenderBal) + + t.Logf("Verified: contract %s fully destroyed, sender balances match (BP=%s, importer=%s)", + contractAddr.Hex(), bpSenderBal, importerSenderBal) +} From acc0ef714ced03585ab46e7b0a41e14b632e6d79 Mon Sep 17 00:00:00 
2001 From: Pratik Patil Date: Fri, 10 Apr 2026 17:32:10 +0530 Subject: [PATCH 9/9] core/state, triedb/pathdb: fix prefetcher race during pipelined SRC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two fixes for prefetcher errors during pipelined state root computation: 1. Storage root mismatch: FlatDiff accounts had storage roots from block N's post-state, but the prefetcher's NodeReader was at the committed parent root (grandparent). Add prefetchRoot field to stateObject that stores the grandparent's storage root, read from the flat state reader when loading from FlatDiff. Use it consistently across all prefetcher interactions. 2. Layer stale during trie node resolution: SRC's cap() flattens diff layers concurrently with prefetcher trie walks. Add nodeFallback to reader.Node(), mirroring the existing accountFallback/storageFallback pattern — retries via the current base disk layer on errSnapshotStale. --- core/state/state_object.go | 58 ++++++++- core/state/statedb.go | 21 ++++ core/state/statedb_pipeline_test.go | 180 ++++++++++++++++++++++++++++ triedb/pathdb/reader.go | 33 ++++- 4 files changed, 286 insertions(+), 6 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 7ecee2e053..206c5efa57 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -54,6 +54,26 @@ type stateObject struct { origin *types.StateAccount // Account original data without any change applied, nil means it was not existent data types.StateAccount // Account data with all mutations applied in the scope of block + // prefetchRoot holds the storage root from the committed parent state, used + // exclusively for prefetcher interactions during pipelined SRC. 
+ // + // When an account is loaded from FlatDiff (the previous block's uncommitted + // mutations), its origin.Root and data.Root reflect block N's post-state — + // but the prefetcher's NodeReader is opened at committedParentRoot (the + // grandparent). This creates a (stateRoot, storageRoot) mismatch: the reader + // can only resolve trie nodes for the grandparent's storage root, not block + // N's. The result is "Unexpected trie node" hash-mismatch errors on every + // storage trie root resolution, killing the prefetcher for those accounts. + // + // prefetchRoot stores the grandparent's storage root — the one consistent + // with the prefetcher's reader. It is set only for FlatDiff-sourced accounts; + // for accounts loaded from the committed state it stays zero, and + // getPrefetchRoot() falls back to data.Root (which is already consistent). + // + // The committed root is obtained from the flat state reader (in-memory + // snapshot), so the cost is effectively zero. + prefetchRoot common.Hash + // Write caches. trie Trie // storage trie, which becomes non-nil on first access code []byte // contract bytecode, which gets set when code is loaded @@ -122,6 +142,26 @@ func (s *stateObject) touch() { s.db.journal.touchChange(s.address) } +// getPrefetchRoot returns the storage root to use for all prefetcher +// interactions (prefetch, trie lookup, used). This must be consistent across +// all calls for a given account so the subfetcher trieID matches. +// +// For accounts loaded from FlatDiff (pipelined SRC), the storage root in +// origin/data reflects block N's post-state, but the prefetcher's NodeReader +// is at committedParentRoot (the grandparent). Using block N's root would +// cause a hash mismatch when resolving the storage trie root node. Instead, +// we return the grandparent's storage root (stored in prefetchRoot), which +// is consistent with the reader. 
+// +// For accounts loaded from the committed state (normal path), prefetchRoot +// is zero and we fall back to data.Root, which is already consistent. +func (s *stateObject) getPrefetchRoot() common.Hash { + if s.prefetchRoot != (common.Hash{}) { + return s.prefetchRoot + } + return s.data.Root +} + // getTrie returns the associated storage trie. The trie will be opened if it's // not loaded previously. An error will be returned if trie can't be loaded. // @@ -153,8 +193,10 @@ func (s *stateObject) getPrefetchedTrie() Trie { if (s.data.Root == types.EmptyRootHash && !s.db.db.TrieDB().IsVerkle()) || s.db.prefetcher == nil { return nil } - // Attempt to retrieve the trie from the prefetcher - return s.db.prefetcher.trie(s.addrHash, s.data.Root) + // Use getPrefetchRoot() so the trieID matches the one used when scheduling + // the prefetch. For FlatDiff accounts this is the committed parent's storage + // root; for normal accounts it equals data.Root (unchanged behavior). + return s.db.prefetcher.trie(s.addrHash, s.getPrefetchRoot()) } // GetState retrieves a value associated with the given storage key. @@ -218,8 +260,11 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { s.db.StorageReads += time.Since(start) // Schedule the resolved storage slots for prefetching if it's enabled. + // Use getPrefetchRoot() for the storage root so the subfetcher's trieID + // is consistent with the prefetcher's NodeReader state root. For FlatDiff + // accounts, this is the committed parent's storage root (not block N's). 
if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash { - if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil { + if err = s.db.prefetcher.prefetch(s.addrHash, s.getPrefetchRoot(), s.address, nil, []common.Hash{key}, true); err != nil { log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err) } } @@ -280,8 +325,9 @@ func (s *stateObject) finalise() { // byzantium fork) and entry is necessary to modify the value back. s.pendingStorage[key] = value } + // Use getPrefetchRoot() for consistency with other prefetcher calls. if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil { + if err := s.db.prefetcher.prefetch(s.addrHash, s.getPrefetchRoot(), s.address, nil, slotsToPrefetch, false); err != nil { log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) } } @@ -377,8 +423,9 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.StorageDeleted.Add(1) } + // Use getPrefetchRoot() so the trieID matches the one used during scheduling. 
if s.db.prefetcher != nil { - s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used) + s.db.prefetcher.used(s.addrHash, s.getPrefetchRoot(), nil, used) } // When witness building is enabled without a prefetcher, storage reads // went through the reader (a separate trie with its own PrevalueTracer) @@ -520,6 +567,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { addrHash: s.addrHash, origin: s.origin, data: s.data, + prefetchRoot: s.prefetchRoot, code: s.code, originStorage: s.originStorage.Copy(), pendingStorage: s.pendingStorage.Copy(), diff --git a/core/state/statedb.go b/core/state/statedb.go index 2206226f33..5996618a79 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1046,6 +1046,27 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { if code, ok := s.flatDiffRef.Code[common.BytesToHash(acctCopy.CodeHash)]; ok { obj.code = code } + // Resolve the committed storage root for prefetcher consistency. + // + // The FlatDiff account's Root is block N's post-state storage root, + // but the prefetcher's NodeReader is opened at committedParentRoot + // (the grandparent). These are inconsistent — the reader can only + // resolve trie nodes for the grandparent's storage root. Without + // this, the prefetcher hits "Unexpected trie node" hash mismatches + // on every storage trie root resolution for FlatDiff accounts. + // + // We read the account from the committed state (flat reader, in- + // memory snapshot) to get the grandparent's storage root. This is + // the root that the prefetcher's reader can actually resolve. + if acctCopy.Root != types.EmptyRootHash { + if committedAcct, err := s.reader.Account(addr); err == nil && committedAcct != nil { + obj.prefetchRoot = committedAcct.Root + } + // If the account doesn't exist in the committed state (new in + // block N), prefetchRoot stays zero and getPrefetchRoot() falls + // back to data.Root. 
The prefetcher will skip it since the trie + // didn't exist at committedParentRoot. + } s.setStateObject(obj) return obj } diff --git a/core/state/statedb_pipeline_test.go b/core/state/statedb_pipeline_test.go index c2eec48c42..ceab3fec68 100644 --- a/core/state/statedb_pipeline_test.go +++ b/core/state/statedb_pipeline_test.go @@ -337,3 +337,183 @@ func TestCommitSnapshot_CapturesDestructs(t *testing.T) { _, destructed := diff.Destructs[addr] require.True(t, destructed, "self-destructed account should appear in diff.Destructs") } + +// TestPrefetchRoot_FlatDiffAccountUsesCommittedRoot verifies that accounts +// loaded from FlatDiff get their prefetchRoot set to the committed parent's +// storage root, not the FlatDiff's storage root. This is critical for +// pipelined SRC: the prefetcher's NodeReader is opened at the committed +// parent root (grandparent), so it can only resolve trie nodes for that +// state's storage root. Using FlatDiff's root (block N's post-state) would +// cause "Unexpected trie node" hash mismatches. +func TestPrefetchRoot_FlatDiffAccountUsesCommittedRoot(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + + // --- Set up a committed state with a contract that has storage --- + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xcontract") + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 1, 0) + sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa")) + sdb.Finalise(false) + + committedRoot, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Read back the committed account to get its storage root. 
+ committedSDB, err := New(committedRoot, db) + require.NoError(t, err) + committedObj := committedSDB.getStateObject(addr) + require.NotNil(t, committedObj) + committedStorageRoot := committedObj.data.Root + require.NotEqual(t, types.EmptyRootHash, committedStorageRoot, "committed account should have non-empty storage root") + + // --- Simulate block N: modify the contract's storage and extract FlatDiff --- + sdb2, err := New(committedRoot, db) + require.NoError(t, err) + sdb2.SetState(addr, common.HexToHash("0x02"), common.HexToHash("0xbb")) // new slot + sdb2.Finalise(false) + diff := sdb2.CommitSnapshot(false) + + // Capture the storage root recorded for the account in the FlatDiff. + flatDiffAcct, ok := diff.Accounts[addr] + require.True(t, ok, "contract should be in FlatDiff") + flatDiffStorageRoot := flatDiffAcct.Root + // The FlatDiff root is the account's root BEFORE IntermediateRoot (i.e., + // CommitSnapshot doesn't hash — it captures the current data.Root). So it + // equals the committed root here. But the key point is that getPrefetchRoot + // returns the committed root regardless. 
+ + // --- Create a pipelined StateDB with FlatDiff overlay --- + overlayDB, err := NewWithFlatBase(committedRoot, db, diff) + require.NoError(t, err) + + // Load the account from FlatDiff + obj := overlayDB.getStateObject(addr) + require.NotNil(t, obj) + + // Verify origin/data roots come from FlatDiff + require.Equal(t, flatDiffStorageRoot, obj.data.Root, "data.Root should be from FlatDiff") + + // Verify prefetchRoot was set to the committed storage root + require.Equal(t, committedStorageRoot, obj.prefetchRoot, "prefetchRoot should be the committed parent's storage root") + + // Verify getPrefetchRoot returns the committed root (not data.Root) + require.Equal(t, committedStorageRoot, obj.getPrefetchRoot(), "getPrefetchRoot should return the committed storage root") +} + +// TestPrefetchRoot_NormalAccountFallsBackToDataRoot verifies that accounts +// loaded from the committed state (not FlatDiff) have prefetchRoot=zero, +// and getPrefetchRoot falls back to data.Root. +func TestPrefetchRoot_NormalAccountFallsBackToDataRoot(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xnormal") + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 1, 0) + sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa")) + sdb.Finalise(false) + + root, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Load the account normally (no FlatDiff) + sdb2, err := New(root, db) + require.NoError(t, err) + + obj := sdb2.getStateObject(addr) + require.NotNil(t, obj) + + // prefetchRoot should be zero (not set for non-FlatDiff accounts) + require.Equal(t, common.Hash{}, obj.prefetchRoot, "prefetchRoot should be zero for non-FlatDiff accounts") + + // getPrefetchRoot should fall back to data.Root + require.Equal(t, obj.data.Root, obj.getPrefetchRoot(), "getPrefetchRoot should fall back to data.Root") +} + +// 
TestPrefetchRoot_NewAccountInFlatDiff verifies that an account created in +// block N (exists in FlatDiff but not in committed state) gets prefetchRoot=zero +// since there's nothing to prefetch at the committed parent root. +func TestPrefetchRoot_NewAccountInFlatDiff(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + + // Commit an empty state + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + committedRoot, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // FlatDiff with a new account that doesn't exist in committed state + newAddr := common.HexToAddress("0xnew") + diff := &FlatDiff{ + Accounts: map[common.Address]types.StateAccount{ + newAddr: { + Nonce: 1, + Balance: uint256.NewInt(100), + Root: crypto.Keccak256Hash([]byte("fake-storage-root")), // non-empty root + CodeHash: types.EmptyCodeHash.Bytes(), + }, + }, + Storage: make(map[common.Address]map[common.Hash]common.Hash), + Destructs: make(map[common.Address]struct{}), + Code: make(map[common.Hash][]byte), + ReadStorage: make(map[common.Address][]common.Hash), + NonExistentReads: nil, + } + + overlayDB, err := NewWithFlatBase(committedRoot, db, diff) + require.NoError(t, err) + + obj := overlayDB.getStateObject(newAddr) + require.NotNil(t, obj) + + // Account is new (not in committed state), so prefetchRoot should be zero + require.Equal(t, common.Hash{}, obj.prefetchRoot, "prefetchRoot should be zero for new accounts not in committed state") + + // getPrefetchRoot falls back to data.Root + require.Equal(t, obj.data.Root, obj.getPrefetchRoot(), "getPrefetchRoot should fall back to data.Root for new accounts") +} + +// TestPrefetchRoot_DeepCopyPreserves verifies that stateObject.deepCopy +// preserves the prefetchRoot field, which is important for StateDB.Copy() +// used by the block-level prefetcher. 
+func TestPrefetchRoot_DeepCopyPreserves(t *testing.T) { + db := NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil) + + sdb, err := New(types.EmptyRootHash, db) + require.NoError(t, err) + + addr := common.HexToAddress("0xcopy") + sdb.CreateAccount(addr) + sdb.SetNonce(addr, 1, 0) + sdb.SetState(addr, common.HexToHash("0x01"), common.HexToHash("0xaa")) + sdb.Finalise(false) + + committedRoot, _, err := sdb.CommitWithUpdate(0, false, false) + require.NoError(t, err) + + // Simulate a FlatDiff account with a different storage root + sdb2, err := New(committedRoot, db) + require.NoError(t, err) + sdb2.SetState(addr, common.HexToHash("0x02"), common.HexToHash("0xbb")) + sdb2.Finalise(false) + diff := sdb2.CommitSnapshot(false) + + // Create overlay StateDB and load account + overlayDB, err := NewWithFlatBase(committedRoot, db, diff) + require.NoError(t, err) + obj := overlayDB.getStateObject(addr) + require.NotNil(t, obj) + require.NotEqual(t, common.Hash{}, obj.prefetchRoot) + + // Copy the StateDB and verify prefetchRoot is preserved + copiedDB := overlayDB.Copy() + copiedObj := copiedDB.getStateObject(addr) + require.NotNil(t, copiedObj) + require.Equal(t, obj.prefetchRoot, copiedObj.prefetchRoot, "deepCopy should preserve prefetchRoot") + require.Equal(t, obj.getPrefetchRoot(), copiedObj.getPrefetchRoot(), "getPrefetchRoot should match after deepCopy") +} diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go index 818ee0f720..903b2c0d22 100644 --- a/triedb/pathdb/reader.go +++ b/triedb/pathdb/reader.go @@ -66,7 +66,18 @@ type reader struct { func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { blob, got, loc, err := r.layer.node(owner, path, 0) if err != nil { - return nil, err + // If the diff layer chain walks into a stale disk layer (marked stale + // by concurrent cap()/persist() during pipelined SRC), fall back to + // the current base disk layer — same strategy as accountFallback and + // 
storageFallback. + if errors.Is(err, errSnapshotStale) { + blob, got, loc, err = r.nodeFallback(owner, path) + if err != nil { + return nil, err + } + } else { + return nil, err + } } // Error out if the local one is inconsistent with the target. if !r.noHashCheck && got != hash { @@ -92,6 +103,26 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, return blob, nil } +// nodeFallback retrieves a trie node when the normal diff layer walk fails +// due to concurrent layer flattening (cap). This mirrors the fallback strategy +// used by accountFallback and storageFallback. +// +// During pipelined SRC, the background SRC goroutine's CommitWithUpdate can +// trigger cap() which flattens bottom diff layers into a new disk layer, +// marking the old disk layer as stale. Concurrently, the prefetcher's trie +// walk may reach this stale disk layer and get errSnapshotStale. +// +// The fallback tries the entry-point layer first (which is still valid in +// memory), then falls back to tree.bottom() — the current base disk layer, +// which is guaranteed non-stale. +func (r *reader) nodeFallback(owner common.Hash, path []byte) ([]byte, common.Hash, *nodeLoc, error) { + blob, got, loc, err := r.layer.node(owner, path, 0) + if errors.Is(err, errSnapshotStale) { + return r.db.tree.bottom().node(owner, path, 0) + } + return blob, got, loc, err +} + // AccountRLP directly retrieves the account associated with a particular hash. // An error will be returned if the read operation exits abnormally. Specifically, // if the layer is already stale.