From aeaf7fa02204a2ed375071c920036c76fe789e96 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 18:00:57 +0100 Subject: [PATCH 1/7] feat: introduce EVM contract benchmarking with new tests and a GitHub Actions workflow. --- .github/workflows/benchmark.yml | 42 +++++++++++ execution/evm/test/test_helpers.go | 19 +++-- execution/evm/test_helpers.go | 2 +- test/e2e/evm_contract_bench_test.go | 104 ++++++++++++++++++++++++++++ test/e2e/evm_contract_e2e_test.go | 4 +- test/e2e/evm_test_common.go | 30 ++++---- test/e2e/sut_helper.go | 16 ++--- 7 files changed, 187 insertions(+), 30 deletions(-) create mode 100644 .github/workflows/benchmark.yml create mode 100644 test/e2e/evm_contract_bench_test.go diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..e34f615027 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,42 @@ +--- +name: Benchmarks +permissions: {} +"on": + push: + branches: + - main + workflow_dispatch: + +jobs: + evm-benchmark: + name: EVM Contract Benchmark + runs-on: ubuntu-latest + timeout-minutes: 30 + permissions: + contents: write + issues: write + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: ./go.mod + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 + - name: Build binaries + run: make build-evm build-da + - name: Run EVM benchmarks + run: | + cd test/e2e && go test -tags evm -bench=. -benchmem -run='^$' \ + -timeout=10m --evm-binary=../../build/evm | tee output.txt + - name: Store benchmark result + uses: benchmark-action/github-action-benchmark@4bdcce38c94cec68da58d012ac24b7b1155efe8b # v1.20.7 + with: + name: EVM Contract Roundtrip + tool: 'go' + output-file-path: test/e2e/output.txt + auto-push: true + github-token: ${{ secrets.GITHUB_TOKEN }} + alert-threshold: '150%' + fail-on-alert: true + comment-on-alert: true diff --git a/execution/evm/test/test_helpers.go b/execution/evm/test/test_helpers.go index d2c0500528..1aa4ec3175 100644 --- a/execution/evm/test/test_helpers.go +++ b/execution/evm/test/test_helpers.go @@ -18,6 +18,8 @@ import ( "github.com/celestiaorg/tastora/framework/types" "github.com/golang-jwt/jwt/v5" "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) // Test-scoped Docker client/network mapping to avoid conflicts between tests @@ -39,7 +41,7 @@ func randomString(n int) string { } // getTestScopedDockerSetup returns a Docker client and network ID that are scoped to the specific test. -func getTestScopedDockerSetup(t *testing.T) (types.TastoraDockerClient, string) { +func getTestScopedDockerSetup(t testing.TB) (types.TastoraDockerClient, string) { t.Helper() testKey := t.Name() @@ -59,13 +61,22 @@ func getTestScopedDockerSetup(t *testing.T) (types.TastoraDockerClient, string) } // SetupTestRethNode creates a single Reth node for testing purposes. -func SetupTestRethNode(t *testing.T) *reth.Node { +func SetupTestRethNode(t testing.TB) *reth.Node { t.Helper() ctx := context.Background() dockerCli, dockerNetID := getTestScopedDockerSetup(t) - n, err := reth.NewNodeBuilderWithTestName(t, fmt.Sprintf("%s-%s", t.Name(), randomString(6))). 
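+	// The builder is spelled out (instead of the one-line helper above) so the
+	// logger can be chosen per run: zap.NewNop() keeps benchmark output quiet,
+	// while `go test -v` switches to the zaptest logger for debugging.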
+ testName := fmt.Sprintf("%s-%s", t.Name(), randomString(6)) + logger := zap.NewNop() + if testing.Verbose() { + logger = zaptest.NewLogger(t) + } + n, err := new(reth.NodeBuilder). + WithTestName(testName). + WithLogger(logger). + WithImage(reth.DefaultImage()). + WithBin("ev-reth"). WithDockerClient(dockerCli). WithDockerNetworkID(dockerNetID). WithGenesis([]byte(reth.DefaultEvolveGenesisJSON())). @@ -88,7 +99,7 @@ func SetupTestRethNode(t *testing.T) *reth.Node { } // waitForRethContainer waits for the Reth container to be ready by polling the provided endpoints with JWT authentication. -func waitForRethContainer(t *testing.T, jwtSecret, ethURL, engineURL string) error { +func waitForRethContainer(t testing.TB, jwtSecret, ethURL, engineURL string) error { t.Helper() client := &http.Client{Timeout: 100 * time.Millisecond} timer := time.NewTimer(30 * time.Second) diff --git a/execution/evm/test_helpers.go b/execution/evm/test_helpers.go index 1e97d446da..157f028b27 100644 --- a/execution/evm/test_helpers.go +++ b/execution/evm/test_helpers.go @@ -16,7 +16,7 @@ import ( // Transaction Helpers // GetRandomTransaction creates and signs a random Ethereum legacy transaction using the provided private key, recipient, chain ID, gas limit, and nonce. -func GetRandomTransaction(t *testing.T, privateKeyHex, toAddressHex, chainID string, gasLimit uint64, lastNonce *uint64) *types.Transaction { +func GetRandomTransaction(t testing.TB, privateKeyHex, toAddressHex, chainID string, gasLimit uint64, lastNonce *uint64) *types.Transaction { t.Helper() privateKey, err := crypto.HexToECDSA(privateKeyHex) require.NoError(t, err) diff --git a/test/e2e/evm_contract_bench_test.go b/test/e2e/evm_contract_bench_test.go new file mode 100644 index 0000000000..3c83a3c2a5 --- /dev/null +++ b/test/e2e/evm_contract_bench_test.go @@ -0,0 +1,104 @@ +//go:build evm + +package e2e + +import ( + "context" + "math/big" + "path/filepath" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +// BenchmarkEvmContractRoundtrip measures the store → retrieve roundtrip latency +// against a real reth node with a pre-deployed contract. +// +// All transaction generation happens during setup. The timed loop exclusively +// measures: SendTransaction → wait for receipt → eth_call retrieve → verify. +// +// Run with (after building local-da and evm binaries): +// +// PATH="/path/to/binaries:$PATH" go test -tags evm \ +// -bench BenchmarkEvmContractRoundtrip -benchmem -benchtime=5x \ +// -run='^$' -timeout=10m --evm-binary=/path/to/evm . +func BenchmarkEvmContractRoundtrip(b *testing.B) { + workDir := b.TempDir() + sequencerHome := filepath.Join(workDir, "evm-bench-sequencer") + + client, _, cleanup := setupTestSequencer(b, sequencerHome) + defer cleanup() + + ctx := b.Context() + privateKey, err := crypto.HexToECDSA(TestPrivateKey) + require.NoError(b, err) + chainID, ok := new(big.Int).SetString(DefaultChainID, 10) + require.True(b, ok) + signer := types.NewEIP155Signer(chainID) + + // Deploy contract once during setup. + contractAddr, nonce := deployContract(b, ctx, client, StorageContractBytecode, 0, privateKey, chainID) + + // Pre-build signed store(42) transactions for all iterations. 
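+	// The calldata below is the bare 32-byte word 0x2a with no 4-byte function
+	// selector; this assumes the pre-deployed StorageContractBytecode stores
+	// raw calldata and returns the stored word for an empty-calldata call,
+	// which is how the retrieve step in the timed loop reads it back.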
+ storeData, err := hexutil.Decode("0x000000000000000000000000000000000000000000000000000000000000002a") + require.NoError(b, err) + + const maxIter = 1024 + signedTxs := make([]*types.Transaction, maxIter) + for i := range maxIter { + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce + uint64(i), + To: &contractAddr, + Value: big.NewInt(0), + Gas: 500000, + GasPrice: big.NewInt(30000000000), + Data: storeData, + }) + signedTxs[i], err = types.SignTx(tx, signer, privateKey) + require.NoError(b, err) + } + + expected := common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000002a").Bytes() + callMsg := ethereum.CallMsg{To: &contractAddr, Data: []byte{}} + + b.ResetTimer() + b.ReportAllocs() + + var i int + for b.Loop() { + require.Less(b, i, maxIter, "increase maxIter for longer benchmark runs") + + // 1. Submit pre-signed store(42) transaction. + err = client.SendTransaction(ctx, signedTxs[i]) + require.NoError(b, err) + + // 2. Wait for inclusion. + waitForReceipt(b, ctx, client, signedTxs[i].Hash()) + + // 3. Retrieve and verify. + result, err := client.CallContract(ctx, callMsg, nil) + require.NoError(b, err) + require.Equal(b, expected, result, "retrieve() should return 42") + + i++ + } +} + +// waitForReceipt polls for a transaction receipt until it is available. +func waitForReceipt(t testing.TB, ctx context.Context, client *ethclient.Client, txHash common.Hash) *types.Receipt { + t.Helper() + var receipt *types.Receipt + var err error + require.Eventually(t, func() bool { + receipt, err = client.TransactionReceipt(ctx, txHash) + return err == nil && receipt != nil + }, 2*time.Second, 50*time.Millisecond, "transaction %s not included", txHash.Hex()) + return receipt +} diff --git a/test/e2e/evm_contract_e2e_test.go b/test/e2e/evm_contract_e2e_test.go index 0203ca6345..b6c988a85a 100644 --- a/test/e2e/evm_contract_e2e_test.go +++ b/test/e2e/evm_contract_e2e_test.go @@ -240,7 +240,7 @@ func TestEvmContractEvents(t *testing.T) { // setupTestSequencer sets up a single sequencer node for testing. // Returns the ethclient, genesis hash, and a cleanup function. -func setupTestSequencer(t *testing.T, homeDir string) (*ethclient.Client, string, func()) { +func setupTestSequencer(t testing.TB, homeDir string) (*ethclient.Client, string, func()) { sut := NewSystemUnderTest(t) genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir) @@ -257,7 +257,7 @@ func setupTestSequencer(t *testing.T, homeDir string) (*ethclient.Client, string // deployContract helps deploy a contract and waits for its inclusion. // Returns the deployed contract address and the next nonce. 
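+// It takes testing.TB rather than *testing.T so that tests and benchmarks
+// (*testing.B) can share the same helper.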
-func deployContract(t *testing.T, ctx context.Context, client *ethclient.Client, bytecodeStr string, nonce uint64, privateKey *ecdsa.PrivateKey, chainID *big.Int) (common.Address, uint64) { +func deployContract(t testing.TB, ctx context.Context, client *ethclient.Client, bytecodeStr string, nonce uint64, privateKey *ecdsa.PrivateKey, chainID *big.Int) (common.Address, uint64) { bytecode, err := hexutil.Decode("0x" + bytecodeStr) require.NoError(t, err) diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index d5a7215168..85c3abf629 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -55,7 +55,7 @@ func getAvailablePort() (int, net.Listener, error) { } // same as getAvailablePort but fails test if not successful -func mustGetAvailablePort(t *testing.T) int { +func mustGetAvailablePort(t testing.TB) int { t.Helper() port, listener, err := getAvailablePort() require.NoError(t, err) @@ -221,7 +221,7 @@ const ( // createPassphraseFile creates a temporary passphrase file and returns its path. // The file is created in the provided directory with secure permissions (0600). // If the directory doesn't exist, it will be created with 0755 permissions. -func createPassphraseFile(t *testing.T, dir string) string { +func createPassphraseFile(t testing.TB, dir string) string { t.Helper() // Ensure the directory exists err := os.MkdirAll(dir, 0755) @@ -236,7 +236,7 @@ func createPassphraseFile(t *testing.T, dir string) string { // createJWTSecretFile creates a temporary JWT secret file and returns its path. // The file is created in the provided directory with secure permissions (0600). // If the directory doesn't exist, it will be created with 0755 permissions. -func createJWTSecretFile(t *testing.T, dir, jwtSecret string) string { +func createJWTSecretFile(t testing.TB, dir, jwtSecret string) string { t.Helper() // Ensure the directory exists err := os.MkdirAll(dir, 0755) @@ -256,7 +256,7 @@ func createJWTSecretFile(t *testing.T, dir, jwtSecret string) string { // - rpcPort: Optional RPC port to use (if empty, uses default port) // // Returns: The full P2P address (e.g., /ip4/127.0.0.1/tcp/7676/p2p/12D3KooW...) -func getNodeP2PAddress(t *testing.T, sut *SystemUnderTest, nodeHome string, rpcPort ...string) string { +func getNodeP2PAddress(t testing.TB, sut *SystemUnderTest, nodeHome string, rpcPort ...string) string { t.Helper() // Build command arguments @@ -313,7 +313,7 @@ func getNodeP2PAddress(t *testing.T, sut *SystemUnderTest, nodeHome string, rpcP // - jwtSecret: JWT secret for authenticating with EVM engine // - genesisHash: Hash of the genesis block for chain validation // - endpoints: TestEndpoints struct containing unique port assignments -func setupSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // Create passphrase file @@ -357,7 +357,7 @@ func setupSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSe // setupSequencerNodeLazy initializes and starts the sequencer node in lazy mode. // In lazy mode, blocks are only produced when transactions are available, // not on a regular timer. 
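+// Like the helpers above, it takes testing.TB so benchmark code can reuse it.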
-func setupSequencerNodeLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNodeLazy(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // Create passphrase file @@ -417,7 +417,7 @@ func setupSequencerNodeLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, j // - genesisHash: Hash of the genesis block for chain validation // - sequencerP2PAddress: P2P address of the sequencer node to connect to // - endpoints: TestEndpoints struct containing unique port assignments -func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, sequencerP2PAddress string, endpoints *TestEndpoints) { +func setupFullNode(t testing.TB, sut *SystemUnderTest, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, sequencerP2PAddress string, endpoints *TestEndpoints) { t.Helper() // Initialize full node @@ -478,7 +478,7 @@ var globalNonce uint64 = 0 // // This is used in full node sync tests to verify that both nodes // include the same transaction in the same block number. -func submitTransactionAndGetBlockNumber(t *testing.T, sequencerClients ...*ethclient.Client) (common.Hash, uint64) { +func submitTransactionAndGetBlockNumber(t testing.TB, sequencerClients ...*ethclient.Client) (common.Hash, uint64) { t.Helper() // Submit transaction to sequencer EVM with unique nonce @@ -512,7 +512,7 @@ func submitTransactionAndGetBlockNumber(t *testing.T, sequencerClients ...*ethcl // - daPort: optional DA port to use (if empty, uses default) // // Returns: jwtSecret, fullNodeJwtSecret (empty if needsFullNode=false), genesisHash -func setupCommonEVMTest(t *testing.T, sut *SystemUnderTest, needsFullNode bool, _ ...string) (string, string, string, *TestEndpoints) { +func setupCommonEVMTest(t testing.TB, sut *SystemUnderTest, needsFullNode bool, _ ...string) (string, string, string, *TestEndpoints) { t.Helper() // Reset global nonce for each test to ensure clean state @@ -570,7 +570,7 @@ func setupCommonEVMTest(t *testing.T, sut *SystemUnderTest, needsFullNode bool, // - blockHeight: Height of the block to retrieve (use nil for latest) // // Returns: block hash, state root, transaction count, block number, and error -func checkBlockInfoAt(t *testing.T, ethURL string, blockHeight *uint64) (common.Hash, common.Hash, int, uint64, error) { +func checkBlockInfoAt(t testing.TB, ethURL string, blockHeight *uint64) (common.Hash, common.Hash, int, uint64, error) { t.Helper() ctx := context.Background() @@ -613,7 +613,7 @@ func checkBlockInfoAt(t *testing.T, ethURL string, blockHeight *uint64) (common. 
// - nodeHome: Directory path for sequencer node data // // Returns: genesisHash for the sequencer -func setupSequencerOnlyTest(t *testing.T, sut *SystemUnderTest, nodeHome string) (string, string) { +func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string) (string, string) { t.Helper() // Use common setup (no full node needed) @@ -635,7 +635,7 @@ func setupSequencerOnlyTest(t *testing.T, sut *SystemUnderTest, nodeHome string) // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartDAAndSequencer(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func restartDAAndSequencer(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // First restart the local DA @@ -685,7 +685,7 @@ func restartDAAndSequencer(t *testing.T, sut *SystemUnderTest, sequencerHome, jw // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartDAAndSequencerLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func restartDAAndSequencerLazy(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // First restart the local DA @@ -736,7 +736,7 @@ func restartDAAndSequencerLazy(t *testing.T, sut *SystemUnderTest, sequencerHome // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string) { +func restartSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string) { t.Helper() // Start sequencer node (without init - node already exists) @@ -772,7 +772,7 @@ func restartSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwt // - nodeName: Human-readable name for logging (e.g., "sequencer", "full node") // // This function ensures that during lazy mode idle periods, no automatic block production occurs. -func verifyNoBlockProduction(t *testing.T, client *ethclient.Client, duration time.Duration, nodeName string) { +func verifyNoBlockProduction(t testing.TB, client *ethclient.Client, duration time.Duration, nodeName string) { t.Helper() ctx := context.Background() diff --git a/test/e2e/sut_helper.go b/test/e2e/sut_helper.go index beba2b6195..f5783da8bb 100644 --- a/test/e2e/sut_helper.go +++ b/test/e2e/sut_helper.go @@ -33,7 +33,7 @@ var WorkDir = "." // SystemUnderTest is used to manage processes and logs during test execution. type SystemUnderTest struct { - t *testing.T + t testing.TB outBuff *ring.Ring errBuff *ring.Ring @@ -45,7 +45,7 @@ type SystemUnderTest struct { } // NewSystemUnderTest constructor -func NewSystemUnderTest(t *testing.T) *SystemUnderTest { +func NewSystemUnderTest(t testing.TB) *SystemUnderTest { r := &SystemUnderTest{ t: t, pids: make(map[int]struct{}), @@ -103,7 +103,7 @@ func (s *SystemUnderTest) ExecCmdWithLogPrefix(prefix, cmd string, args ...strin // AwaitNodeUp waits until a node is operational by checking both liveness and readiness. 
// Fails tests when node is not up within the specified timeout. -func (s *SystemUnderTest) AwaitNodeUp(t *testing.T, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNodeUp(t testing.TB, rpcAddr string, timeout time.Duration) { t.Helper() t.Logf("Await node is up: %s", rpcAddr) require.EventuallyWithT(t, func(t *assert.CollectT) { @@ -120,7 +120,7 @@ func (s *SystemUnderTest) AwaitNodeUp(t *testing.T, rpcAddr string, timeout time } // AwaitNodeLive waits until a node is alive (liveness check only). -func (s *SystemUnderTest) AwaitNodeLive(t *testing.T, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNodeLive(t testing.TB, rpcAddr string, timeout time.Duration) { t.Helper() t.Logf("Await node is live: %s", rpcAddr) require.EventuallyWithT(t, func(t *assert.CollectT) { @@ -132,7 +132,7 @@ func (s *SystemUnderTest) AwaitNodeLive(t *testing.T, rpcAddr string, timeout ti } // AwaitNBlocks waits until the node has produced at least `n` blocks. -func (s *SystemUnderTest) AwaitNBlocks(t *testing.T, n uint64, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNBlocks(t testing.TB, n uint64, rpcAddr string, timeout time.Duration) { t.Helper() ctx, done := context.WithTimeout(context.Background(), timeout) defer done() @@ -344,7 +344,7 @@ func locateExecutable(file string) string { } // MustCopyFile copies the file from the source path `src` to the destination path `dest` and returns an open file handle to `dest`. -func MustCopyFile(t *testing.T, src, dest string) *os.File { +func MustCopyFile(t testing.TB, src, dest string) *os.File { t.Helper() in, err := os.Open(src) // nolint: gosec // used by tests only require.NoError(t, err) @@ -362,11 +362,11 @@ func MustCopyFile(t *testing.T, src, dest string) *os.File { } // NodeID generates and returns the peer ID from the node's private key. -func NodeID(t *testing.T, nodeDir string) peer.ID { +func NodeID(t testing.TB, nodeDir string) peer.ID { t.Helper() node1Key, err := key.LoadNodeKey(filepath.Join(nodeDir, "config")) require.NoError(t, err) node1ID, err := peer.IDFromPrivateKey(node1Key.PrivKey) require.NoError(t, err) return node1ID -} \ No newline at end of file +} From c3228387fc0f4ff6c21222662a777883625b93f0 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 19:08:13 +0100 Subject: [PATCH 2/7] Capture otel --- test/e2e/evm_contract_bench_test.go | 198 +++++++++++++++++++++++++++- test/e2e/evm_contract_e2e_test.go | 4 +- test/e2e/evm_test_common.go | 7 +- test/e2e/go.mod | 4 +- 4 files changed, 202 insertions(+), 11 deletions(-) diff --git a/test/e2e/evm_contract_bench_test.go b/test/e2e/evm_contract_bench_test.go index 3c83a3c2a5..b03e41e2bc 100644 --- a/test/e2e/evm_contract_bench_test.go +++ b/test/e2e/evm_contract_bench_test.go @@ -4,8 +4,15 @@ package e2e import ( "context" + "encoding/json" + "fmt" + "io" "math/big" + "net" + "net/http" "path/filepath" + "sort" + "sync" "testing" "time" @@ -16,24 +23,40 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + collpb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // BenchmarkEvmContractRoundtrip measures the store → retrieve roundtrip latency // against a real reth node with a pre-deployed contract. // -// All transaction generation happens during setup. 
The timed loop exclusively
-// measures: SendTransaction → wait for receipt → eth_call retrieve → verify.
+// The node is started with OpenTelemetry tracing enabled, exporting to an
+// in-process OTLP/HTTP receiver. After the timed loop, the collected spans are
+// aggregated into a hierarchical timing report showing where time is spent
+// inside ev-node (Engine API calls, executor, sequencer, etc.).
 //
 // Run with (after building local-da and evm binaries):
 //
 //	PATH="/path/to/binaries:$PATH" go test -tags evm \
 //	-bench BenchmarkEvmContractRoundtrip -benchmem -benchtime=5x \
-//	-run='^$' -timeout=10m --evm-binary=/path/to/evm .
+//	-run='^$' -timeout=10m -v --evm-binary=/path/to/evm .
 func BenchmarkEvmContractRoundtrip(b *testing.B) {
 	workDir := b.TempDir()
 	sequencerHome := filepath.Join(workDir, "evm-bench-sequencer")
 
-	client, _, cleanup := setupTestSequencer(b, sequencerHome)
+	// Start an in-process OTLP/HTTP receiver to collect traces from ev-node.
+	collector := newOTLPCollector(b)
+	defer collector.close()
+
+	// Start sequencer with tracing enabled, exporting to our in-process collector.
+	client, _, cleanup := setupTestSequencer(b, sequencerHome,
+		"--evnode.instrumentation.tracing=true",
+		"--evnode.instrumentation.tracing_endpoint", collector.endpoint(),
+		"--evnode.instrumentation.tracing_sample_rate", "1.0",
+		"--evnode.instrumentation.tracing_service_name", "ev-node-bench",
+	)
 	defer cleanup()
 
 	ctx := b.Context()
@@ -89,6 +112,173 @@ func BenchmarkEvmContractRoundtrip(b *testing.B) {
 
 		i++
 	}
+
+	b.StopTimer()
+
+	// Give the node a moment to flush pending span batches.
+	time.Sleep(2 * time.Second)
+
+	// Print the trace breakdown from the collected spans.
+	printCollectedTraceReport(b, collector)
+}
+
+// --- In-process OTLP/HTTP Collector ---
+
+// otlpCollector is a lightweight OTLP/HTTP receiver that collects trace spans
+// in memory. It serves the /v1/traces endpoint that the node's OTLP exporter
+// posts protobuf-encoded ExportTraceServiceRequest messages to.
+type otlpCollector struct {
+	mu     sync.Mutex
+	spans  []*tracepb.Span
+	server *http.Server
+	addr   string
+}
+
+func newOTLPCollector(t testing.TB) *otlpCollector {
+	t.Helper()
+
+	c := &otlpCollector{}
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/v1/traces", c.handleTraces)
+
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(t, err)
+	c.addr = listener.Addr().String()
+
+	c.server = &http.Server{Handler: mux}
+	go func() { _ = c.server.Serve(listener) }()
+
+	t.Logf("OTLP collector listening on %s", c.addr)
+	return c
+}
+
+func (c *otlpCollector) endpoint() string {
+	return "http://" + c.addr
+}
+
+func (c *otlpCollector) close() {
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	_ = c.server.Shutdown(ctx)
+}
+
+func (c *otlpCollector) handleTraces(w http.ResponseWriter, r *http.Request) {
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	// Try protobuf first (default for otlptracehttp).
+	var req collpb.ExportTraceServiceRequest
+	if err := proto.Unmarshal(body, &req); err != nil {
+		// Fallback: try JSON (some configurations use JSON encoding).
+		if jsonErr := json.Unmarshal(body, &req); jsonErr != nil {
+			http.Error(w, fmt.Sprintf("proto: %v; json: %v", err, jsonErr), http.StatusBadRequest)
+			return
+		}
+	}
+
+	c.mu.Lock()
+	for _, rs := range req.GetResourceSpans() {
+		for _, ss := range rs.GetScopeSpans() {
+			c.spans = append(c.spans, ss.GetSpans()...)
+		
+ } + } + c.mu.Unlock() + + // Respond with an empty ExportTraceServiceResponse (protobuf). + resp := &collpb.ExportTraceServiceResponse{} + out, _ := proto.Marshal(resp) + w.Header().Set("Content-Type", "application/x-protobuf") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(out) +} + +func (c *otlpCollector) getSpans() []*tracepb.Span { + c.mu.Lock() + defer c.mu.Unlock() + cp := make([]*tracepb.Span, len(c.spans)) + copy(cp, c.spans) + return cp +} + +// printCollectedTraceReport aggregates collected spans by operation name and +// prints a timing breakdown. +func printCollectedTraceReport(b testing.TB, collector *otlpCollector) { + b.Helper() + + spans := collector.getSpans() + if len(spans) == 0 { + b.Logf("WARNING: no spans collected from ev-node") + return + } + + type stats struct { + count int + total time.Duration + min time.Duration + max time.Duration + } + m := make(map[string]*stats) + + for _, span := range spans { + // Duration: end - start in nanoseconds. + d := time.Duration(span.GetEndTimeUnixNano()-span.GetStartTimeUnixNano()) * time.Nanosecond + if d <= 0 { + continue + } + name := span.GetName() + s, ok := m[name] + if !ok { + s = &stats{min: d, max: d} + m[name] = s + } + s.count++ + s.total += d + if d < s.min { + s.min = d + } + if d > s.max { + s.max = d + } + } + + // Sort by total time descending. + names := make([]string, 0, len(m)) + for name := range m { + names = append(names, name) + } + sort.Slice(names, func(i, j int) bool { + return m[names[i]].total > m[names[j]].total + }) + + // Calculate overall total for percentages. + var overallTotal time.Duration + for _, s := range m { + overallTotal += s.total + } + + b.Logf("\n--- ev-node Trace Breakdown (%d spans collected) ---", len(spans)) + b.Logf("%-40s %6s %12s %12s %12s %7s", "OPERATION", "COUNT", "AVG", "MIN", "MAX", "% TOTAL") + for _, name := range names { + s := m[name] + avg := s.total / time.Duration(s.count) + pct := float64(s.total) / float64(overallTotal) * 100 + b.Logf("%-40s %6d %12s %12s %12s %6.1f%%", name, s.count, avg, s.min, s.max, pct) + } + + b.Logf("\n--- Time Distribution ---") + for _, name := range names { + s := m[name] + pct := float64(s.total) / float64(overallTotal) * 100 + bar := "" + for range int(pct / 2) { + bar += "█" + } + b.Logf("%-40s %5.1f%% %s", name, pct, bar) + } } // waitForReceipt polls for a transaction receipt until it is available. diff --git a/test/e2e/evm_contract_e2e_test.go b/test/e2e/evm_contract_e2e_test.go index b6c988a85a..477b0801be 100644 --- a/test/e2e/evm_contract_e2e_test.go +++ b/test/e2e/evm_contract_e2e_test.go @@ -240,10 +240,10 @@ func TestEvmContractEvents(t *testing.T) { // setupTestSequencer sets up a single sequencer node for testing. // Returns the ethclient, genesis hash, and a cleanup function. -func setupTestSequencer(t testing.TB, homeDir string) (*ethclient.Client, string, func()) { +func setupTestSequencer(t testing.TB, homeDir string, extraArgs ...string) (*ethclient.Client, string, func()) { sut := NewSystemUnderTest(t) - genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir) + genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir, extraArgs...) 
t.Logf("Sequencer started at %s (Genesis: %s)", seqEthURL, genesisHash) client, err := ethclient.Dial(seqEthURL) diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index 85c3abf629..33518097f9 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -313,7 +313,7 @@ func getNodeP2PAddress(t testing.TB, sut *SystemUnderTest, nodeHome string, rpcP // - jwtSecret: JWT secret for authenticating with EVM engine // - genesisHash: Hash of the genesis block for chain validation // - endpoints: TestEndpoints struct containing unique port assignments -func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints, extraArgs ...string) { t.Helper() // Create passphrase file @@ -350,6 +350,7 @@ func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSe "--evm.engine-url", endpoints.GetSequencerEngineURL(), "--evm.eth-url", endpoints.GetSequencerEthURL(), } + args = append(args, extraArgs...) sut.ExecCmd(evmSingleBinaryPath, args...) sut.AwaitNodeUp(t, endpoints.GetRollkitRPCAddress(), NodeStartupTimeout) } @@ -613,14 +614,14 @@ func checkBlockInfoAt(t testing.TB, ethURL string, blockHeight *uint64) (common. // - nodeHome: Directory path for sequencer node data // // Returns: genesisHash for the sequencer -func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string) (string, string) { +func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string, extraArgs ...string) (string, string) { t.Helper() // Use common setup (no full node needed) jwtSecret, _, genesisHash, endpoints := setupCommonEVMTest(t, sut, false) // Initialize and start sequencer node - setupSequencerNode(t, sut, nodeHome, jwtSecret, genesisHash, endpoints) + setupSequencerNode(t, sut, nodeHome, jwtSecret, genesisHash, endpoints, extraArgs...) 
t.Log("Sequencer node is up") return genesisHash, endpoints.GetSequencerEthURL() diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 9ef0dae15b..42d0de4b83 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -15,6 +15,8 @@ require ( github.com/libp2p/go-libp2p v0.47.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.40.0 + go.opentelemetry.io/otel/sdk v1.40.0 google.golang.org/protobuf v1.36.11 ) @@ -288,11 +290,9 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/dig v1.19.0 // indirect From 014444f482f6196d388554bfb04b2c6aee23d656 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 19:22:38 +0100 Subject: [PATCH 3/7] shot1 --- block/internal/executing/executor.go | 31 +++++++---- execution/evm/execution.go | 83 ++++++++++++++++------------ 2 files changed, 69 insertions(+), 45 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index ac68f2cd85..6ec5bf8b53 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -432,18 +432,21 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { header *types.SignedHeader data *types.Data batchData *BatchData + err error ) - // Check if there's an already stored block at the newHeight - // If there is use that instead of creating a new block - pendingHeader, pendingData, err := e.getPendingBlock(ctx) - if err == nil && pendingHeader != nil && pendingHeader.Height() == newHeight { + // Check if there's an already stored pending block at the newHeight. + // This handles crash recovery — a previous run may have saved a pending block. + pendingHeader, pendingData, pendErr := e.getPendingBlock(ctx) + if pendErr == nil && pendingHeader != nil && pendingHeader.Height() == newHeight { e.logger.Info().Uint64("height", newHeight).Msg("using pending block") header = pendingHeader data = pendingData - } else if err != nil && !errors.Is(err, datastore.ErrNotFound) { - return fmt.Errorf("failed to get block data: %w", err) - } else { + } else if pendErr != nil && !errors.Is(pendErr, datastore.ErrNotFound) { + return fmt.Errorf("failed to get block data: %w", pendErr) + } + + if header == nil { // get batch from sequencer batchData, err = e.blockProducer.RetrieveBatch(ctx) if errors.Is(err, common.ErrNoBatch) { @@ -459,8 +462,12 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to create block: %w", err) } - if err := e.savePendingBlock(ctx, header, data); err != nil { - return fmt.Errorf("failed to save block data: %w", err) + // Only persist pending block for raft crash recovery — skip for non-raft + // to avoid serialization + store write overhead on every block. 
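+	// Note: getPendingBlock above still runs unconditionally, so a pending
+	// block persisted by an earlier run is still consumed after a restart.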
+ if e.raftNode != nil { + if err := e.savePendingBlock(ctx, header, data); err != nil { + return fmt.Errorf("failed to save block data: %w", err) + } } } @@ -533,8 +540,10 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { } e.logger.Debug().Uint64("height", newHeight).Msg("proposed block to raft") } - if err := e.deletePendingBlock(batch); err != nil { - e.logger.Warn().Err(err).Uint64("height", newHeight).Msg("failed to delete pending block metadata") + if e.raftNode != nil { + if err := e.deletePendingBlock(batch); err != nil { + e.logger.Warn().Err(err).Uint64("height", newHeight).Msg("failed to delete pending block metadata") + } } if err := batch.Commit(); err != nil { diff --git a/execution/evm/execution.go b/execution/evm/execution.go index 15cddf6417..f1ed44af59 100644 --- a/execution/evm/execution.go +++ b/execution/evm/execution.go @@ -3,7 +3,7 @@ package evm import ( "bytes" "context" - "crypto/sha256" + "encoding/hex" "errors" "fmt" @@ -162,6 +162,15 @@ type EthRPCClient interface { GetTxs(ctx context.Context) ([]string, error) } +// prevBlockInfo caches the result of the last successfully processed block to +// avoid a redundant eth_getBlockByNumber RPC on the next ExecuteTxs call. +type prevBlockInfo struct { + height uint64 + blockHash common.Hash + stateRoot common.Hash + gasLimit uint64 +} + // EngineClient represents a client that interacts with an Ethereum execution engine // through the Engine API. It manages connections to both the engine and standard Ethereum // APIs, and maintains state related to block processing. @@ -183,6 +192,10 @@ type EngineClient struct { currentFinalizedBlockHash common.Hash // Store last finalized block hash blockHashCache map[uint64]common.Hash // height -> hash cache for safe block lookups + // prevBlock caches block info from the last produced block to eliminate a + // getBlockInfo RPC on the next call. Protected by mu. + prevBlock *prevBlockInfo + cachedExecutionInfo atomic.Pointer[execution.ExecutionInfo] // Cached execution info (gas limit) logger zerolog.Logger @@ -346,27 +359,30 @@ func (c *EngineClient) GetTxs(ctx context.Context) ([][]byte, error) { // - Updates ExecMeta to "promoted" after successful execution func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) { - // 1. Check for idempotent execution - stateRoot, payloadID, found, idempotencyErr := c.reconcileExecutionAtHeight(ctx, blockHeight, timestamp, txs) - if idempotencyErr != nil { - c.logger.Warn().Err(idempotencyErr).Uint64("height", blockHeight).Msg("ExecuteTxs: idempotency check failed") - // Continue execution on error, as it might be transient - } else if found { - if stateRoot != nil { - return stateRoot, nil - } - if payloadID != nil { - // Found in-progress execution, attempt to resume - return c.processPayload(ctx, *payloadID, txs) + // 1. Try to get previous block info from cache (avoids eth_getBlockByNumber RPC). + var prevBlockHash common.Hash + var prevHeaderStateRoot common.Hash + var prevGasLimit uint64 + + c.mu.Lock() + cached := c.prevBlock + c.mu.Unlock() + + if cached != nil && cached.height == blockHeight-1 { + // Cache hit — skip the eth RPC entirely. + prevBlockHash = cached.blockHash + prevHeaderStateRoot = cached.stateRoot + prevGasLimit = cached.gasLimit + } else { + // Cache miss (first block, restart, or non-sequential) — fall back to RPC. 
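+		// (c.prevBlock is published in processPayload after each promoted
+		// payload, so steady-state sequential production stays on the cache path.)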
+ var err error + prevBlockHash, prevHeaderStateRoot, prevGasLimit, _, err = c.getBlockInfo(ctx, blockHeight-1) + if err != nil { + return nil, fmt.Errorf("failed to get block info: %w", err) } } - prevBlockHash, prevHeaderStateRoot, prevGasLimit, _, err := c.getBlockInfo(ctx, blockHeight-1) - if err != nil { - return nil, fmt.Errorf("failed to get block info: %w", err) - } - // It's possible that the prev state root passed in is nil if this is the first block. - // If so, we can't do a comparison. Otherwise, we compare the roots. + // Verify state root consistency when prevStateRoot is provided. if len(prevStateRoot) > 0 && !bytes.Equal(prevStateRoot, prevHeaderStateRoot.Bytes()) { return nil, fmt.Errorf("prevStateRoot mismatch at height %d: consensus=%x execution=%x", blockHeight-1, prevStateRoot, prevHeaderStateRoot.Bytes()) } @@ -388,7 +404,6 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight c.mu.Unlock() // update forkchoice to get the next payload id - // Create evolve-compatible payloadtimestamp.Unix() evPayloadAttrs := map[string]any{ // Standard Ethereum payload attributes (flattened) - using camelCase as expected by JSON "timestamp": timestamp.Unix(), @@ -448,9 +463,8 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight return nil, err } - // Save ExecMeta with payloadID for crash recovery (Stage="started") - // This allows resuming the payload build if we crash before completing - c.saveExecMeta(ctx, blockHeight, timestamp.Unix(), newPayloadID[:], nil, nil, txs, ExecStageStarted) + // Save ExecMeta with payloadID for crash recovery (Stage="started") — async. + go c.saveExecMeta(ctx, blockHeight, timestamp.Unix(), newPayloadID[:], nil, nil, txs, ExecStageStarted) // 4. Process the payload (get, submit, finalize) return c.processPayload(ctx, *newPayloadID, txs) @@ -989,8 +1003,18 @@ func (c *EngineClient) processPayload(ctx context.Context, payloadID engine.Payl return nil, fmt.Errorf("forkchoice update failed: %w", err) } - // 4. Save ExecMeta (Promoted) - c.saveExecMeta(ctx, blockHeight, blockTimestamp, payloadID[:], blockHash[:], payloadResult.ExecutionPayload.StateRoot.Bytes(), txs, ExecStagePromoted) + // 4. Cache block info for next ExecuteTxs (avoids getBlockInfo RPC). + c.mu.Lock() + c.prevBlock = &prevBlockInfo{ + height: blockHeight, + blockHash: blockHash, + stateRoot: payloadResult.ExecutionPayload.StateRoot, + gasLimit: payloadResult.ExecutionPayload.GasLimit, + } + c.mu.Unlock() + + // 5. Save ExecMeta (Promoted) — async, best-effort. 
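+	// Caveat: the goroutine reuses the caller's ctx; if that is cancelled
+	// before the write lands, the metadata is dropped, which is tolerable
+	// since ExecMeta is only a crash-recovery hint.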
+ go c.saveExecMeta(ctx, blockHeight, blockTimestamp, payloadID[:], blockHash[:], payloadResult.ExecutionPayload.StateRoot.Bytes(), txs, ExecStagePromoted) return payloadResult.ExecutionPayload.StateRoot.Bytes(), nil } @@ -1022,15 +1046,6 @@ func (c *EngineClient) saveExecMeta(ctx context.Context, height uint64, timestam UpdatedAtUnix: time.Now().Unix(), } - // Compute tx hash for sanity checks on retry - if len(txs) > 0 { - h := sha256.New() - for _, tx := range txs { - h.Write(tx) - } - execMeta.TxHash = h.Sum(nil) - } - if err := c.store.SaveExecMeta(ctx, execMeta); err != nil { c.logger.Warn().Err(err).Uint64("height", height).Msg("saveExecMeta: failed to save exec meta") return From debee02c68a0a5368f0316e2f626d0dbdb487788 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 19:30:36 +0100 Subject: [PATCH 4/7] Shot2 --- block/internal/executing/executor.go | 73 +++++++++++++++++++++++----- execution/evm/execution.go | 37 +++++++++++--- 2 files changed, 92 insertions(+), 18 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 6ec5bf8b53..11a2319e5b 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -29,6 +29,15 @@ import ( var _ BlockProducer = (*Executor)(nil) +// lastBlockCacheEntry caches the last produced block's header hash, data hash, +// and signature to avoid store reads in CreateBlock. +type lastBlockCacheEntry struct { + height uint64 + headerHash types.Hash + dataHash types.Hash + signature types.Signature +} + // Executor handles block production, transaction processing, and state management type Executor struct { // Core components @@ -71,6 +80,16 @@ type Executor struct { // blockProducer is the interface used for block production operations. // defaults to self, but can be wrapped with tracing. blockProducer BlockProducer + + // lastBlock caches last produced header/data/signature to avoid store reads + // in CreateBlock. Protected by lastBlockMu. + lastBlockMu sync.Mutex + lastBlockInfo *lastBlockCacheEntry + + // cachedSignerInfo caches pubKey and validatorHash (never change after init). + cachedPubKey crypto.PubKey + cachedValidatorHash types.Hash + signerInfoCached bool } // NewExecutor creates a new block executor. @@ -553,6 +572,16 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { // Update in-memory state after successful commit e.setLastState(newState) + // Cache this block for the next CreateBlock call (avoids 2 store reads). + e.lastBlockMu.Lock() + e.lastBlockInfo = &lastBlockCacheEntry{ + height: newHeight, + headerHash: header.Hash(), + dataHash: data.Hash(), + signature: signature, + } + e.lastBlockMu.Unlock() + // broadcast header and data to P2P network g, broadcastCtx := errgroup.WithContext(e.ctx) g.Go(func() error { @@ -621,25 +650,40 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba if height > e.genesis.InitialHeight { headerTime = uint64(batchData.UnixNano()) - lastHeader, lastData, err := e.store.GetBlockData(ctx, height-1) - if err != nil { - return nil, nil, fmt.Errorf("failed to get last block: %w", err) - } - lastHeaderHash = lastHeader.Hash() - lastDataHash = lastData.Hash() + // Try cache first (hot path — avoids 2 store reads). 
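+		// Entries are immutable once published, so the lock only guards the
+		// pointer read.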
+ e.lastBlockMu.Lock() + cached := e.lastBlockInfo + e.lastBlockMu.Unlock() + + if cached != nil && cached.height == height-1 { + lastHeaderHash = cached.headerHash + lastDataHash = cached.dataHash + lastSignature = cached.signature + } else { + // Cache miss (first block after restart) — fall back to store. + lastHeader, lastData, err := e.store.GetBlockData(ctx, height-1) + if err != nil { + return nil, nil, fmt.Errorf("failed to get last block: %w", err) + } + lastHeaderHash = lastHeader.Hash() + lastDataHash = lastData.Hash() - lastSignaturePtr, err := e.store.GetSignature(ctx, height-1) - if err != nil { - return nil, nil, fmt.Errorf("failed to get last signature: %w", err) + lastSignaturePtr, err := e.store.GetSignature(ctx, height-1) + if err != nil { + return nil, nil, fmt.Errorf("failed to get last signature: %w", err) + } + lastSignature = *lastSignaturePtr } - lastSignature = *lastSignaturePtr } - // Get signer info and validator hash + // Get signer info and validator hash (cached after first call). var pubKey crypto.PubKey var validatorHash types.Hash - if e.signer != nil { + if e.signerInfoCached { + pubKey = e.cachedPubKey + validatorHash = e.cachedValidatorHash + } else if e.signer != nil { var err error pubKey, err = e.signer.GetPublic() if err != nil { @@ -650,6 +694,9 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } + e.cachedPubKey = pubKey + e.cachedValidatorHash = validatorHash + e.signerInfoCached = true } else { // For based sequencer without signer, use nil pubkey and compute validator hash var err error @@ -657,6 +704,8 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } + e.cachedValidatorHash = validatorHash + e.signerInfoCached = true } // Create header diff --git a/execution/evm/execution.go b/execution/evm/execution.go index f1ed44af59..f32e39bc6d 100644 --- a/execution/evm/execution.go +++ b/execution/evm/execution.go @@ -569,6 +569,33 @@ func (c *EngineClient) setFinalWithHeight(ctx context.Context, blockHash common. return c.doForkchoiceUpdate(ctx, args, "setFinal") } +// updateForkchoiceState updates the in-memory forkchoice state (head, safe, finalized) +// WITHOUT making an engine RPC call. The updated values will be sent in the next +// ForkchoiceUpdated call (from ExecuteTxs), avoiding a redundant round-trip. +func (c *EngineClient) updateForkchoiceState(blockHash common.Hash, headHeight uint64) { + c.mu.Lock() + defer c.mu.Unlock() + + c.currentHeadBlockHash = blockHash + c.currentHeadHeight = headHeight + + // Advance safe block + if headHeight > SafeBlockLag { + safeHeight := headHeight - SafeBlockLag + if h, ok := c.blockHashCache[safeHeight]; ok { + c.currentSafeBlockHash = h + } + } + + // Advance finalized block + if headHeight > FinalizedBlockLag { + finalizedHeight := headHeight - FinalizedBlockLag + if h, ok := c.blockHashCache[finalizedHeight]; ok { + c.currentFinalizedBlockHash = h + } + } +} + // doForkchoiceUpdate performs the actual forkchoice update RPC call with retry logic. func (c *EngineClient) doForkchoiceUpdate(ctx context.Context, args engine.ForkchoiceStateV1, operation string) error { @@ -994,14 +1021,12 @@ func (c *EngineClient) processPayload(ctx context.Context, payloadID engine.Payl return nil, err } - // 3. Update Forkchoice + // 3. Update forkchoice state (deferred — no RPC). 
+	// The next ExecuteTxs call's ForkchoiceUpdated will carry the correct
+	// head/safe/finalized values, so we skip the redundant engine RPC here.
 	blockHash := payloadResult.ExecutionPayload.BlockHash
 	c.cacheBlockHash(blockHeight, blockHash)
-
-	err = c.setFinalWithHeight(ctx, blockHash, blockHeight, false)
-	if err != nil {
-		return nil, fmt.Errorf("forkchoice update failed: %w", err)
-	}
+	c.updateForkchoiceState(blockHash, blockHeight)
 
 	// 4. Cache block info for next ExecuteTxs (avoids getBlockInfo RPC).
 	c.mu.Lock()
 	c.prevBlock = &prevBlockInfo{
 		height:    blockHeight,
 		blockHash: blockHash,
 		stateRoot: payloadResult.ExecutionPayload.StateRoot,
 		gasLimit:  payloadResult.ExecutionPayload.GasLimit,
 	}
 	c.mu.Unlock()

From f3274236f6789cd000d7c636b5bbd5d9b06eec2d Mon Sep 17 00:00:00 2001
From: Alex Peters
Date: Tue, 17 Feb 2026 20:21:56 +0100
Subject: [PATCH 5/7] Shot3

---
 block/internal/executing/executor.go | 36 ++++++++++++++--------------
 execution/evm/execution.go           | 18 ++++++++++----
 2 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go
index 11a2319e5b..81aadbcd5c 100644
--- a/block/internal/executing/executor.go
+++ b/block/internal/executing/executor.go
@@ -511,12 +511,10 @@ func (e *Executor) ProduceBlock(ctx context.Context) error {
 	}
 	header.Signature = signature
 
-	if err := e.blockProducer.ValidateBlock(ctx, currentState, header, data); err != nil {
-		e.sendCriticalError(fmt.Errorf("failed to validate block: %w", err))
-		e.logger.Error().Err(err).Msg("CRITICAL: Permanent block validation error - halting block production")
-		return fmt.Errorf("failed to validate block: %w", err)
-	}
+	// ValidateBlock is only needed for blocks we didn't produce (syncer path).
+	// On the sequencer, we just built this block — skip self-validation.
+	// Prepare and commit the store batch synchronously; only the P2P broadcast is deferred.
 	batch, err := e.store.NewBatch(ctx)
 	if err != nil {
 		return fmt.Errorf("failed to create batch: %w", err)
 	}
@@ -565,11 +563,12 @@ func (e *Executor) ProduceBlock(ctx context.Context) error {
 		}
 	}
 
+	// Commit synchronously — DA submitter reads store height.
 	if err := batch.Commit(); err != nil {
 		return fmt.Errorf("failed to commit batch: %w", err)
 	}
 
-	// Update in-memory state after successful commit
+	// Update in-memory state after successful commit.
 	e.setLastState(newState)
 
 	// Cache this block for the next CreateBlock call (avoids 2 store reads).
 	e.lastBlockMu.Lock()
 	e.lastBlockInfo = &lastBlockCacheEntry{
 		height:     newHeight,
 		headerHash: header.Hash(),
 		dataHash:   data.Hash(),
 		signature:  signature,
 	}
 	e.lastBlockMu.Unlock()
 
-	// broadcast header and data to P2P network
-	g, broadcastCtx := errgroup.WithContext(e.ctx)
-	g.Go(func() error {
-		return e.headerBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PSignedHeader{SignedHeader: header})
-	})
-	g.Go(func() error {
-		return e.dataBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PData{Data: data})
-	})
-	if err := g.Wait(); err != nil {
-		e.logger.Error().Err(err).Msg("failed to broadcast header and/data")
-		// don't fail block production on broadcast error
-	}
+	// P2P broadcast is fire-and-forget — doesn't block next block production.
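+	// The errgroup context derives from e.ctx rather than the per-block ctx,
+	// so an in-flight broadcast survives this ProduceBlock call returning.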
+	go func() {
+		g, broadcastCtx := errgroup.WithContext(e.ctx)
+		g.Go(func() error {
+			return e.headerBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PSignedHeader{SignedHeader: header})
+		})
+		g.Go(func() error {
+			return e.dataBroadcaster.WriteToStoreAndBroadcast(broadcastCtx, &types.P2PData{Data: data})
+		})
+		if err := g.Wait(); err != nil {
+			e.logger.Error().Err(err).Msg("failed to broadcast header and/or data")
+		}
+	}()
 
 	e.recordBlockMetrics(newState, data)
 
diff --git a/execution/evm/execution.go b/execution/evm/execution.go
index f32e39bc6d..17f87b5b33 100644
--- a/execution/evm/execution.go
+++ b/execution/evm/execution.go
@@ -56,6 +56,14 @@ var (
 	ErrPayloadSyncing = errors.New("payload syncing")
 )
 
+// Pre-computed constants to avoid per-block allocations.
+var (
+	zeroHashHex      = common.Hash{}.Hex()
+	emptyWithdrawals = []*types.Withdrawal{}
+	emptyBlobHashes  = []string{}
+	emptyExecReqs    = [][]byte{}
+)
+
 // Ensure EngineAPIExecutionClient implements the execution.Execute interface
 var _ execution.Executor = (*EngineClient)(nil)
 
@@ -409,9 +417,9 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight
 		"timestamp":             timestamp.Unix(),
 		"prevRandao":            c.derivePrevRandao(blockHeight),
 		"suggestedFeeRecipient": c.feeRecipient,
-		"withdrawals":           []*types.Withdrawal{},
+		"withdrawals":           emptyWithdrawals,
 		// V3 requires parentBeaconBlockRoot
-		"parentBeaconBlockRoot": common.Hash{}.Hex(), // Use zero hash for evolve
+		"parentBeaconBlockRoot": zeroHashHex, // Use zero hash for evolve
 		// evolve-specific fields
 		"transactions": txsPayload,
 		"gasLimit":     prevGasLimit, // Use camelCase to match JSON conventions
@@ -998,9 +1006,9 @@ func (c *EngineClient) processPayload(ctx context.Context, payloadID engine.Payl
 	err = retryWithBackoffOnPayloadStatus(ctx, func() error {
 		newPayloadResult, err := c.engineClient.NewPayload(ctx,
 			payloadResult.ExecutionPayload,
-			[]string{},          // No blob hashes
-			common.Hash{}.Hex(), // Use zero hash for parentBeaconBlockRoot
-			[][]byte{},          // No execution requests
+			emptyBlobHashes, // No blob hashes
+			zeroHashHex,     // Use zero hash for parentBeaconBlockRoot
+			emptyExecReqs,   // No execution requests

From 6ea3b8d1dab59285cd08d84ac3e48bfd4d1bc379 Mon Sep 17 00:00:00 2001
From: Alex Peters
Date: Wed, 18 Feb 2026 09:17:31 +0100
Subject: [PATCH 6/7] shot4

---
 block/internal/executing/executor.go |   6 +-
 execution/evm/engine_rpc_client.go   |   2 +-
 execution/evm/engine_rpc_tracing.go  |   2 +-
 execution/evm/execution.go           | 115 ++++++++++++++++++++-------
 execution/evm/payload_test.go        | 101 +++++++++++++++++++++++
 5 files changed, 193 insertions(+), 33 deletions(-)
 create mode 100644 execution/evm/payload_test.go

diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go
index 81aadbcd5c..4568350c8f 100644
--- a/block/internal/executing/executor.go
+++ b/block/internal/executing/executor.go
@@ -90,6 +90,9 @@ type Executor struct {
 	cachedPubKey        crypto.PubKey
 	cachedValidatorHash types.Hash
 	signerInfoCached    bool
+
+	// cachedChainID avoids per-block allocation in RetrieveBatch
+	cachedChainID []byte
 }
 
 // NewExecutor creates a new block executor.
@@ -152,6 +155,7 @@ func NewExecutor( txNotifyCh: make(chan struct{}, 1), errorCh: errorCh, logger: logger.With().Str("component", "executor").Logger(), + cachedChainID: []byte(genesis.ChainID), } e.blockProducer = e return e, nil @@ -608,7 +612,7 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { // RetrieveBatch gets the next batch of transactions from the sequencer. func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { req := coresequencer.GetNextBatchRequest{ - Id: []byte(e.genesis.ChainID), + Id: e.cachedChainID, MaxBytes: common.DefaultMaxBlobSize, LastBatchData: [][]byte{}, // Can be populated if needed for sequencer context } diff --git a/execution/evm/engine_rpc_client.go b/execution/evm/engine_rpc_client.go index ec04564aa1..323bebc6a2 100644 --- a/execution/evm/engine_rpc_client.go +++ b/execution/evm/engine_rpc_client.go @@ -19,7 +19,7 @@ func NewEngineRPCClient(client *rpc.Client) EngineRPCClient { return &engineRPCClient{client: client} } -func (e *engineRPCClient) ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args map[string]any) (*engine.ForkChoiceResponse, error) { +func (e *engineRPCClient) ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args interface{}) (*engine.ForkChoiceResponse, error) { var result engine.ForkChoiceResponse err := e.client.CallContext(ctx, &result, "engine_forkchoiceUpdatedV3", state, args) if err != nil { diff --git a/execution/evm/engine_rpc_tracing.go b/execution/evm/engine_rpc_tracing.go index f5bf09e4bc..aff2da304e 100644 --- a/execution/evm/engine_rpc_tracing.go +++ b/execution/evm/engine_rpc_tracing.go @@ -26,7 +26,7 @@ func withTracingEngineRPCClient(inner EngineRPCClient) EngineRPCClient { } } -func (t *tracedEngineRPCClient) ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args map[string]any) (*engine.ForkChoiceResponse, error) { +func (t *tracedEngineRPCClient) ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args interface{}) (*engine.ForkChoiceResponse, error) { ctx, span := t.tracer.Start(ctx, "Engine.ForkchoiceUpdated", trace.WithAttributes( attribute.String("method", "engine_forkchoiceUpdatedV3"), diff --git a/execution/evm/execution.go b/execution/evm/execution.go index 17f87b5b33..f4173d9272 100644 --- a/execution/evm/execution.go +++ b/execution/evm/execution.go @@ -152,7 +152,7 @@ func retryWithBackoffOnPayloadStatus(ctx context.Context, fn func() error, maxRe // EngineRPCClient abstracts Engine API RPC calls for tracing and testing. type EngineRPCClient interface { // ForkchoiceUpdated updates the forkchoice state and optionally starts payload building. - ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args map[string]any) (*engine.ForkChoiceResponse, error) + ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args interface{}) (*engine.ForkChoiceResponse, error) // GetPayload retrieves a previously requested execution payload. GetPayload(ctx context.Context, payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) @@ -396,7 +396,16 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight } // 2. 
diff --git a/execution/evm/execution.go b/execution/evm/execution.go
index 17f87b5b33..f4173d9272 100644
--- a/execution/evm/execution.go
+++ b/execution/evm/execution.go
@@ -152,7 +152,7 @@ func retryWithBackoffOnPayloadStatus(ctx context.Context, fn func() error, maxRe
 // EngineRPCClient abstracts Engine API RPC calls for tracing and testing.
 type EngineRPCClient interface {
 	// ForkchoiceUpdated updates the forkchoice state and optionally starts payload building.
-	ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args map[string]any) (*engine.ForkChoiceResponse, error)
+	ForkchoiceUpdated(ctx context.Context, state engine.ForkchoiceStateV1, args any) (*engine.ForkChoiceResponse, error)
 
 	// GetPayload retrieves a previously requested execution payload.
 	GetPayload(ctx context.Context, payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error)
@@ -396,7 +396,16 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight
 	}
 
 	// 2. Prepare payload attributes
-	txsPayload := c.filterTransactions(txs)
+	// Use zero-alloc struct instead of map to avoid reflection overhead
+	evPayloadAttrs := PayloadAttributesV3{
+		Timestamp:             timestamp.Unix(),
+		PrevRandao:            c.derivePrevRandao(blockHeight).Hex(),
+		SuggestedFeeRecipient: c.feeRecipient.Hex(),
+		Withdrawals:           emptyWithdrawals,
+		ParentBeaconBlockRoot: zeroHashHex,
+		Transactions:          PayloadTransactions(txs),
+		GasLimit:              prevGasLimit,
+	}
 
 	// Cache parent block hash for safe-block lookups.
 	c.cacheBlockHash(blockHeight-1, prevBlockHash)
@@ -411,20 +420,6 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight
 	}
 	c.mu.Unlock()
 
-	// update forkchoice to get the next payload id
-	evPayloadAttrs := map[string]any{
-		// Standard Ethereum payload attributes (flattened) - using camelCase as expected by JSON
-		"timestamp":             timestamp.Unix(),
-		"prevRandao":            c.derivePrevRandao(blockHeight),
-		"suggestedFeeRecipient": c.feeRecipient,
-		"withdrawals":           emptyWithdrawals,
-		// V3 requires parentBeaconBlockRoot
-		"parentBeaconBlockRoot": zeroHashHex, // Use zero hash for evolve
-		// evolve-specific fields
-		"transactions": txsPayload,
-		"gasLimit":     prevGasLimit, // Use camelCase to match JSON conventions
-	}
-
 	c.logger.Debug().
 		Uint64("height", blockHeight).
 		Int("tx_count", len(txs)).
@@ -874,20 +869,6 @@ func (c *EngineClient) reconcileExecutionAtHeight(ctx context.Context, height ui
 	return nil, nil, false, nil
 }
 
-// filterTransactions formats transactions for the payload.
-// DA transactions should already be filtered via FilterTxs before reaching here.
-// Mempool transactions are already validated when added to mempool.
-func (c *EngineClient) filterTransactions(txs [][]byte) []string {
-	validTxs := make([]string, 0, len(txs))
-	for _, tx := range txs {
-		if len(tx) == 0 {
-			continue
-		}
-		validTxs = append(validTxs, "0x"+hex.EncodeToString(tx))
-	}
-	return validTxs
-}
-
 // GetExecutionInfo returns current execution layer parameters.
 func (c *EngineClient) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) {
 	if cached := c.cachedExecutionInfo.Load(); cached != nil {
@@ -1170,3 +1151,77 @@ func getAuthToken(jwtSecret []byte) (string, error) {
 	}
 	return authToken, nil
 }
+
+// PayloadAttributesV3 replaces the untyped map for better performance (no reflection)
+type PayloadAttributesV3 struct {
+	Timestamp             int64               `json:"timestamp"`
+	PrevRandao            string              `json:"prevRandao"`
+	SuggestedFeeRecipient string              `json:"suggestedFeeRecipient"`
+	Withdrawals           []*types.Withdrawal `json:"withdrawals"`
+	ParentBeaconBlockRoot string              `json:"parentBeaconBlockRoot"`
+	Transactions          PayloadTransactions `json:"transactions"`
+	GasLimit              uint64              `json:"gasLimit"`
+}
+
+// PayloadTransactions implements custom JSON marshaling to avoid intermediate string allocations.
+type PayloadTransactions [][]byte
+
+// MarshalJSON encodes transactions as a JSON array of hex strings directly to bytes.
+func (txs PayloadTransactions) MarshalJSON() ([]byte, error) {
+	if len(txs) == 0 {
+		return []byte("[]"), nil
+	}
+
+	// Pre-calculate the full size to perform a single allocation:
+	// 2 bytes for [] + (count-1) commas + per tx: 2 quotes + the 2-byte "0x" prefix + hex len
+	size := 2
+	count := 0
+	for _, tx := range txs {
+		if len(tx) == 0 {
+			continue
+		}
+		// comma
+		if count > 0 {
+			size++
+		}
+		// "0x..."
+		size += 4 + hex.EncodedLen(len(tx))
+		count++
+	}
+
+	if count == 0 {
+		return []byte("[]"), nil
+	}
+
+	buf := make([]byte, 0, size)
+	buf = append(buf, '[')
+	written := 0
+	for _, tx := range txs {
+		if len(tx) == 0 {
+			continue
+		}
+		if written > 0 {
+			buf = append(buf, ',')
+		}
+		buf = append(buf, '"', '0', 'x')
+
+		// Append the encoded hex directly
+		n := hex.EncodedLen(len(tx))
+		start := len(buf)
+		// Ensure capacity
+		if cap(buf) < start+n {
+			// Defensive growth; the pre-sized buffer above should already have room.
+			newBuf := make([]byte, len(buf), start+n*2)
+			copy(newBuf, buf)
+			buf = newBuf
+		}
+		// Expand the length, then encode in place into the new tail.
+		buf = buf[:start+n]
+		hex.Encode(buf[start:], tx)
+
+		buf = append(buf, '"')
+		written++
+	}
+	buf = append(buf, ']')
+	return buf, nil
+}
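Before the tests, a quick primer on the two stdlib calls the marshaler above leans on (stand-alone demo, not repo code): hex.EncodedLen reports exactly two output bytes per input byte, and hex.Encode writes straight into a caller-provided buffer without allocating.

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	tx := []byte{0x01, 0x02, 0x03}

	// EncodedLen(n) == 2*n: every input byte becomes two hex digits.
	n := hex.EncodedLen(len(tx))

	// Encode fills dst in place; no string or intermediate slice is created.
	dst := make([]byte, n)
	hex.Encode(dst, tx)

	fmt.Println(n, string(dst)) // 6 010203
}

Encoding directly into the single pre-sized buffer is what eliminates the per-transaction "0x"+hex.EncodeToString(tx) string allocations that the deleted filterTransactions helper performed.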
diff --git a/execution/evm/payload_test.go b/execution/evm/payload_test.go
new file mode 100644
index 0000000000..a8a53b111f
--- /dev/null
+++ b/execution/evm/payload_test.go
@@ -0,0 +1,101 @@
+package evm
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPayloadTransactionsMarshalJSON(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    [][]byte
+		expected string
+	}{
+		{
+			name:     "empty",
+			input:    [][]byte{},
+			expected: "[]",
+		},
+		{
+			name:     "nil",
+			input:    nil,
+			expected: "[]",
+		},
+		{
+			name: "single tx",
+			input: [][]byte{
+				{0x01, 0x02, 0x03},
+			},
+			expected: `["0x010203"]`,
+		},
+		{
+			name: "multiple txs",
+			input: [][]byte{
+				{0xaa, 0xbb},
+				{0xcc, 0xdd},
+			},
+			expected: `["0xaabb","0xccdd"]`,
+		},
+		{
+			name: "skip empty tx",
+			input: [][]byte{
+				{0x01},
+				{},
+				{0x02},
+			},
+			expected: `["0x01","0x02"]`,
+		},
+		{
+			name: "all empty",
+			input: [][]byte{
+				{},
+				{},
+			},
+			expected: "[]",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			pt := PayloadTransactions(tt.input)
+			b, err := pt.MarshalJSON()
+			require.NoError(t, err)
+			require.Equal(t, tt.expected, string(b))
+		})
+	}
+}
+
+func BenchmarkPayloadTransactionsMarshalJSON(b *testing.B) {
+	// Setup large payload
+	txs := make([][]byte, 1000)
+	for i := range txs {
+		txs[i] = make([]byte, 1024) // 1KB tx
+	}
+	pt := PayloadTransactions(txs)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, _ = pt.MarshalJSON()
+	}
+}
+
+func BenchmarkStandardJSONMarshal(b *testing.B) {
+	// Setup large payload
+	txs := make([][]byte, 1000)
+	for i := range txs {
+		txs[i] = make([]byte, 1024) // 1KB tx
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Simulate old behavior: allocate strings + json marshal
+		validTxs := make([]string, 0, len(txs))
+		for _, tx := range txs {
+			validTxs = append(validTxs, hexutil.Encode(tx))
+		}
+		_, _ = json.Marshal(validTxs)
+	}
+}
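One suggestion on the benchmark pair above: allocation counts are the headline metric for this patch, but they only appear in the output when the runner passes -benchmem. Calling (*testing.B).ReportAllocs() inside each benchmark reports allocs/op and B/op unconditionally; a sketch of the adjusted setup, same body with one added line:

func BenchmarkPayloadTransactionsMarshalJSON(b *testing.B) {
	// Setup large payload
	txs := make([][]byte, 1000)
	for i := range txs {
		txs[i] = make([]byte, 1024) // 1KB tx
	}
	pt := PayloadTransactions(txs)

	b.ReportAllocs() // surface allocs/op and B/op even without -benchmem
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = pt.MarshalJSON()
	}
}

The same line would fit BenchmarkStandardJSONMarshal, keeping the two results comparable regardless of how the CI job invokes go test.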
From f247a2c2d576cea36908af2dadcbeb5b349d439 Mon Sep 17 00:00:00 2001
From: Alex Peters
Date: Wed, 18 Feb 2026 10:08:21 +0100
Subject: [PATCH 7/7] x

---
 execution/evm/store.go | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/execution/evm/store.go b/execution/evm/store.go
index 21fe5008ae..76700dd01b 100644
--- a/execution/evm/store.go
+++ b/execution/evm/store.go
@@ -100,9 +100,9 @@ func NewEVMStore(db ds.Batching) *EVMStore {
 
 // execMetaKey returns the datastore key for ExecMeta at a given height.
 func execMetaKey(height uint64) ds.Key {
-	heightBytes := make([]byte, 8)
-	binary.BigEndian.PutUint64(heightBytes, height)
-	return ds.NewKey(evmStorePrefix + "execmeta/" + string(heightBytes))
+	var buf [8]byte
+	binary.BigEndian.PutUint64(buf[:], height)
+	return ds.NewKey(evmStorePrefix + "execmeta/" + string(buf[:]))
 }
 
 // GetExecMeta retrieves execution metadata for the given height.
@@ -133,7 +133,9 @@ func (s *EVMStore) GetExecMeta(ctx context.Context, height uint64) (*ExecMeta, e
 // SaveExecMeta persists execution metadata for the given height.
 func (s *EVMStore) SaveExecMeta(ctx context.Context, meta *ExecMeta) error {
 	key := execMetaKey(meta.Height)
-	data, err := proto.Marshal(meta.ToProto())
+	// Marshal into a fixed-size scratch buffer so small metadata avoids an extra allocation.
+	var buf [1024]byte
+	data, err := proto.MarshalOptions{}.MarshalAppend(buf[:0], meta.ToProto())
 	if err != nil {
 		return fmt.Errorf("failed to marshal exec meta: %w", err)
 	}
@@ -180,9 +182,9 @@ func (s *EVMStore) PruneExec(ctx context.Context, height uint64) error {
 	}
 
 	// Persist updated last pruned height.
-	buf := make([]byte, 8)
-	binary.BigEndian.PutUint64(buf, height)
-	if err := batch.Put(ctx, ds.NewKey(lastPrunedExecMetaKey), buf); err != nil {
+	var buf [8]byte
+	binary.BigEndian.PutUint64(buf[:], height)
+	if err := batch.Put(ctx, ds.NewKey(lastPrunedExecMetaKey), buf[:]); err != nil {
 		return fmt.Errorf("failed to update last pruned execmeta height: %w", err)
 	}
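A closing note on execMetaKey above: beyond the allocation tweak, the fixed-width big-endian encoding is what keeps byte-wise key comparison aligned with numeric height order, which any ordered scan over the execmeta prefix (for example, pruning up to a height) can rely on. A stand-alone illustration — heightKey is a hypothetical mirror of the encoding, not repo code:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// heightKey mirrors the fixed-width big-endian encoding used by execMetaKey.
func heightKey(h uint64) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], h)
	return buf[:]
}

func main() {
	// Big-endian keeps byte-wise order aligned with numeric order;
	// a little-endian encoding would sort 256 before 255.
	fmt.Println(bytes.Compare(heightKey(255), heightKey(256)) < 0) // true
}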