From aeaf7fa02204a2ed375071c920036c76fe789e96 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 18:00:57 +0100 Subject: [PATCH 01/11] feat: introduce EVM contract benchmarking with new tests and a GitHub Actions workflow. --- .github/workflows/benchmark.yml | 42 +++++++++++ execution/evm/test/test_helpers.go | 19 +++-- execution/evm/test_helpers.go | 2 +- test/e2e/evm_contract_bench_test.go | 104 ++++++++++++++++++++++++++++ test/e2e/evm_contract_e2e_test.go | 4 +- test/e2e/evm_test_common.go | 30 ++++---- test/e2e/sut_helper.go | 16 ++--- 7 files changed, 187 insertions(+), 30 deletions(-) create mode 100644 .github/workflows/benchmark.yml create mode 100644 test/e2e/evm_contract_bench_test.go diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..e34f615027 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,42 @@ +--- +name: Benchmarks +permissions: {} +"on": + push: + branches: + - main + workflow_dispatch: + +jobs: + evm-benchmark: + name: EVM Contract Benchmark + runs-on: ubuntu-latest + timeout-minutes: 30 + permissions: + contents: write + issues: write + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: ./go.mod + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 + - name: Build binaries + run: make build-evm build-da + - name: Run EVM benchmarks + run: | + cd test/e2e && go test -tags evm -bench=. 
-benchmem -run='^$' \ + -timeout=10m --evm-binary=../../build/evm | tee output.txt + - name: Store benchmark result + uses: benchmark-action/github-action-benchmark@4bdcce38c94cec68da58d012ac24b7b1155efe8b # v1.20.7 + with: + name: EVM Contract Roundtrip + tool: 'go' + output-file-path: test/e2e/output.txt + auto-push: true + github-token: ${{ secrets.GITHUB_TOKEN }} + alert-threshold: '150%' + fail-on-alert: true + comment-on-alert: true diff --git a/execution/evm/test/test_helpers.go b/execution/evm/test/test_helpers.go index d2c0500528..1aa4ec3175 100644 --- a/execution/evm/test/test_helpers.go +++ b/execution/evm/test/test_helpers.go @@ -18,6 +18,8 @@ import ( "github.com/celestiaorg/tastora/framework/types" "github.com/golang-jwt/jwt/v5" "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) // Test-scoped Docker client/network mapping to avoid conflicts between tests @@ -39,7 +41,7 @@ func randomString(n int) string { } // getTestScopedDockerSetup returns a Docker client and network ID that are scoped to the specific test. -func getTestScopedDockerSetup(t *testing.T) (types.TastoraDockerClient, string) { +func getTestScopedDockerSetup(t testing.TB) (types.TastoraDockerClient, string) { t.Helper() testKey := t.Name() @@ -59,13 +61,22 @@ func getTestScopedDockerSetup(t *testing.T) (types.TastoraDockerClient, string) } // SetupTestRethNode creates a single Reth node for testing purposes. -func SetupTestRethNode(t *testing.T) *reth.Node { +func SetupTestRethNode(t testing.TB) *reth.Node { t.Helper() ctx := context.Background() dockerCli, dockerNetID := getTestScopedDockerSetup(t) - n, err := reth.NewNodeBuilderWithTestName(t, fmt.Sprintf("%s-%s", t.Name(), randomString(6))). + testName := fmt.Sprintf("%s-%s", t.Name(), randomString(6)) + logger := zap.NewNop() + if testing.Verbose() { + logger = zaptest.NewLogger(t) + } + n, err := new(reth.NodeBuilder). + WithTestName(testName). + WithLogger(logger). 
+ WithImage(reth.DefaultImage()). + WithBin("ev-reth"). WithDockerClient(dockerCli). WithDockerNetworkID(dockerNetID). WithGenesis([]byte(reth.DefaultEvolveGenesisJSON())). @@ -88,7 +99,7 @@ func SetupTestRethNode(t *testing.T) *reth.Node { } // waitForRethContainer waits for the Reth container to be ready by polling the provided endpoints with JWT authentication. -func waitForRethContainer(t *testing.T, jwtSecret, ethURL, engineURL string) error { +func waitForRethContainer(t testing.TB, jwtSecret, ethURL, engineURL string) error { t.Helper() client := &http.Client{Timeout: 100 * time.Millisecond} timer := time.NewTimer(30 * time.Second) diff --git a/execution/evm/test_helpers.go b/execution/evm/test_helpers.go index 1e97d446da..157f028b27 100644 --- a/execution/evm/test_helpers.go +++ b/execution/evm/test_helpers.go @@ -16,7 +16,7 @@ import ( // Transaction Helpers // GetRandomTransaction creates and signs a random Ethereum legacy transaction using the provided private key, recipient, chain ID, gas limit, and nonce. 
-func GetRandomTransaction(t *testing.T, privateKeyHex, toAddressHex, chainID string, gasLimit uint64, lastNonce *uint64) *types.Transaction { +func GetRandomTransaction(t testing.TB, privateKeyHex, toAddressHex, chainID string, gasLimit uint64, lastNonce *uint64) *types.Transaction { t.Helper() privateKey, err := crypto.HexToECDSA(privateKeyHex) require.NoError(t, err) diff --git a/test/e2e/evm_contract_bench_test.go b/test/e2e/evm_contract_bench_test.go new file mode 100644 index 0000000000..3c83a3c2a5 --- /dev/null +++ b/test/e2e/evm_contract_bench_test.go @@ -0,0 +1,104 @@ +//go:build evm + +package e2e + +import ( + "context" + "math/big" + "path/filepath" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +// BenchmarkEvmContractRoundtrip measures the store → retrieve roundtrip latency +// against a real reth node with a pre-deployed contract. +// +// All transaction generation happens during setup. The timed loop exclusively +// measures: SendTransaction → wait for receipt → eth_call retrieve → verify. +// +// Run with (after building local-da and evm binaries): +// +// PATH="/path/to/binaries:$PATH" go test -tags evm \ +// -bench BenchmarkEvmContractRoundtrip -benchmem -benchtime=5x \ +// -run='^$' -timeout=10m --evm-binary=/path/to/evm . 
+func BenchmarkEvmContractRoundtrip(b *testing.B) { + workDir := b.TempDir() + sequencerHome := filepath.Join(workDir, "evm-bench-sequencer") + + client, _, cleanup := setupTestSequencer(b, sequencerHome) + defer cleanup() + + ctx := b.Context() + privateKey, err := crypto.HexToECDSA(TestPrivateKey) + require.NoError(b, err) + chainID, ok := new(big.Int).SetString(DefaultChainID, 10) + require.True(b, ok) + signer := types.NewEIP155Signer(chainID) + + // Deploy contract once during setup. + contractAddr, nonce := deployContract(b, ctx, client, StorageContractBytecode, 0, privateKey, chainID) + + // Pre-build signed store(42) transactions for all iterations. + storeData, err := hexutil.Decode("0x000000000000000000000000000000000000000000000000000000000000002a") + require.NoError(b, err) + + const maxIter = 1024 + signedTxs := make([]*types.Transaction, maxIter) + for i := range maxIter { + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce + uint64(i), + To: &contractAddr, + Value: big.NewInt(0), + Gas: 500000, + GasPrice: big.NewInt(30000000000), + Data: storeData, + }) + signedTxs[i], err = types.SignTx(tx, signer, privateKey) + require.NoError(b, err) + } + + expected := common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000002a").Bytes() + callMsg := ethereum.CallMsg{To: &contractAddr, Data: []byte{}} + + b.ResetTimer() + b.ReportAllocs() + + var i int + for b.Loop() { + require.Less(b, i, maxIter, "increase maxIter for longer benchmark runs") + + // 1. Submit pre-signed store(42) transaction. + err = client.SendTransaction(ctx, signedTxs[i]) + require.NoError(b, err) + + // 2. Wait for inclusion. + waitForReceipt(b, ctx, client, signedTxs[i].Hash()) + + // 3. Retrieve and verify. + result, err := client.CallContract(ctx, callMsg, nil) + require.NoError(b, err) + require.Equal(b, expected, result, "retrieve() should return 42") + + i++ + } +} + +// waitForReceipt polls for a transaction receipt until it is available. 
+func waitForReceipt(t testing.TB, ctx context.Context, client *ethclient.Client, txHash common.Hash) *types.Receipt { + t.Helper() + var receipt *types.Receipt + var err error + require.Eventually(t, func() bool { + receipt, err = client.TransactionReceipt(ctx, txHash) + return err == nil && receipt != nil + }, 2*time.Second, 50*time.Millisecond, "transaction %s not included", txHash.Hex()) + return receipt +} diff --git a/test/e2e/evm_contract_e2e_test.go b/test/e2e/evm_contract_e2e_test.go index 0203ca6345..b6c988a85a 100644 --- a/test/e2e/evm_contract_e2e_test.go +++ b/test/e2e/evm_contract_e2e_test.go @@ -240,7 +240,7 @@ func TestEvmContractEvents(t *testing.T) { // setupTestSequencer sets up a single sequencer node for testing. // Returns the ethclient, genesis hash, and a cleanup function. -func setupTestSequencer(t *testing.T, homeDir string) (*ethclient.Client, string, func()) { +func setupTestSequencer(t testing.TB, homeDir string) (*ethclient.Client, string, func()) { sut := NewSystemUnderTest(t) genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir) @@ -257,7 +257,7 @@ func setupTestSequencer(t *testing.T, homeDir string) (*ethclient.Client, string // deployContract helps deploy a contract and waits for its inclusion. // Returns the deployed contract address and the next nonce. 
-func deployContract(t *testing.T, ctx context.Context, client *ethclient.Client, bytecodeStr string, nonce uint64, privateKey *ecdsa.PrivateKey, chainID *big.Int) (common.Address, uint64) { +func deployContract(t testing.TB, ctx context.Context, client *ethclient.Client, bytecodeStr string, nonce uint64, privateKey *ecdsa.PrivateKey, chainID *big.Int) (common.Address, uint64) { bytecode, err := hexutil.Decode("0x" + bytecodeStr) require.NoError(t, err) diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index d5a7215168..85c3abf629 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -55,7 +55,7 @@ func getAvailablePort() (int, net.Listener, error) { } // same as getAvailablePort but fails test if not successful -func mustGetAvailablePort(t *testing.T) int { +func mustGetAvailablePort(t testing.TB) int { t.Helper() port, listener, err := getAvailablePort() require.NoError(t, err) @@ -221,7 +221,7 @@ const ( // createPassphraseFile creates a temporary passphrase file and returns its path. // The file is created in the provided directory with secure permissions (0600). // If the directory doesn't exist, it will be created with 0755 permissions. -func createPassphraseFile(t *testing.T, dir string) string { +func createPassphraseFile(t testing.TB, dir string) string { t.Helper() // Ensure the directory exists err := os.MkdirAll(dir, 0755) @@ -236,7 +236,7 @@ func createPassphraseFile(t *testing.T, dir string) string { // createJWTSecretFile creates a temporary JWT secret file and returns its path. // The file is created in the provided directory with secure permissions (0600). // If the directory doesn't exist, it will be created with 0755 permissions. 
-func createJWTSecretFile(t *testing.T, dir, jwtSecret string) string { +func createJWTSecretFile(t testing.TB, dir, jwtSecret string) string { t.Helper() // Ensure the directory exists err := os.MkdirAll(dir, 0755) @@ -256,7 +256,7 @@ func createJWTSecretFile(t *testing.T, dir, jwtSecret string) string { // - rpcPort: Optional RPC port to use (if empty, uses default port) // // Returns: The full P2P address (e.g., /ip4/127.0.0.1/tcp/7676/p2p/12D3KooW...) -func getNodeP2PAddress(t *testing.T, sut *SystemUnderTest, nodeHome string, rpcPort ...string) string { +func getNodeP2PAddress(t testing.TB, sut *SystemUnderTest, nodeHome string, rpcPort ...string) string { t.Helper() // Build command arguments @@ -313,7 +313,7 @@ func getNodeP2PAddress(t *testing.T, sut *SystemUnderTest, nodeHome string, rpcP // - jwtSecret: JWT secret for authenticating with EVM engine // - genesisHash: Hash of the genesis block for chain validation // - endpoints: TestEndpoints struct containing unique port assignments -func setupSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // Create passphrase file @@ -357,7 +357,7 @@ func setupSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSe // setupSequencerNodeLazy initializes and starts the sequencer node in lazy mode. // In lazy mode, blocks are only produced when transactions are available, // not on a regular timer. 
-func setupSequencerNodeLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNodeLazy(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // Create passphrase file @@ -417,7 +417,7 @@ func setupSequencerNodeLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, j // - genesisHash: Hash of the genesis block for chain validation // - sequencerP2PAddress: P2P address of the sequencer node to connect to // - endpoints: TestEndpoints struct containing unique port assignments -func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, sequencerP2PAddress string, endpoints *TestEndpoints) { +func setupFullNode(t testing.TB, sut *SystemUnderTest, fullNodeHome, sequencerHome, fullNodeJwtSecret, genesisHash, sequencerP2PAddress string, endpoints *TestEndpoints) { t.Helper() // Initialize full node @@ -478,7 +478,7 @@ var globalNonce uint64 = 0 // // This is used in full node sync tests to verify that both nodes // include the same transaction in the same block number. 
-func submitTransactionAndGetBlockNumber(t *testing.T, sequencerClients ...*ethclient.Client) (common.Hash, uint64) { +func submitTransactionAndGetBlockNumber(t testing.TB, sequencerClients ...*ethclient.Client) (common.Hash, uint64) { t.Helper() // Submit transaction to sequencer EVM with unique nonce @@ -512,7 +512,7 @@ func submitTransactionAndGetBlockNumber(t *testing.T, sequencerClients ...*ethcl // - daPort: optional DA port to use (if empty, uses default) // // Returns: jwtSecret, fullNodeJwtSecret (empty if needsFullNode=false), genesisHash -func setupCommonEVMTest(t *testing.T, sut *SystemUnderTest, needsFullNode bool, _ ...string) (string, string, string, *TestEndpoints) { +func setupCommonEVMTest(t testing.TB, sut *SystemUnderTest, needsFullNode bool, _ ...string) (string, string, string, *TestEndpoints) { t.Helper() // Reset global nonce for each test to ensure clean state @@ -570,7 +570,7 @@ func setupCommonEVMTest(t *testing.T, sut *SystemUnderTest, needsFullNode bool, // - blockHeight: Height of the block to retrieve (use nil for latest) // // Returns: block hash, state root, transaction count, block number, and error -func checkBlockInfoAt(t *testing.T, ethURL string, blockHeight *uint64) (common.Hash, common.Hash, int, uint64, error) { +func checkBlockInfoAt(t testing.TB, ethURL string, blockHeight *uint64) (common.Hash, common.Hash, int, uint64, error) { t.Helper() ctx := context.Background() @@ -613,7 +613,7 @@ func checkBlockInfoAt(t *testing.T, ethURL string, blockHeight *uint64) (common. 
// - nodeHome: Directory path for sequencer node data // // Returns: genesisHash for the sequencer -func setupSequencerOnlyTest(t *testing.T, sut *SystemUnderTest, nodeHome string) (string, string) { +func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string) (string, string) { t.Helper() // Use common setup (no full node needed) @@ -635,7 +635,7 @@ func setupSequencerOnlyTest(t *testing.T, sut *SystemUnderTest, nodeHome string) // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartDAAndSequencer(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func restartDAAndSequencer(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // First restart the local DA @@ -685,7 +685,7 @@ func restartDAAndSequencer(t *testing.T, sut *SystemUnderTest, sequencerHome, jw // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartDAAndSequencerLazy(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func restartDAAndSequencerLazy(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { t.Helper() // First restart the local DA @@ -736,7 +736,7 @@ func restartDAAndSequencerLazy(t *testing.T, sut *SystemUnderTest, sequencerHome // - sequencerHome: Directory path for sequencer node data // - jwtSecret: JWT secret for sequencer's EVM engine authentication // - genesisHash: Hash of the genesis block for chain validation -func restartSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string) { +func 
restartSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string) { t.Helper() // Start sequencer node (without init - node already exists) @@ -772,7 +772,7 @@ func restartSequencerNode(t *testing.T, sut *SystemUnderTest, sequencerHome, jwt // - nodeName: Human-readable name for logging (e.g., "sequencer", "full node") // // This function ensures that during lazy mode idle periods, no automatic block production occurs. -func verifyNoBlockProduction(t *testing.T, client *ethclient.Client, duration time.Duration, nodeName string) { +func verifyNoBlockProduction(t testing.TB, client *ethclient.Client, duration time.Duration, nodeName string) { t.Helper() ctx := context.Background() diff --git a/test/e2e/sut_helper.go b/test/e2e/sut_helper.go index beba2b6195..f5783da8bb 100644 --- a/test/e2e/sut_helper.go +++ b/test/e2e/sut_helper.go @@ -33,7 +33,7 @@ var WorkDir = "." // SystemUnderTest is used to manage processes and logs during test execution. type SystemUnderTest struct { - t *testing.T + t testing.TB outBuff *ring.Ring errBuff *ring.Ring @@ -45,7 +45,7 @@ type SystemUnderTest struct { } // NewSystemUnderTest constructor -func NewSystemUnderTest(t *testing.T) *SystemUnderTest { +func NewSystemUnderTest(t testing.TB) *SystemUnderTest { r := &SystemUnderTest{ t: t, pids: make(map[int]struct{}), @@ -103,7 +103,7 @@ func (s *SystemUnderTest) ExecCmdWithLogPrefix(prefix, cmd string, args ...strin // AwaitNodeUp waits until a node is operational by checking both liveness and readiness. // Fails tests when node is not up within the specified timeout. 
-func (s *SystemUnderTest) AwaitNodeUp(t *testing.T, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNodeUp(t testing.TB, rpcAddr string, timeout time.Duration) { t.Helper() t.Logf("Await node is up: %s", rpcAddr) require.EventuallyWithT(t, func(t *assert.CollectT) { @@ -120,7 +120,7 @@ func (s *SystemUnderTest) AwaitNodeUp(t *testing.T, rpcAddr string, timeout time } // AwaitNodeLive waits until a node is alive (liveness check only). -func (s *SystemUnderTest) AwaitNodeLive(t *testing.T, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNodeLive(t testing.TB, rpcAddr string, timeout time.Duration) { t.Helper() t.Logf("Await node is live: %s", rpcAddr) require.EventuallyWithT(t, func(t *assert.CollectT) { @@ -132,7 +132,7 @@ func (s *SystemUnderTest) AwaitNodeLive(t *testing.T, rpcAddr string, timeout ti } // AwaitNBlocks waits until the node has produced at least `n` blocks. -func (s *SystemUnderTest) AwaitNBlocks(t *testing.T, n uint64, rpcAddr string, timeout time.Duration) { +func (s *SystemUnderTest) AwaitNBlocks(t testing.TB, n uint64, rpcAddr string, timeout time.Duration) { t.Helper() ctx, done := context.WithTimeout(context.Background(), timeout) defer done() @@ -344,7 +344,7 @@ func locateExecutable(file string) string { } // MustCopyFile copies the file from the source path `src` to the destination path `dest` and returns an open file handle to `dest`. -func MustCopyFile(t *testing.T, src, dest string) *os.File { +func MustCopyFile(t testing.TB, src, dest string) *os.File { t.Helper() in, err := os.Open(src) // nolint: gosec // used by tests only require.NoError(t, err) @@ -362,11 +362,11 @@ func MustCopyFile(t *testing.T, src, dest string) *os.File { } // NodeID generates and returns the peer ID from the node's private key. 
-func NodeID(t *testing.T, nodeDir string) peer.ID { +func NodeID(t testing.TB, nodeDir string) peer.ID { t.Helper() node1Key, err := key.LoadNodeKey(filepath.Join(nodeDir, "config")) require.NoError(t, err) node1ID, err := peer.IDFromPrivateKey(node1Key.PrivKey) require.NoError(t, err) return node1ID -} \ No newline at end of file +} From c3228387fc0f4ff6c21222662a777883625b93f0 Mon Sep 17 00:00:00 2001 From: Alex Peters Date: Tue, 17 Feb 2026 19:08:13 +0100 Subject: [PATCH 02/11] Capture otel --- test/e2e/evm_contract_bench_test.go | 198 +++++++++++++++++++++++++++- test/e2e/evm_contract_e2e_test.go | 4 +- test/e2e/evm_test_common.go | 7 +- test/e2e/go.mod | 4 +- 4 files changed, 202 insertions(+), 11 deletions(-) diff --git a/test/e2e/evm_contract_bench_test.go b/test/e2e/evm_contract_bench_test.go index 3c83a3c2a5..b03e41e2bc 100644 --- a/test/e2e/evm_contract_bench_test.go +++ b/test/e2e/evm_contract_bench_test.go @@ -4,8 +4,15 @@ package e2e import ( "context" + "encoding/json" + "fmt" + "io" "math/big" + "net" + "net/http" "path/filepath" + "sort" + "sync" "testing" "time" @@ -16,24 +23,40 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + collpb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // BenchmarkEvmContractRoundtrip measures the store → retrieve roundtrip latency // against a real reth node with a pre-deployed contract. // -// All transaction generation happens during setup. The timed loop exclusively -// measures: SendTransaction → wait for receipt → eth_call retrieve → verify. +// The node is started with OpenTelemetry tracing enabled, exporting to an +// in-process OTLP/HTTP receiver. 
After the timed loop, the collected spans are +// aggregated into a hierarchical timing report showing where time is spent +// inside ev-node (Engine API calls, executor, sequencer, etc). // // Run with (after building local-da and evm binaries): // // PATH="/path/to/binaries:$PATH" go test -tags evm \ // -bench BenchmarkEvmContractRoundtrip -benchmem -benchtime=5x \ -// -run='^$' -timeout=10m --evm-binary=/path/to/evm . +// -run='^$' -timeout=10m -v --evm-binary=/path/to/evm . func BenchmarkEvmContractRoundtrip(b *testing.B) { workDir := b.TempDir() sequencerHome := filepath.Join(workDir, "evm-bench-sequencer") - client, _, cleanup := setupTestSequencer(b, sequencerHome) + // Start an in-process OTLP/HTTP receiver to collect traces from ev-node. + collector := newOTLPCollector(b) + defer collector.close() + + // Start sequencer with tracing enabled, exporting to our in-process collector. + client, _, cleanup := setupTestSequencer(b, sequencerHome, + "--evnode.instrumentation.tracing=true", + "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), + "--evnode.instrumentation.tracing_sample_rate", "1.0", + "--evnode.instrumentation.tracing_service_name", "ev-node-bench", + ) defer cleanup() ctx := b.Context() @@ -89,6 +112,173 @@ func BenchmarkEvmContractRoundtrip(b *testing.B) { i++ } + + b.StopTimer() + + // Give the node a moment to flush pending span batches. + time.Sleep(2 * time.Second) + + // Print the trace breakdown from the collected spans. + printCollectedTraceReport(b, collector) +} + +// --- In-process OTLP/HTTP Collector --- + +// otlpCollector is a lightweight OTLP/HTTP receiver that collects trace spans +// in memory. It serves the /v1/traces endpoint that the node's OTLP exporter +// posts protobuf-encoded ExportTraceServiceRequest messages to. 
+type otlpCollector struct { + mu sync.Mutex + spans []*tracepb.Span + server *http.Server + addr string +} + +func newOTLPCollector(t testing.TB) *otlpCollector { + t.Helper() + + c := &otlpCollector{} + + mux := http.NewServeMux() + mux.HandleFunc("/v1/traces", c.handleTraces) + + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + c.addr = listener.Addr().String() + + c.server = &http.Server{Handler: mux} + go func() { _ = c.server.Serve(listener) }() + + t.Logf("OTLP collector listening on %s", c.addr) + return c +} + +func (c *otlpCollector) endpoint() string { + return "http://" + c.addr +} + +func (c *otlpCollector) close() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = c.server.Shutdown(ctx) +} + +func (c *otlpCollector) handleTraces(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Try protobuf first (default for otlptracehttp). + var req collpb.ExportTraceServiceRequest + if err := proto.Unmarshal(body, &req); err != nil { + // Fallback: try JSON (some configurations use JSON encoding). + if jsonErr := json.Unmarshal(body, &req); jsonErr != nil { + http.Error(w, fmt.Sprintf("proto: %v; json: %v", err, jsonErr), http.StatusBadRequest) + return + } + } + + c.mu.Lock() + for _, rs := range req.GetResourceSpans() { + for _, ss := range rs.GetScopeSpans() { + c.spans = append(c.spans, ss.GetSpans()...) + } + } + c.mu.Unlock() + + // Respond with an empty ExportTraceServiceResponse (protobuf). 
+ resp := &collpb.ExportTraceServiceResponse{} + out, _ := proto.Marshal(resp) + w.Header().Set("Content-Type", "application/x-protobuf") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(out) +} + +func (c *otlpCollector) getSpans() []*tracepb.Span { + c.mu.Lock() + defer c.mu.Unlock() + cp := make([]*tracepb.Span, len(c.spans)) + copy(cp, c.spans) + return cp +} + +// printCollectedTraceReport aggregates collected spans by operation name and +// prints a timing breakdown. +func printCollectedTraceReport(b testing.TB, collector *otlpCollector) { + b.Helper() + + spans := collector.getSpans() + if len(spans) == 0 { + b.Logf("WARNING: no spans collected from ev-node") + return + } + + type stats struct { + count int + total time.Duration + min time.Duration + max time.Duration + } + m := make(map[string]*stats) + + for _, span := range spans { + // Duration: end - start in nanoseconds. + d := time.Duration(span.GetEndTimeUnixNano()-span.GetStartTimeUnixNano()) * time.Nanosecond + if d <= 0 { + continue + } + name := span.GetName() + s, ok := m[name] + if !ok { + s = &stats{min: d, max: d} + m[name] = s + } + s.count++ + s.total += d + if d < s.min { + s.min = d + } + if d > s.max { + s.max = d + } + } + + // Sort by total time descending. + names := make([]string, 0, len(m)) + for name := range m { + names = append(names, name) + } + sort.Slice(names, func(i, j int) bool { + return m[names[i]].total > m[names[j]].total + }) + + // Calculate overall total for percentages. 
+ var overallTotal time.Duration + for _, s := range m { + overallTotal += s.total + } + + b.Logf("\n--- ev-node Trace Breakdown (%d spans collected) ---", len(spans)) + b.Logf("%-40s %6s %12s %12s %12s %7s", "OPERATION", "COUNT", "AVG", "MIN", "MAX", "% TOTAL") + for _, name := range names { + s := m[name] + avg := s.total / time.Duration(s.count) + pct := float64(s.total) / float64(overallTotal) * 100 + b.Logf("%-40s %6d %12s %12s %12s %6.1f%%", name, s.count, avg, s.min, s.max, pct) + } + + b.Logf("\n--- Time Distribution ---") + for _, name := range names { + s := m[name] + pct := float64(s.total) / float64(overallTotal) * 100 + bar := "" + for range int(pct / 2) { + bar += "█" + } + b.Logf("%-40s %5.1f%% %s", name, pct, bar) + } } // waitForReceipt polls for a transaction receipt until it is available. diff --git a/test/e2e/evm_contract_e2e_test.go b/test/e2e/evm_contract_e2e_test.go index b6c988a85a..477b0801be 100644 --- a/test/e2e/evm_contract_e2e_test.go +++ b/test/e2e/evm_contract_e2e_test.go @@ -240,10 +240,10 @@ func TestEvmContractEvents(t *testing.T) { // setupTestSequencer sets up a single sequencer node for testing. // Returns the ethclient, genesis hash, and a cleanup function. -func setupTestSequencer(t testing.TB, homeDir string) (*ethclient.Client, string, func()) { +func setupTestSequencer(t testing.TB, homeDir string, extraArgs ...string) (*ethclient.Client, string, func()) { sut := NewSystemUnderTest(t) - genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir) + genesisHash, seqEthURL := setupSequencerOnlyTest(t, sut, homeDir, extraArgs...) 
t.Logf("Sequencer started at %s (Genesis: %s)", seqEthURL, genesisHash) client, err := ethclient.Dial(seqEthURL) diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index 85c3abf629..33518097f9 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -313,7 +313,7 @@ func getNodeP2PAddress(t testing.TB, sut *SystemUnderTest, nodeHome string, rpcP // - jwtSecret: JWT secret for authenticating with EVM engine // - genesisHash: Hash of the genesis block for chain validation // - endpoints: TestEndpoints struct containing unique port assignments -func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints) { +func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSecret, genesisHash string, endpoints *TestEndpoints, extraArgs ...string) { t.Helper() // Create passphrase file @@ -350,6 +350,7 @@ func setupSequencerNode(t testing.TB, sut *SystemUnderTest, sequencerHome, jwtSe "--evm.engine-url", endpoints.GetSequencerEngineURL(), "--evm.eth-url", endpoints.GetSequencerEthURL(), } + args = append(args, extraArgs...) sut.ExecCmd(evmSingleBinaryPath, args...) sut.AwaitNodeUp(t, endpoints.GetRollkitRPCAddress(), NodeStartupTimeout) } @@ -613,14 +614,14 @@ func checkBlockInfoAt(t testing.TB, ethURL string, blockHeight *uint64) (common. 
// - nodeHome: Directory path for sequencer node data // // Returns: genesisHash for the sequencer -func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string) (string, string) { +func setupSequencerOnlyTest(t testing.TB, sut *SystemUnderTest, nodeHome string, extraArgs ...string) (string, string) { t.Helper() // Use common setup (no full node needed) jwtSecret, _, genesisHash, endpoints := setupCommonEVMTest(t, sut, false) // Initialize and start sequencer node - setupSequencerNode(t, sut, nodeHome, jwtSecret, genesisHash, endpoints) + setupSequencerNode(t, sut, nodeHome, jwtSecret, genesisHash, endpoints, extraArgs...) t.Log("Sequencer node is up") return genesisHash, endpoints.GetSequencerEthURL() diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 9ef0dae15b..42d0de4b83 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -15,6 +15,8 @@ require ( github.com/libp2p/go-libp2p v0.47.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.40.0 + go.opentelemetry.io/otel/sdk v1.40.0 google.golang.org/protobuf v1.36.11 ) @@ -288,11 +290,9 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/dig v1.19.0 // indirect From bda012c3e43467066cde9e3f55af20c31fc5d350 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 18 Feb 2026 12:20:25 +0000 Subject: [PATCH 03/11] wip: basic spamoor test running and reporting metrics --- test/e2e/evm_spamoor_e2e_test.go | 250 
+++++++++++++++++++++++++++++++ test/e2e/go.mod | 5 +- test/e2e/spamoor_api.go | 144 ++++++++++++++++++ 3 files changed, 397 insertions(+), 2 deletions(-) create mode 100644 test/e2e/evm_spamoor_e2e_test.go create mode 100644 test/e2e/spamoor_api.go diff --git a/test/e2e/evm_spamoor_e2e_test.go b/test/e2e/evm_spamoor_e2e_test.go new file mode 100644 index 0000000000..13e3f34d56 --- /dev/null +++ b/test/e2e/evm_spamoor_e2e_test.go @@ -0,0 +1,250 @@ +//go:build evm

+package e2e

+import (
+ "bytes"
+ "context"
+ "fmt"
+ "net/http"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"

+ tastoradocker "github.com/celestiaorg/tastora/framework/docker"
+ reth "github.com/celestiaorg/tastora/framework/docker/evstack/reth"
+ spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor"
+ "go.uber.org/zap"
+)

+// TestSpamoorMetricsViaDaemon starts a sequencer and runs a basic spamoor scenario
+// against the sequencer's ETH RPC. It also attempts to scrape metrics from a
+// configured metrics endpoint if supported by the spamoor binary.
+// Optional: metrics via spamoor-daemon. Best-effort and skipped if daemon not available. 
+func TestSpamoorMetricsViaDaemon(t *testing.T) { + t.Parallel() + + sut := NewSystemUnderTest(t) + workDir := t.TempDir() + + // Prepare dynamic ports for rollkit + DA + endpoints, err := generateTestEndpoints() + if err != nil { + t.Fatalf("failed to generate endpoints: %v", err) + } + + // Start local DA on chosen port + localDABinary := "local-da" + if evmSingleBinaryPath != "evm" { + localDABinary = filepath.Join(filepath.Dir(evmSingleBinaryPath), "local-da") + } + sut.ExecCmd(localDABinary, "-port", endpoints.DAPort) + t.Logf("Started local DA on port %s", endpoints.DAPort) + + // Bring up a Reth node in Docker (same network we'll use for Spamoor) + dockerCli, dockerNetID := tastoradocker.Setup(t) + logger, _ := zap.NewDevelopment() + t.Cleanup(func() { _ = logger.Sync() }) + + ctx := context.Background() + rethNode, err := reth.NewNodeBuilder(t). + WithDockerClient(dockerCli). + WithDockerNetworkID(dockerNetID). + WithLogger(logger). + WithGenesis([]byte(reth.DefaultEvolveGenesisJSON())). 
+ Build(ctx) + if err != nil { + t.Fatalf("failed to build reth node: %v", err) + } + t.Cleanup(func() { _ = rethNode.Remove(context.Background()) }) + if err := rethNode.Start(ctx); err != nil { + t.Fatalf("failed to start reth node: %v", err) + } + + // Gather JWT and genesis hash for sequencer + networkInfo, err := rethNode.GetNetworkInfo(ctx) + if err != nil { + t.Fatalf("failed to get reth network info: %v", err) + } + seqJWT := rethNode.JWTSecretHex() + genesisHash, err := rethNode.GenesisHash(ctx) + if err != nil { + t.Fatalf("failed to get genesis hash: %v", err) + } + + // Ensure Reth RPC is actually responding before starting spamoor + rethRPC := "http://127.0.0.1:" + networkInfo.External.Ports.RPC + requireJSONRPC(t, rethRPC, 30*time.Second) + + // Fill in reth host-mapped ports for the sequencer + endpoints.SequencerEthPort = networkInfo.External.Ports.RPC + endpoints.SequencerEnginePort = networkInfo.External.Ports.Engine + + // Start sequencer node (host process) connected to reth + sequencerHome := filepath.Join(workDir, "sequencer") + setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints) + t.Log("Sequencer node is up") + + // Run spamoor-daemon via tastora container node using the maintained Docker image + // so that CI only needs Docker, not local binaries. + // Build spamoor container node in the SAME docker network + // It can reach Reth via internal IP:port + internalRPC := "http://" + networkInfo.Internal.IP + ":" + networkInfo.Internal.Ports.RPC + spBuilder := spamoor.NewNodeBuilder(t.Name()). + WithDockerClient(dockerCli). + WithDockerNetworkID(dockerNetID). + WithLogger(logger). + WithRPCHosts(internalRPC). + WithPrivateKey(TestPrivateKey). 
+ WithHostPort(0) + spNode, err := spBuilder.Build(ctx) + if err != nil { + t.Skipf("cannot build spamoor container: %v", err) + return + } + t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) + + if err := spNode.Start(ctx); err != nil { + t.Skipf("cannot start spamoor container: %v", err) + return + } + + // Discover host-mapped ports from Docker + spInfo, err := spNode.GetNetworkInfo(ctx) + if err != nil { + t.Fatalf("failed to get spamoor network info: %v", err) + } + apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP + metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics + // Wait for the daemon HTTP server to accept connections (use HTTP port) + requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) + api := NewSpamoorAPI(apiAddr) + // Config YAML for eoatx (no 'count' — run for a short window) + // Increase throughput and wallet pool to ensure visible on-chain activity for tx metrics + cfg := strings.Join([]string{ + "throughput: 60", + "max_pending: 1000", + "max_wallets: 200", + "amount: 100", + "random_amount: true", + "random_target: true", + "refill_amount: 1000000000000000000", // 1 ETH + "refill_balance: 500000000000000000", // 0.5 ETH + "refill_interval: 600", + }, "\n") + spammerID, err := api.CreateSpammer("e2e-eoatx", "eoatx", cfg, true) + if err != nil { + t.Fatalf("failed to create/start spammer: %v", err) + } + t.Cleanup(func() { _ = api.DeleteSpammer(spammerID) }) + + // Wait a bit for the spammer to be running + runUntil := time.Now().Add(10 * time.Second) + for time.Now().Before(runUntil) { + s, _ := api.GetSpammer(spammerID) + if s != nil && s.Status == 1 { // running + break + } + time.Sleep(200 * time.Millisecond) + } + // Allow additional time to generate activity for metrics + time.Sleep(20 * time.Second) + + // Dump metrics while/after running from the dedicated metrics port + metricsAPI := NewSpamoorAPI(metricsAddr) + // Poll metrics until spamoor-specific metrics appear or timeout + var m string + 
metricsDeadline := time.Now().Add(30 * time.Second)
+ for {
+ m, err = metricsAPI.GetMetrics()
+ if err == nil && strings.Contains(m, "spamoor") {
+ break
+ }
+ if time.Now().After(metricsDeadline) {
+ break
+ }
+ time.Sleep(500 * time.Millisecond)
+ }
+ if err != nil {
+ t.Logf("metrics not available: %v", err)
+ return
+ }
+ if len(strings.TrimSpace(m)) == 0 {
+ t.Log("empty metrics from daemon")
+ return
+ }
+ // Print a short sample of overall metrics
+ t.Logf("daemon metrics sample:\n%s", firstLines(m, 40))
+ // Additionally, extract spamoor-specific metrics if present
+ var spamoorLines []string
+ for _, line := range strings.Split(m, "\n") {
+ if strings.Contains(line, "spamoor") {
+ spamoorLines = append(spamoorLines, line)
+ if len(spamoorLines) >= 100 {
+ break
+ }
+ }
+ }
+ if len(spamoorLines) > 0 {
+ t.Logf("spamoor metrics (subset):\n%s", strings.Join(spamoorLines, "\n"))
+ } else {
+ t.Fatalf("no spamoor-prefixed metrics found; increase runtime/throughput or verify tx metrics are enabled")
+ }
+
+ // NOTE(review): removed debug leftover `time.Sleep(time.Hour)` — it stalled the test (and CI) for an hour after all assertions completed
+}
+
+// firstLines returns up to n lines of s.
+func firstLines(s string, n int) string {
+ lines := strings.Split(s, "\n")
+ if len(lines) > n {
+ lines = lines[:n]
+ }
+ return strings.Join(lines, "\n")
+}
+
+// requireHTTP polls a URL until it returns a 200-range response or the timeout expires.
+func requireHTTP(t *testing.T, url string, timeout time.Duration) {
+ t.Helper()
+ client := &http.Client{Timeout: 200 * time.Millisecond}
+ deadline := time.Now().Add(timeout)
+ var lastErr error
+ for time.Now().Before(deadline) {
+ resp, err := client.Get(url)
+ if err == nil {
+ _ = resp.Body.Close()
+ if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+ return
+ }
+ lastErr = fmt.Errorf("status %d", resp.StatusCode)
+ } else {
+ lastErr = err
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ t.Fatalf("daemon not ready at %s: %v", url, lastErr)
+}
+
+// requireJSONRPC checks that the ETH RPC responds to a net_version request. 
+func requireJSONRPC(t *testing.T, url string, timeout time.Duration) { + t.Helper() + client := &http.Client{Timeout: 400 * time.Millisecond} + deadline := time.Now().Add(timeout) + payload := []byte(`{"jsonrpc":"2.0","method":"net_version","params":[],"id":1}`) + var lastErr error + for time.Now().Before(deadline) { + resp, err := client.Post(url, "application/json", bytes.NewReader(payload)) + if err == nil { + _ = resp.Body.Close() + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return + } + lastErr = fmt.Errorf("status %d", resp.StatusCode) + } else { + lastErr = err + } + time.Sleep(200 * time.Millisecond) + } + t.Fatalf("reth rpc not ready at %s: %v", url, lastErr) +} diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 6baa4a3eaf..db7921db07 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -8,6 +8,7 @@ require ( github.com/celestiaorg/tastora v0.12.0 github.com/cosmos/cosmos-sdk v0.53.6 github.com/cosmos/ibc-go/v8 v8.8.0 + github.com/docker/go-connections v0.5.0 github.com/ethereum/go-ethereum v1.16.8 github.com/evstack/ev-node v1.0.0-rc.3 github.com/evstack/ev-node/execution/evm v0.0.0-20250602130019-2a732cf903a5 @@ -15,6 +16,7 @@ require ( github.com/libp2p/go-libp2p v0.47.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 + go.uber.org/zap v1.27.1 google.golang.org/protobuf v1.36.11 ) @@ -23,6 +25,7 @@ replace ( github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm github.com/evstack/ev-node/execution/evm/test => ../../execution/evm/test + github.com/celestiaorg/tastora => /Users/chatton/checkouts/celestiaorg/tastora ) require ( @@ -98,7 +101,6 @@ require ( github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v28.5.2+incompatible // indirect - github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dunglas/httpsfv v1.1.0 // indirect 
github.com/dustin/go-humanize v1.0.1 // indirect @@ -299,7 +301,6 @@ require ( go.uber.org/fx v1.24.0 // indirect go.uber.org/mock v0.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/arch v0.17.0 // indirect diff --git a/test/e2e/spamoor_api.go b/test/e2e/spamoor_api.go new file mode 100644 index 0000000000..17d0aac7e7 --- /dev/null +++ b/test/e2e/spamoor_api.go @@ -0,0 +1,144 @@ +//go:build evm + +package e2e + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" +) + +// SpamoorAPI is a thin HTTP client for the spamoor-daemon API +type SpamoorAPI struct { + BaseURL string // e.g., http://127.0.0.1:8080 + client *http.Client +} + +func NewSpamoorAPI(baseURL string) *SpamoorAPI { + return &SpamoorAPI{BaseURL: baseURL, client: &http.Client{Timeout: 2 * time.Second}} +} + +type createSpammerReq struct { + Name string `json:"name"` + Description string `json:"description"` + Scenario string `json:"scenario"` + ConfigYAML string `json:"config"` + StartImmediately bool `json:"startImmediately"` +} + +// CreateSpammer posts a new spammer; returns its ID. 
+func (api *SpamoorAPI) CreateSpammer(name, scenario, configYAML string, start bool) (int, error) { + reqBody := createSpammerReq{Name: name, Description: name, Scenario: scenario, ConfigYAML: configYAML, StartImmediately: start} + b, _ := json.Marshal(reqBody) + url := fmt.Sprintf("%s/api/spammer", api.BaseURL) + resp, err := api.client.Post(url, "application/json", bytes.NewReader(b)) + if err != nil { + return 0, err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return 0, fmt.Errorf("create spammer failed: %s", string(body)) + } + var id int + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&id); err != nil { + return 0, fmt.Errorf("decode id: %w", err) + } + return id, nil +} + +// ValidateScenarioConfig attempts to create a spammer with the provided scenario/config +// without starting it, and deletes it immediately if creation succeeds. It returns +// a descriptive error when the daemon rejects the config. +func (api *SpamoorAPI) ValidateScenarioConfig(name, scenario, configYAML string) error { + id, err := api.CreateSpammer(name, scenario, configYAML, false) + if err != nil { + return fmt.Errorf("invalid scenario config: %w", err) + } + // Best-effort cleanup of the temporary spammer + _ = api.DeleteSpammer(id) + return nil +} + +// DeleteSpammer deletes an existing spammer by ID. +func (api *SpamoorAPI) DeleteSpammer(id int) error { + url := fmt.Sprintf("%s/api/spammer/%d", api.BaseURL, id) + req, _ := http.NewRequest(http.MethodDelete, url, http.NoBody) + resp, err := api.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("delete spammer failed: %s", string(body)) + } + return nil +} + +// StartSpammer sends a start request for a given spammer ID. 
+func (api *SpamoorAPI) StartSpammer(id int) error { + url := fmt.Sprintf("%s/api/spammer/%d/start", api.BaseURL, id) + req, _ := http.NewRequest(http.MethodPost, url, http.NoBody) + resp, err := api.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("start spammer failed: %s", string(body)) + } + return nil +} + +// GetMetrics fetches the Prometheus /metrics endpoint from the daemon. +func (api *SpamoorAPI) GetMetrics() (string, error) { + url := fmt.Sprintf("%s/metrics", api.BaseURL) + resp, err := api.client.Get(url) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("metrics request failed: %s", string(body)) + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(b), nil +} + +// Spammer represents a spammer resource minimally for status checks. +type Spammer struct { + ID int `json:"id"` + Name string `json:"name"` + Scenario string `json:"scenario"` + Status int `json:"status"` +} + +// GetSpammer retrieves a spammer by ID. 
+func (api *SpamoorAPI) GetSpammer(id int) (*Spammer, error) { + url := fmt.Sprintf("%s/api/spammer/%d", api.BaseURL, id) + resp, err := api.client.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("get spammer failed: %s", string(body)) + } + var s Spammer + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&s); err != nil { + return nil, err + } + return &s, nil +} From 9c35f6d4d2482de9a268d33495adc720a32de4c9 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 18 Feb 2026 14:44:06 +0000 Subject: [PATCH 04/11] chore: adding trance benchmark e2e --- .../evm_spamoor_trace_benchmark_e2e_test.go | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 test/e2e/evm_spamoor_trace_benchmark_e2e_test.go diff --git a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go new file mode 100644 index 0000000000..41b1a2d535 --- /dev/null +++ b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go @@ -0,0 +1,186 @@ +//go:build evm + +package e2e + +import ( + "context" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" + + spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" +) + +// TestSpamoorTraceBenchmark spins up reth (Docker via Tastora), local-da, ev-node (host binary) +// with OTEL tracing exported to an in-process OTLP/HTTP collector, and a Spamoor container +// driving EOA transfers directly to reth's internal ETH RPC. It runs a short load, then +// emits a concise metrics/trace summary and fails if activity or spans are missing. +func TestSpamoorTraceBenchmark(t *testing.T) { + t.Parallel() + + sut := NewSystemUnderTest(t) + + // Common EVM env: reth + local DA. Return JWT, genesis, endpoints, and reth node handle. 
+ seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) + + // In-process OTLP collector to receive ev-node spans. + collector := newOTLPCollector(t) + t.Cleanup(func() { collector.close() }) + + // Start sequencer (ev-node) with tracing enabled to our collector. + sequencerHome := filepath.Join(t.TempDir(), "sequencer") + setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, + "--evnode.instrumentation.tracing=true", + "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), + "--evnode.instrumentation.tracing_sample_rate", "1.0", + "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", + ) + t.Log("Sequencer node is up") + + // Launch Spamoor container on the same Docker network; point it at reth INTERNAL ETH RPC. + ni, err := rethNode.GetNetworkInfo(context.Background()) + if err != nil { + t.Fatalf("failed to get reth network info: %v", err) + } + internalRPC := "http://" + ni.Internal.RPCAddress() + + spBuilder := spamoor.NewNodeBuilder(t.Name()). + WithDockerClient(rethNode.DockerClient). + WithDockerNetworkID(rethNode.NetworkID). + WithLogger(rethNode.Logger). + WithRPCHosts(internalRPC). + WithPrivateKey(TestPrivateKey). + WithHostPort(0) + + ctx := context.Background() + spNode, err := spBuilder.Build(ctx) + if err != nil { + t.Skipf("cannot build spamoor container: %v", err) + return + } + t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) + if err := spNode.Start(ctx); err != nil { + t.Skipf("cannot start spamoor container: %v", err) + return + } + + // Discover host-mapped ports and wait for daemon readiness. 
+ spInfo, err := spNode.GetNetworkInfo(ctx) + if err != nil { + t.Fatalf("failed to get spamoor network info: %v", err) + } + apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP + metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics + requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) + api := NewSpamoorAPI(apiAddr) + + // Configure spam: decent throughput to populate blocks and metrics. + // Run as a daemon-managed spammer that starts immediately. + cfg := strings.Join([]string{ + "throughput: 80", + "total_count: 3000", + "max_pending: 4000", + "max_wallets: 300", + "amount: 100", + "random_amount: true", + "random_target: true", + "refill_amount: 1000000000000000000", // 1 ETH + "refill_balance: 500000000000000000", // 0.5 ETH + "refill_interval: 600", + }, "\n") + spammerID, err := api.CreateSpammer("benchmark-eoatx", "eoatx", cfg, true) + if err != nil { + t.Fatalf("failed to create/start spammer: %v", err) + } + t.Cleanup(func() { _ = api.DeleteSpammer(spammerID) }) + + // Wait for running status briefly, then let it run to generate load. + runUntil := time.Now().Add(10 * time.Second) + for time.Now().Before(runUntil) { + s, _ := api.GetSpammer(spammerID) + if s != nil && s.Status == 1 { + break + } + time.Sleep(200 * time.Millisecond) + } + + // Let the load run for ~30–45s to stabilize block production and metrics, + // but also poll metrics so we can break early once activity is observed. + metricsAPI := NewSpamoorAPI(metricsAddr) + var metricsText string + deadline := time.Now().Add(45 * time.Second) + for { + if time.Now().After(deadline) { + break + } + m, err := metricsAPI.GetMetrics() + if err == nil && strings.Contains(m, "spamoor_transactions_sent_total") { + metricsText = m + if scrapeCounter(m, `^spamoor_transactions_sent_total\{.*\}\s+(\\d+(?:\\.\\d+)?)`) > 0 { + break + } + } + time.Sleep(500 * time.Millisecond) + } + + // If we didn't capture metrics during the loop, fetch once now. 
+ if metricsText == "" { + var err error + metricsText, err = metricsAPI.GetMetrics() + if err != nil { + t.Fatalf("failed to fetch spamoor metrics: %v", err) + } + } + + sentTotal := scrapeCounter(metricsText, `^spamoor_transactions_sent_total\{.*\}\s+(\d+(?:\.\d+)?)`) + failures := scrapeCounter(metricsText, `^spamoor_transactions_failed_total\{.*\}\s+(\d+(?:\.\d+)?)`) + pending := scrapeGauge(metricsText, `^spamoor_pending_transactions\{.*\}\s+(\d+(?:\.\d+)?)`) + blockGas := scrapeCounter(metricsText, `^spamoor_block_gas_usage\{.*\}\s+(\d+(?:\.\d+)?)`) + + t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sentTotal, failures, pending, blockGas) + + if sentTotal < 5 { + t.Fatalf("insufficient on-chain activity: spamoor_transactions_sent_total=%.0f", sentTotal) + } + + // Give ev-node a moment to flush pending span batches, then aggregate spans. + time.Sleep(2 * time.Second) + spans := collector.getSpans() + if len(spans) == 0 { + t.Fatalf("no ev-node spans recorded by OTLP collector") + } + printCollectedTraceReport(t, collector) + + // Optional: sanity-check reth RPC responsiveness (debug tracing optional and best-effort). + // If debug API is enabled in the reth image, a recent tx could be traced here. + // We only check that RPC endpoint responds to a simple request to catch regressions. 
+ requireJSONRPC(t, endpoints.GetSequencerEthURL(), 10*time.Second) +} + +// --- small helpers to scrape Prometheus text --- + +func scrapeCounter(metrics, pattern string) float64 { + return scrapeFloat(metrics, pattern) +} + +func scrapeGauge(metrics, pattern string) float64 { + return scrapeFloat(metrics, pattern) +} + +func scrapeFloat(metrics, pattern string) float64 { + re := regexp.MustCompile(pattern) + lines := strings.Split(metrics, "\n") + for _, line := range lines { + if m := re.FindStringSubmatch(line); len(m) == 2 { + if v, err := strconv.ParseFloat(m[1], 64); err == nil { + return v + } + } + } + return 0 +} + + From 3dfd7de61e9b3c26b385b4d387c0875ee137eeec Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 18 Feb 2026 15:20:45 +0000 Subject: [PATCH 05/11] wip: experimenting with gas burner tx --- .../evm_spamoor_trace_benchmark_e2e_test.go | 519 ++++++++++++------ 1 file changed, 358 insertions(+), 161 deletions(-) diff --git a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go index 41b1a2d535..4e34c6145a 100644 --- a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go +++ b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go @@ -3,15 +3,17 @@ package e2e import ( - "context" - "path/filepath" - "regexp" - "strconv" - "strings" - "testing" - "time" - - spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" + "context" + "math/big" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" + + spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" + "github.com/ethereum/go-ethereum/ethclient" ) // TestSpamoorTraceBenchmark spins up reth (Docker via Tastora), local-da, ev-node (host binary) @@ -19,168 +21,363 @@ import ( // driving EOA transfers directly to reth's internal ETH RPC. It runs a short load, then // emits a concise metrics/trace summary and fails if activity or spans are missing. 
func TestSpamoorTraceBenchmark(t *testing.T) { - t.Parallel() - - sut := NewSystemUnderTest(t) - - // Common EVM env: reth + local DA. Return JWT, genesis, endpoints, and reth node handle. - seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) - - // In-process OTLP collector to receive ev-node spans. - collector := newOTLPCollector(t) - t.Cleanup(func() { collector.close() }) - - // Start sequencer (ev-node) with tracing enabled to our collector. - sequencerHome := filepath.Join(t.TempDir(), "sequencer") - setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, - "--evnode.instrumentation.tracing=true", - "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), - "--evnode.instrumentation.tracing_sample_rate", "1.0", - "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", - ) - t.Log("Sequencer node is up") - - // Launch Spamoor container on the same Docker network; point it at reth INTERNAL ETH RPC. - ni, err := rethNode.GetNetworkInfo(context.Background()) - if err != nil { - t.Fatalf("failed to get reth network info: %v", err) - } - internalRPC := "http://" + ni.Internal.RPCAddress() - - spBuilder := spamoor.NewNodeBuilder(t.Name()). - WithDockerClient(rethNode.DockerClient). - WithDockerNetworkID(rethNode.NetworkID). - WithLogger(rethNode.Logger). - WithRPCHosts(internalRPC). - WithPrivateKey(TestPrivateKey). - WithHostPort(0) - - ctx := context.Background() - spNode, err := spBuilder.Build(ctx) - if err != nil { - t.Skipf("cannot build spamoor container: %v", err) - return - } - t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) - if err := spNode.Start(ctx); err != nil { - t.Skipf("cannot start spamoor container: %v", err) - return - } - - // Discover host-mapped ports and wait for daemon readiness. 
- spInfo, err := spNode.GetNetworkInfo(ctx) - if err != nil { - t.Fatalf("failed to get spamoor network info: %v", err) - } - apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP - metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics - requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) - api := NewSpamoorAPI(apiAddr) - - // Configure spam: decent throughput to populate blocks and metrics. - // Run as a daemon-managed spammer that starts immediately. - cfg := strings.Join([]string{ - "throughput: 80", - "total_count: 3000", - "max_pending: 4000", - "max_wallets: 300", - "amount: 100", - "random_amount: true", - "random_target: true", - "refill_amount: 1000000000000000000", // 1 ETH - "refill_balance: 500000000000000000", // 0.5 ETH - "refill_interval: 600", - }, "\n") - spammerID, err := api.CreateSpammer("benchmark-eoatx", "eoatx", cfg, true) - if err != nil { - t.Fatalf("failed to create/start spammer: %v", err) - } - t.Cleanup(func() { _ = api.DeleteSpammer(spammerID) }) - - // Wait for running status briefly, then let it run to generate load. - runUntil := time.Now().Add(10 * time.Second) - for time.Now().Before(runUntil) { - s, _ := api.GetSpammer(spammerID) - if s != nil && s.Status == 1 { - break - } - time.Sleep(200 * time.Millisecond) - } - - // Let the load run for ~30–45s to stabilize block production and metrics, - // but also poll metrics so we can break early once activity is observed. - metricsAPI := NewSpamoorAPI(metricsAddr) - var metricsText string - deadline := time.Now().Add(45 * time.Second) - for { - if time.Now().After(deadline) { - break - } - m, err := metricsAPI.GetMetrics() - if err == nil && strings.Contains(m, "spamoor_transactions_sent_total") { - metricsText = m - if scrapeCounter(m, `^spamoor_transactions_sent_total\{.*\}\s+(\\d+(?:\\.\\d+)?)`) > 0 { - break - } - } - time.Sleep(500 * time.Millisecond) - } - - // If we didn't capture metrics during the loop, fetch once now. 
- if metricsText == "" { - var err error - metricsText, err = metricsAPI.GetMetrics() - if err != nil { - t.Fatalf("failed to fetch spamoor metrics: %v", err) - } - } - - sentTotal := scrapeCounter(metricsText, `^spamoor_transactions_sent_total\{.*\}\s+(\d+(?:\.\d+)?)`) - failures := scrapeCounter(metricsText, `^spamoor_transactions_failed_total\{.*\}\s+(\d+(?:\.\d+)?)`) - pending := scrapeGauge(metricsText, `^spamoor_pending_transactions\{.*\}\s+(\d+(?:\.\d+)?)`) - blockGas := scrapeCounter(metricsText, `^spamoor_block_gas_usage\{.*\}\s+(\d+(?:\.\d+)?)`) - - t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sentTotal, failures, pending, blockGas) - - if sentTotal < 5 { - t.Fatalf("insufficient on-chain activity: spamoor_transactions_sent_total=%.0f", sentTotal) - } - - // Give ev-node a moment to flush pending span batches, then aggregate spans. - time.Sleep(2 * time.Second) - spans := collector.getSpans() - if len(spans) == 0 { - t.Fatalf("no ev-node spans recorded by OTLP collector") - } - printCollectedTraceReport(t, collector) - - // Optional: sanity-check reth RPC responsiveness (debug tracing optional and best-effort). - // If debug API is enabled in the reth image, a recent tx could be traced here. - // We only check that RPC endpoint responds to a simple request to catch regressions. - requireJSONRPC(t, endpoints.GetSequencerEthURL(), 10*time.Second) + t.Parallel() + + sut := NewSystemUnderTest(t) + + // Common EVM env: reth + local DA. Return JWT, genesis, endpoints, and reth node handle. + seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) + + // In-process OTLP collector to receive ev-node spans. + collector := newOTLPCollector(t) + t.Cleanup(func() { collector.close() }) + + // Start sequencer (ev-node) with tracing enabled to our collector. 
+ sequencerHome := filepath.Join(t.TempDir(), "sequencer") + setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, + "--evnode.instrumentation.tracing=true", + "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), + "--evnode.instrumentation.tracing_sample_rate", "1.0", + "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", + ) + t.Log("Sequencer node is up") + + // Launch Spamoor container on the same Docker network; point it at reth INTERNAL ETH RPC. + ni, err := rethNode.GetNetworkInfo(context.Background()) + if err != nil { + t.Fatalf("failed to get reth network info: %v", err) + } + internalRPC := "http://" + ni.Internal.RPCAddress() + + spBuilder := spamoor.NewNodeBuilder(t.Name()). + WithDockerClient(rethNode.DockerClient). + WithDockerNetworkID(rethNode.NetworkID). + WithLogger(rethNode.Logger). + WithRPCHosts(internalRPC). + WithPrivateKey(TestPrivateKey). + WithHostPort(0) + + ctx := context.Background() + spNode, err := spBuilder.Build(ctx) + if err != nil { + t.Skipf("cannot build spamoor container: %v", err) + return + } + t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) + if err := spNode.Start(ctx); err != nil { + t.Skipf("cannot start spamoor container: %v", err) + return + } + + // Discover host-mapped ports and wait for daemon readiness. + spInfo, err := spNode.GetNetworkInfo(ctx) + if err != nil { + t.Fatalf("failed to get spamoor network info: %v", err) + } + apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP + metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics + requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) + api := NewSpamoorAPI(apiAddr) + + // Configure multiple concurrent spammers to try and saturate blocks. + // Each spammer runs an EOA transfer scenario; collectively we aim to push block gas towards the limit. 
+ baseCfg := func(tp int) string { + return strings.Join([]string{ + "throughput: " + strconv.Itoa(tp), + "total_count: 5000", + "max_pending: 8000", + "max_wallets: 500", + "amount: 100", + "random_amount: true", + "random_target: true", + "base_fee: 20", // gwei + "tip_fee: 2", // gwei + "refill_amount: 1000000000000000000", // 1 ETH + "refill_balance: 500000000000000000", // 0.5 ETH + "refill_interval: 600", + }, "\n") + } + + // Spin up 3 spammers at 150 tps each (approx; Spamoor maintains a target rate), total target ~450 tps. + spammerIDs := make([]int, 0, 3) + for i := 0; i < 3; i++ { + cfg := baseCfg(150) + id, err := api.CreateSpammer("benchmark-eoatx-"+strconv.Itoa(i), "eoatx", cfg, true) + if err != nil { + t.Fatalf("failed to create/start spammer %d: %v", i, err) + } + spammerIDs = append(spammerIDs, id) + idx := i + t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) + } + + // Add 1-2 heavier-gas spammers using known Spamoor scenarios. We validate each config + // before starting to avoid failures on unsupported images. + // Prefer gasburnertx spammers to maximize gas usage per transaction. 
+ gasBurnerCfg := strings.Join([]string{ + "throughput: 40", + "total_count: 3000", + "max_pending: 8000", + "max_wallets: 200", + "refill_amount: 1000000000000000000", // 1 ETH + "refill_balance: 500000000000000000", // 0.5 ETH + "refill_interval: 600", + "base_fee: 20", // gwei + "tip_fee: 2", // gwei + }, "\n") + for i := 0; i < 2; i++ { + name := "benchmark-gasburner-" + strconv.Itoa(i) + if err := api.ValidateScenarioConfig(name+"-probe", "gasburnertx", gasBurnerCfg); err != nil { + t.Logf("gasburnertx not supported or invalid config: %v", err) + break + } + id, err := api.CreateSpammer(name, "gasburnertx", gasBurnerCfg, true) + if err != nil { + t.Logf("failed to start gasburnertx: %v", err) + break + } + spammerIDs = append(spammerIDs, id) + idx := len(spammerIDs) - 1 + t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) + } + + // Wait for any spammer to report running, then proceed. + runUntil := time.Now().Add(15 * time.Second) + for time.Now().Before(runUntil) { + ok := false + for _, id := range spammerIDs { + s, _ := api.GetSpammer(id) + if s != nil && s.Status == 1 { + ok = true + break + } + } + if ok { + break + } + time.Sleep(200 * time.Millisecond) + } + + // Let the initial load run for ~30–45s to stabilize block production and metrics, + // but also poll metrics so we can break early once activity is observed. + metricsAPI := NewSpamoorAPI(metricsAddr) + var metricsText string + deadline := time.Now().Add(45 * time.Second) + for { + if time.Now().After(deadline) { + break + } + m, err := metricsAPI.GetMetrics() + if err == nil && strings.Contains(m, "spamoor_transactions_sent_total") { + metricsText = m + if scrapeCounter(m, `^spamoor_transactions_sent_total\{.*\}\s+(\\d+(?:\\.\\d+)?)`) > 0 { + break + } + } + time.Sleep(500 * time.Millisecond) + } + + // If we didn't capture metrics during the loop, fetch once now (best-effort). 
+ if metricsText == "" { + if m, err := metricsAPI.GetMetrics(); err == nil { + metricsText = m + } else { + t.Logf("metrics unavailable (continuing without them): %v", err) + } + } + + // Try to saturate block gas: dynamically ramp up spammers until utilization is high or we hit a safe cap. + if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { + defer ec.Close() + rampDeadline := time.Now().Add(60 * time.Second) + for len(spammerIDs) < 8 && time.Now().Before(rampDeadline) { + avg, peak := sampleGasUtilization(t, ec, 5*time.Second) + t.Logf("Ramp check: gas utilization avg=%.1f%% peak=%.1f%% spammers=%d", avg*100, peak*100, len(spammerIDs)) + if peak >= 0.9 { // good enough + break + } + // Add another spammer at a higher target rate to push harder. + cfg := baseCfg(250) + id, err := api.CreateSpammer("benchmark-eoatx-ramp-"+strconv.Itoa(len(spammerIDs)), "eoatx", cfg, true) + if err != nil { + t.Logf("failed to add ramp spammer: %v", err) + break + } + spammerIDs = append(spammerIDs, id) + idx := len(spammerIDs) - 1 + t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) + // give it a moment to start before next measurement + time.Sleep(3 * time.Second) + } + } + + if metricsText != "" { + sentTotal := scrapeCounter(metricsText, `^spamoor_transactions_sent_total\{.*\}\s+(\d+(?:\.\d+)?)`) + failures := scrapeCounter(metricsText, `^spamoor_transactions_failed_total\{.*\}\s+(\d+(?:\.\d+)?)`) + pending := scrapeGauge(metricsText, `^spamoor_pending_transactions\{.*\}\s+(\d+(?:\.\d+)?)`) + blockGas := scrapeCounter(metricsText, `^spamoor_block_gas_usage\{.*\}\s+(\d+(?:\.\d+)?)`) + t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sentTotal, failures, pending, blockGas) + if sentTotal < 5 { + t.Logf("warning: low spamoor sent count (%.0f)", sentTotal) + } + } else { + t.Log("Spamoor metrics unavailable; will validate activity via block headers and spans") + } + + // Give ev-node a moment to flush pending span 
batches, then aggregate spans. + time.Sleep(2 * time.Second) + spans := collector.getSpans() + if len(spans) == 0 { + t.Fatalf("no ev-node spans recorded by OTLP collector") + } + printCollectedTraceReport(t, collector) + + // Optional: sanity-check reth RPC responsiveness (debug tracing optional and best-effort). + // If debug API is enabled in the reth image, a recent tx could be traced here. + // We only check that RPC endpoint responds to a simple request to catch regressions. + requireJSONRPC(t, endpoints.GetSequencerEthURL(), 10*time.Second) + + // Report observed block gas utilization over a short window. + // Query latest headers to compute peak and average gas usage fraction. + if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { + defer ec.Close() + avg, peak := sampleGasUtilization(t, ec, 10*time.Second) + // Also compute a normalized utilization vs a 30M gas cap for readability when chain gasLimit is huge. + navg, npeak := sampleNormalizedUtilization(t, ec, 10*time.Second, 30_000_000) + t.Logf("Block gas utilization: raw avg=%.3f%% peak=%.3f%% | normalized(30M) avg=%.1f%% peak=%.1f%%", avg*100, peak*100, navg*100, npeak*100) + if avg < 0.05 && peak < 0.1 { // very low; dump recent block gas details for debugging + debugLogRecentBlockGas(t, ec, 8) + } + // If metrics were unavailable earlier, ensure on-chain activity by checking recent tx counts. + // Fail if we observe zero transactions in the last ~8 blocks. 
+ if metricsText == "" { + header, herr := ec.HeaderByNumber(context.Background(), nil) + if herr == nil && header != nil { + var anyTx bool + latest := new(big.Int).Set(header.Number) + min := new(big.Int).Sub(latest, big.NewInt(int64(7))) + if min.Sign() < 0 { + min.SetInt64(0) + } + for b := new(big.Int).Set(latest); b.Cmp(min) >= 0; b.Sub(b, big.NewInt(1)) { + blk, e := ec.BlockByNumber(context.Background(), b) + if e == nil && blk != nil && len(blk.Transactions()) > 0 { + anyTx = true + break + } + } + if !anyTx { + t.Fatalf("no on-chain activity observed in recent blocks and metrics unavailable") + } + } + } + } } // --- small helpers to scrape Prometheus text --- func scrapeCounter(metrics, pattern string) float64 { - return scrapeFloat(metrics, pattern) + return scrapeFloat(metrics, pattern) } func scrapeGauge(metrics, pattern string) float64 { - return scrapeFloat(metrics, pattern) + return scrapeFloat(metrics, pattern) } func scrapeFloat(metrics, pattern string) float64 { - re := regexp.MustCompile(pattern) - lines := strings.Split(metrics, "\n") - for _, line := range lines { - if m := re.FindStringSubmatch(line); len(m) == 2 { - if v, err := strconv.ParseFloat(m[1], 64); err == nil { - return v - } - } - } - return 0 + re := regexp.MustCompile(pattern) + lines := strings.Split(metrics, "\n") + for _, line := range lines { + if m := re.FindStringSubmatch(line); len(m) == 2 { + if v, err := strconv.ParseFloat(m[1], 64); err == nil { + return v + } + } + } + return 0 +} + +// sampleGasUtilization samples latest blocks for the given duration and returns +// average and peak gas used fraction. 
+func sampleGasUtilization(t *testing.T, ec *ethclient.Client, dur time.Duration) (avg, peak float64) { + t.Helper() + deadline := time.Now().Add(dur) + var sum float64 + var n int + for time.Now().Before(deadline) { + header, err := ec.HeaderByNumber(context.Background(), nil) + if err == nil && header != nil && header.GasLimit > 0 { + frac := float64(header.GasUsed) / float64(header.GasLimit) + sum += frac + n++ + if frac > peak { + peak = frac + } + } + time.Sleep(300 * time.Millisecond) + } + if n > 0 { + avg = sum / float64(n) + } + return } - +// sampleNormalizedUtilization computes utilization relative to a targetGasLimit rather than the chain's gasLimit. +func sampleNormalizedUtilization(t *testing.T, ec *ethclient.Client, dur time.Duration, targetGasLimit uint64) (avg, peak float64) { + t.Helper() + if targetGasLimit == 0 { + return 0, 0 + } + deadline := time.Now().Add(dur) + var sum float64 + var n int + for time.Now().Before(deadline) { + header, err := ec.HeaderByNumber(context.Background(), nil) + if err == nil && header != nil { + frac := float64(header.GasUsed) / float64(targetGasLimit) + if frac > 1 { + frac = 1 // cap at 100% + } + sum += frac + n++ + if frac > peak { + peak = frac + } + } + time.Sleep(300 * time.Millisecond) + } + if n > 0 { + avg = sum / float64(n) + } + return +} + +// debugLogRecentBlockGas logs gasUsed/gasLimit and tx count for the most recent n blocks. 
+func debugLogRecentBlockGas(t *testing.T, ec *ethclient.Client, n int) { + t.Helper() + // Get latest number + header, err := ec.HeaderByNumber(context.Background(), nil) + if err != nil || header == nil { + t.Logf("debug: failed to fetch latest header: %v", err) + return + } + latest := new(big.Int).Set(header.Number) + min := new(big.Int).Sub(latest, big.NewInt(int64(n-1))) + if min.Sign() < 0 { + min.SetInt64(0) + } + t.Logf("debug: recent block gas (number gasUsed/gasLimit txs)") + for b := new(big.Int).Set(latest); b.Cmp(min) >= 0; b.Sub(b, big.NewInt(1)) { + h, err := ec.HeaderByNumber(context.Background(), b) + if err != nil || h == nil { + t.Logf("debug: block %s header error: %v", b.String(), err) + continue + } + blk, err := ec.BlockByNumber(context.Background(), b) + txc := 0 + if err == nil && blk != nil { + txc = len(blk.Transactions()) + } + t.Logf("debug: %s %d/%d txs=%d", h.Number.String(), h.GasUsed, h.GasLimit, txc) + } +} From 269be560a40097e24006eefdfea8d4f5a3faca93 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 18 Feb 2026 16:04:54 +0000 Subject: [PATCH 06/11] wip --- .../evm_spamoor_trace_benchmark_e2e_test.go | 185 ++++++++++-------- 1 file changed, 108 insertions(+), 77 deletions(-) diff --git a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go index 4e34c6145a..8a17b2067f 100644 --- a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go +++ b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go @@ -34,12 +34,15 @@ func TestSpamoorTraceBenchmark(t *testing.T) { // Start sequencer (ev-node) with tracing enabled to our collector. 
sequencerHome := filepath.Join(t.TempDir(), "sequencer") - setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, - "--evnode.instrumentation.tracing=true", - "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), - "--evnode.instrumentation.tracing_sample_rate", "1.0", - "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", - ) + // Target: ~1 Ggas/s with 100ms blocks => ~100M gas per block. + // Use tight block_time without lazy mode to enforce cadence. + setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, + "--evnode.instrumentation.tracing=true", + "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), + "--evnode.instrumentation.tracing_sample_rate", "1.0", + "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", + "--evnode.node.block_time", "100ms", + ) t.Log("Sequencer node is up") // Launch Spamoor container on the same Docker network; point it at reth INTERNAL ETH RPC. @@ -79,58 +82,34 @@ func TestSpamoorTraceBenchmark(t *testing.T) { requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) api := NewSpamoorAPI(apiAddr) - // Configure multiple concurrent spammers to try and saturate blocks. - // Each spammer runs an EOA transfer scenario; collectively we aim to push block gas towards the limit. - baseCfg := func(tp int) string { - return strings.Join([]string{ - "throughput: " + strconv.Itoa(tp), - "total_count: 5000", - "max_pending: 8000", - "max_wallets: 500", - "amount: 100", - "random_amount: true", - "random_target: true", - "base_fee: 20", // gwei - "tip_fee: 2", // gwei - "refill_amount: 1000000000000000000", // 1 ETH - "refill_balance: 500000000000000000", // 0.5 ETH - "refill_interval: 600", - }, "\n") - } + // Gasburner-only configuration below; eoatx disabled for this ceiling test. - // Spin up 3 spammers at 150 tps each (approx; Spamoor maintains a target rate), total target ~450 tps. 
- spammerIDs := make([]int, 0, 3) - for i := 0; i < 3; i++ { - cfg := baseCfg(150) - id, err := api.CreateSpammer("benchmark-eoatx-"+strconv.Itoa(i), "eoatx", cfg, true) - if err != nil { - t.Fatalf("failed to create/start spammer %d: %v", i, err) - } - spammerIDs = append(spammerIDs, id) - idx := i - t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) - } + // Start with gasburner-only to maximize gas per block and simplify inclusion checks. + // Aim for ~100M gas per 100ms block: set 10M gas/tx and target ~10 tx/block total across spammers. + spammerIDs := make([]int, 0, 4) // Add 1-2 heavier-gas spammers using known Spamoor scenarios. We validate each config // before starting to avoid failures on unsupported images. // Prefer gasburnertx spammers to maximize gas usage per transaction. - gasBurnerCfg := strings.Join([]string{ - "throughput: 40", - "total_count: 3000", - "max_pending: 8000", - "max_wallets: 200", - "refill_amount: 1000000000000000000", // 1 ETH - "refill_balance: 500000000000000000", // 0.5 ETH - "refill_interval: 600", - "base_fee: 20", // gwei - "tip_fee: 2", // gwei - }, "\n") - for i := 0; i < 2; i++ { - name := "benchmark-gasburner-" + strconv.Itoa(i) - if err := api.ValidateScenarioConfig(name+"-probe", "gasburnertx", gasBurnerCfg); err != nil { - t.Logf("gasburnertx not supported or invalid config: %v", err) - break - } + gasBurnerCfg := strings.Join([]string{ + "throughput: 200", // aggressive per-spammer rate (per-second), drives continuous backlog + "total_count: 20000", + "max_pending: 50000", + "max_wallets: 1000", + "gas_units_to_burn: 10000000", // 10M gas/tx + "refill_amount: 5000000000000000000", // 5 ETH + "refill_balance: 2000000000000000000", // 2 ETH + "refill_interval: 300", + "base_fee: 20", // gwei (reduce to avoid fee-cap rejection) + "tip_fee: 5", // gwei + "rebroadcast: 2", + }, "\n") + for i := 0; i < 14; i++ { // start with 14 spammers to create a stronger continuous backlog + name := "benchmark-gasburner-" + 
strconv.Itoa(i) + if err := api.ValidateScenarioConfig(name+"-probe", "gasburnertx", gasBurnerCfg); err != nil { + t.Logf("gasburnertx not supported or invalid config: %v", err) + break + } id, err := api.CreateSpammer(name, "gasburnertx", gasBurnerCfg, true) if err != nil { t.Logf("failed to start gasburnertx: %v", err) @@ -186,30 +165,29 @@ func TestSpamoorTraceBenchmark(t *testing.T) { } } - // Try to saturate block gas: dynamically ramp up spammers until utilization is high or we hit a safe cap. - if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { - defer ec.Close() - rampDeadline := time.Now().Add(60 * time.Second) - for len(spammerIDs) < 8 && time.Now().Before(rampDeadline) { - avg, peak := sampleGasUtilization(t, ec, 5*time.Second) - t.Logf("Ramp check: gas utilization avg=%.1f%% peak=%.1f%% spammers=%d", avg*100, peak*100, len(spammerIDs)) - if peak >= 0.9 { // good enough - break - } - // Add another spammer at a higher target rate to push harder. - cfg := baseCfg(250) - id, err := api.CreateSpammer("benchmark-eoatx-ramp-"+strconv.Itoa(len(spammerIDs)), "eoatx", cfg, true) - if err != nil { - t.Logf("failed to add ramp spammer: %v", err) - break - } - spammerIDs = append(spammerIDs, id) - idx := len(spammerIDs) - 1 - t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) - // give it a moment to start before next measurement - time.Sleep(3 * time.Second) - } - } + // Try to saturate block gas: dynamically ramp up gasburner spammers until utilization approaches ~100M/block or we hit a safe cap. 
+ if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { + defer ec.Close() + rampDeadline := time.Now().Add(60 * time.Second) + for len(spammerIDs) < 20 && time.Now().Before(rampDeadline) { // ramp up further if still underutilized + avg, peak := sampleGasUtilization(t, ec, 5*time.Second) + t.Logf("Ramp check: gas utilization avg=%.1f%% peak=%.1f%% spammers=%d", avg*100, peak*100, len(spammerIDs)) + if peak >= 0.9 { // good enough + break + } + // Add another gasburner spammer to push harder. + id, err := api.CreateSpammer("benchmark-gasburner-ramp-"+strconv.Itoa(len(spammerIDs)), "gasburnertx", gasBurnerCfg, true) + if err != nil { + t.Logf("failed to add ramp spammer: %v", err) + break + } + spammerIDs = append(spammerIDs, id) + idx := len(spammerIDs) - 1 + t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) + // give it a moment to start before next measurement + time.Sleep(3 * time.Second) + } + } if metricsText != "" { sentTotal := scrapeCounter(metricsText, `^spamoor_transactions_sent_total\{.*\}\s+(\d+(?:\.\d+)?)`) @@ -248,6 +226,25 @@ func TestSpamoorTraceBenchmark(t *testing.T) { if avg < 0.05 && peak < 0.1 { // very low; dump recent block gas details for debugging debugLogRecentBlockGas(t, ec, 8) } + // Additionally compute aggregate gas/sec over a recent time window (by timestamps), + // which is robust even if the chain has a long history. + gsec, gpb, btMs := computeGasThroughputWindow(t, ec, 60) // last ~60s + t.Logf("Throughput window: avg_gas_block=%.0f avg_block_time=%.1fms gas_sec=%.0f", gpb, btMs, gsec) + // Report simple target status for 1 Ggas/s @ 100ms cadence. 
+ const ( + targetGasSec = 1_000_000_000.0 + minGasSec = 0.9 * targetGasSec // 900M gas/sec + minBtMs = 80.0 // acceptable block time window + maxBtMs = 150.0 + minGasPerBlock = 90_000_000.0 // ~100M +/- 10% + maxGasPerBlock = 110_000_000.0 + ) + hit := gsec >= minGasSec && btMs >= minBtMs && btMs <= maxBtMs && gpb >= minGasPerBlock && gpb <= maxGasPerBlock + if hit { + t.Logf("Target 1Ggas/s@100ms: PASS (gas_sec=%.0f, avg_bt=%.1fms, gas_block=%.0f)", gsec, btMs, gpb) + } else { + t.Logf("Target 1Ggas/s@100ms: NOT MET (gas_sec=%.0f, avg_bt=%.1fms, gas_block=%.0f). Thresholds: gas_sec>=900,000,000; 80ms<=avg_bt<=150ms; 90M<=gas_block<=110M.", gsec, btMs, gpb) + } // If metrics were unavailable earlier, ensure on-chain activity by checking recent tx counts. // Fail if we observe zero transactions in the last ~8 blocks. if metricsText == "" { @@ -381,3 +378,37 @@ func debugLogRecentBlockGas(t *testing.T, ec *ethclient.Client, n int) { t.Logf("debug: %s %d/%d txs=%d", h.Number.String(), h.GasUsed, h.GasLimit, txc) } } + +// computeGasThroughputWindow scans backwards from the latest block until it accumulates +// approximately windowSec seconds of chain time, then returns gas/sec, avg gas/block, +// and avg block time (ms) over that window. +func computeGasThroughputWindow(t *testing.T, ec *ethclient.Client, windowSec int) (gasPerSec, avgGasPerBlock, avgBlockTimeMs float64) { + t.Helper() + if windowSec <= 0 { windowSec = 60 } + latestHeader, err := ec.HeaderByNumber(context.Background(), nil) + if err != nil || latestHeader == nil { return 0, 0, 0 } + latestNum := new(big.Int).Set(latestHeader.Number) + latestTS := latestHeader.Time + + var sumGas uint64 + var count int + var oldestTS uint64 = latestTS + // Walk backwards up to a cap of blocks to avoid long scans if blocks are sparse. 
+ capBlocks := 5000 + for i := 0; i < capBlocks; i++ { + num := new(big.Int).Sub(latestNum, big.NewInt(int64(i))) + if num.Sign() < 0 { break } + blk, err := ec.BlockByNumber(context.Background(), num) + if err != nil || blk == nil { break } + sumGas += blk.GasUsed() + count++ + oldestTS = blk.Time() + if latestTS - oldestTS >= uint64(windowSec) { break } + } + if count < 2 || latestTS <= oldestTS { return 0, 0, 0 } + elapsed := float64(latestTS - oldestTS) // seconds + gasPerSec = float64(sumGas) / elapsed + avgGasPerBlock = float64(sumGas) / float64(count) + avgBlockTimeMs = (elapsed / float64(count-1)) * 1000.0 + return +} From edba20289e9911dd95124e49b8cf7b235c8e3793 Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 19 Feb 2026 09:45:34 +0000 Subject: [PATCH 07/11] chore: adding basic spamoor test --- test/e2e/evm_spamoor_e2e_test.go | 211 --------- test/e2e/evm_spamoor_smoke_test.go | 179 ++++++++ .../evm_spamoor_trace_benchmark_e2e_test.go | 414 ------------------ test/e2e/go.mod | 2 +- test/e2e/spamoor_api.go | 144 ------ 5 files changed, 180 insertions(+), 770 deletions(-) delete mode 100644 test/e2e/evm_spamoor_e2e_test.go create mode 100644 test/e2e/evm_spamoor_smoke_test.go delete mode 100644 test/e2e/evm_spamoor_trace_benchmark_e2e_test.go delete mode 100644 test/e2e/spamoor_api.go diff --git a/test/e2e/evm_spamoor_e2e_test.go b/test/e2e/evm_spamoor_e2e_test.go deleted file mode 100644 index d69bfcf64f..0000000000 --- a/test/e2e/evm_spamoor_e2e_test.go +++ /dev/null @@ -1,211 +0,0 @@ -//go:build evm - -package e2e - -import ( - "bytes" - "context" - "fmt" - "net/http" - "path/filepath" - "strings" - "testing" - "time" - - spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" -) - -// TestSpamoorBasicScenario starts a sequencer and runs a basic spamoor scenario -// against the sequencer's ETH RPC. It also attempts to scrape metrics from a -// configured metrics endpoint if supported by the spamoor binary. 
-// Optional: metrics via spamoor-daemon. Best-effort and skipped if daemon not available. -func TestSpamoorMetricsViaDaemon(t *testing.T) { - t.Parallel() - - sut := NewSystemUnderTest(t) - - // Common EVM env (Reth + local DA ports/JWT/genesis) - seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) - - // In-process OTLP collector for ev-node traces - collector := newOTLPCollector(t) - t.Cleanup(func() { collector.close() }) - - // Start sequencer using shared helper and only tracing extra args - sequencerHome := filepath.Join(t.TempDir(), "sequencer") - setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, - "--evnode.instrumentation.tracing=true", - "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), - "--evnode.instrumentation.tracing_sample_rate", "1.0", - "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", - ) - t.Log("Sequencer node is up") - - // Run spamoor-daemon via tastora container node using the maintained Docker image - // so that CI only needs Docker, not local binaries. - // Build spamoor container node in the SAME docker network - // It can reach Reth via internal IP:port - // Use reth INTERNAL ETH RPC so the Spamoor container talks directly over Docker network - ni, err := rethNode.GetNetworkInfo(context.Background()) - if err != nil { - t.Fatalf("failed to get reth network info for Spamoor RPC: %v", err) - } - - internalRPC := "http://" + ni.Internal.RPCAddress() - spBuilder := spamoor.NewNodeBuilder(t.Name()). - WithDockerClient(rethNode.DockerClient). - WithDockerNetworkID(rethNode.NetworkID). - WithLogger(rethNode.Logger). - WithRPCHosts(internalRPC). - WithPrivateKey(TestPrivateKey). 
- WithHostPort(0) - ctx := context.Background() - spNode, err := spBuilder.Build(ctx) - if err != nil { - t.Skipf("cannot build spamoor container: %v", err) - return - } - t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) - - if err := spNode.Start(ctx); err != nil { - t.Skipf("cannot start spamoor container: %v", err) - return - } - - // Discover host-mapped ports from Docker - spInfo, err := spNode.GetNetworkInfo(ctx) - if err != nil { - t.Fatalf("failed to get spamoor network info: %v", err) - } - apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP - metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics - // Wait for the daemon HTTP server to accept connections (use HTTP port) - requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) - api := NewSpamoorAPI(apiAddr) - // Config YAML for eoatx (no 'count' — run for a short window) - // Increase throughput and wallet pool to ensure visible on-chain activity for tx metrics - cfg := strings.Join([]string{ - "throughput: 60", - "max_pending: 1000", - "max_wallets: 200", - "amount: 100", - "random_amount: true", - "random_target: true", - "refill_amount: 1000000000000000000", // 1 ETH - "refill_balance: 500000000000000000", // 0.5 ETH - "refill_interval: 600", - }, "\n") - spammerID, err := api.CreateSpammer("e2e-eoatx", "eoatx", cfg, true) - if err != nil { - t.Fatalf("failed to create/start spammer: %v", err) - } - t.Cleanup(func() { _ = api.DeleteSpammer(spammerID) }) - - // Wait a bit for the spammer to be running - runUntil := time.Now().Add(10 * time.Second) - for time.Now().Before(runUntil) { - s, _ := api.GetSpammer(spammerID) - if s != nil && s.Status == 1 { // running - break - } - time.Sleep(200 * time.Millisecond) - } - // Allow additional time to generate activity for metrics - time.Sleep(20 * time.Second) - - // Dump metrics while/after running from the dedicated metrics port - metricsAPI := NewSpamoorAPI(metricsAddr) - // Poll metrics until spamoor-specific metrics appear 
or timeout - var m string - metricsDeadline := time.Now().Add(30 * time.Second) - for { - m, err = metricsAPI.GetMetrics() - if err == nil && strings.Contains(m, "spamoor") { - break - } - if time.Now().After(metricsDeadline) { - break - } - time.Sleep(500 * time.Millisecond) - } - if err != nil { - t.Logf("metrics not available: %v", err) - return - } - if len(strings.TrimSpace(m)) == 0 { - t.Log("empty metrics from daemon") - return - } - // Print a short sample of overall metrics - t.Logf("daemon metrics sample:\n%s", firstLines(m, 40)) - // Additionally, extract spamoor-specific metrics if present - var spamoorLines []string - for _, line := range strings.Split(m, "\n") { - if strings.Contains(line, "spamoor") { - spamoorLines = append(spamoorLines, line) - if len(spamoorLines) >= 100 { - break - } - } - } - if len(spamoorLines) > 0 { - t.Logf("spamoor metrics (subset):\n%s", strings.Join(spamoorLines, "\n")) - } else { - t.Fatalf("no spamoor-prefixed metrics found; increase runtime/throughput or verify tx metrics are enabled") - } - // Done -} - -// firstLines returns up to n lines of s. -func firstLines(s string, n int) string { - lines := strings.Split(s, "\n") - if len(lines) > n { - lines = lines[:n] - } - return strings.Join(lines, "\n") -} - -// requireHTTP polls a URL until it returns a 200-range response or the timeout expires. 
-func requireHTTP(t *testing.T, url string, timeout time.Duration) { - t.Helper() - client := &http.Client{Timeout: 200 * time.Millisecond} - deadline := time.Now().Add(timeout) - var lastErr error - for time.Now().Before(deadline) { - resp, err := client.Get(url) - if err == nil { - _ = resp.Body.Close() - if resp.StatusCode >= 200 && resp.StatusCode < 300 { - return - } - lastErr = fmt.Errorf("status %d", resp.StatusCode) - } else { - lastErr = err - } - time.Sleep(100 * time.Millisecond) - } - t.Fatalf("daemon not ready at %s: %v", url, lastErr) -} - -// requireJSONRPC checks that the ETH RPC responds to a net_version request. -func requireJSONRPC(t *testing.T, url string, timeout time.Duration) { - t.Helper() - client := &http.Client{Timeout: 400 * time.Millisecond} - deadline := time.Now().Add(timeout) - payload := []byte(`{"jsonrpc":"2.0","method":"net_version","params":[],"id":1}`) - var lastErr error - for time.Now().Before(deadline) { - resp, err := client.Post(url, "application/json", bytes.NewReader(payload)) - if err == nil { - _ = resp.Body.Close() - if resp.StatusCode >= 200 && resp.StatusCode < 300 { - return - } - lastErr = fmt.Errorf("status %d", resp.StatusCode) - } else { - lastErr = err - } - time.Sleep(200 * time.Millisecond) - } - t.Fatalf("reth rpc not ready at %s: %v", url, lastErr) -} diff --git a/test/e2e/evm_spamoor_smoke_test.go b/test/e2e/evm_spamoor_smoke_test.go new file mode 100644 index 0000000000..398b9ceb2b --- /dev/null +++ b/test/e2e/evm_spamoor_smoke_test.go @@ -0,0 +1,179 @@ +//go:build evm + +package e2e + +import ( + "context" + "fmt" + "net/http" + "path/filepath" + "testing" + "time" + + spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +// TestSpamoorSmoke spins up reth + sequencer and a Spamoor node, starts a few +// basic spammers, waits briefly, then prints a concise metrics summary. 
+func TestSpamoorSmoke(t *testing.T) { + t.Parallel() + + sut := NewSystemUnderTest(t) + // Bring up reth + local DA and start sequencer with default settings. + seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) + sequencerHome := filepath.Join(t.TempDir(), "sequencer") + + // In-process OTLP/HTTP collector to capture ev-node spans. + collector := newOTLPCollector(t) + t.Cleanup(collector.close) + + // Start sequencer with tracing to our collector. + setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, + "--evnode.instrumentation.tracing=true", + "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), + "--evnode.instrumentation.tracing_sample_rate", "1.0", + "--evnode.instrumentation.tracing_service_name", "ev-node-smoke", + ) + t.Log("Sequencer node is up") + + // Start Spamoor within the same Docker network, targeting reth internal RPC. + ni, err := rethNode.GetNetworkInfo(context.Background()) + require.NoError(t, err, "failed to get network info") + + internalRPC := "http://" + ni.Internal.RPCAddress() + + spBuilder := spamoor.NewNodeBuilder(t.Name()). + WithDockerClient(rethNode.DockerClient). + WithDockerNetworkID(rethNode.NetworkID). + WithLogger(rethNode.Logger). + WithRPCHosts(internalRPC). + WithPrivateKey(TestPrivateKey) + + ctx := context.Background() + spNode, err := spBuilder.Build(ctx) + require.NoError(t, err, "failed to build sp node") + + t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) + require.NoError(t, spNode.Start(ctx), "failed to start spamoor node") + + // Wait for daemon readiness. + spInfo, err := spNode.GetNetworkInfo(ctx) + require.NoError(t, err, "failed to get network info") + + apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP + requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) + api := spNode.API() + + // Basic scenarios (structs that YAML-marshal into the daemon config). 
+ eoatx := map[string]any{ + "throughput": 100, + "total_count": 3000, + "max_pending": 4000, + "max_wallets": 300, + "amount": 100, + "random_amount": true, + "random_target": true, + "base_fee": 20, // gwei + "tip_fee": 2, // gwei + "refill_amount": "1000000000000000000", + "refill_balance": "500000000000000000", + "refill_interval": 600, + } + + gasburner := map[string]any{ + "throughput": 25, + "total_count": 2000, + "max_pending": 8000, + "max_wallets": 500, + "gas_units_to_burn": 3000000, + "base_fee": 20, + "tip_fee": 5, + "rebroadcast": 5, + "refill_amount": "5000000000000000000", + "refill_balance": "2000000000000000000", + "refill_interval": 300, + } + + var ids []int + id, err := api.CreateSpammer("smoke-eoatx", spamoor.ScenarioEOATX, eoatx, true) + require.NoError(t, err, "failed to create eoatx spammer") + ids = append(ids, id) + id, err = api.CreateSpammer("smoke-gasburner", spamoor.ScenarioGasBurnerTX, gasburner, true) + require.NoError(t, err, "failed to create gasburner spammer") + ids = append(ids, id) + + for _, id := range ids { + idToDelete := id + t.Cleanup(func() { _ = api.DeleteSpammer(idToDelete) }) + } + + // Allow additional time to accumulate activity. + time.Sleep(60 * time.Second) + + // Fetch parsed metrics and print a concise summary. 
+ if mfs, err := api.GetMetrics(); err == nil && mfs != nil { + sent := sumCounter(mfs["spamoor_transactions_sent_total"]) + fail := sumCounter(mfs["spamoor_transactions_failed_total"]) + pend := sumGauge(mfs["spamoor_pending_transactions"]) + gas := sumCounter(mfs["spamoor_block_gas_usage"]) + t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sent, fail, pend, gas) + } else { + t.Logf("metrics unavailable or parse error: %v", err) + } + + time.Sleep(2 * time.Second) + printCollectedTraceReport(t, collector) + + // TODO: test should pass / fail based on results +} + +// --- helpers --- + +func requireHTTP(t *testing.T, url string, timeout time.Duration) { + t.Helper() + client := &http.Client{Timeout: 200 * time.Millisecond} + deadline := time.Now().Add(timeout) + var lastErr error + for time.Now().Before(deadline) { + resp, err := client.Get(url) + if err == nil { + _ = resp.Body.Close() + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return + } + lastErr = fmt.Errorf("status %d", resp.StatusCode) + } else { + lastErr = err + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("daemon not ready at %s: %v", url, lastErr) +} + +// Metric family helpers. 
+func sumCounter(f *dto.MetricFamily) float64 { + if f == nil || f.GetType() != dto.MetricType_COUNTER { + return 0 + } + var sum float64 + for _, m := range f.GetMetric() { + if m.GetCounter() != nil && m.GetCounter().Value != nil { + sum += m.GetCounter().GetValue() + } + } + return sum +} +func sumGauge(f *dto.MetricFamily) float64 { + if f == nil || f.GetType() != dto.MetricType_GAUGE { + return 0 + } + var sum float64 + for _, m := range f.GetMetric() { + if m.GetGauge() != nil && m.GetGauge().Value != nil { + sum += m.GetGauge().GetValue() + } + } + return sum +} diff --git a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go b/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go deleted file mode 100644 index 8a17b2067f..0000000000 --- a/test/e2e/evm_spamoor_trace_benchmark_e2e_test.go +++ /dev/null @@ -1,414 +0,0 @@ -//go:build evm - -package e2e - -import ( - "context" - "math/big" - "path/filepath" - "regexp" - "strconv" - "strings" - "testing" - "time" - - spamoor "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" - "github.com/ethereum/go-ethereum/ethclient" -) - -// TestSpamoorTraceBenchmark spins up reth (Docker via Tastora), local-da, ev-node (host binary) -// with OTEL tracing exported to an in-process OTLP/HTTP collector, and a Spamoor container -// driving EOA transfers directly to reth's internal ETH RPC. It runs a short load, then -// emits a concise metrics/trace summary and fails if activity or spans are missing. -func TestSpamoorTraceBenchmark(t *testing.T) { - t.Parallel() - - sut := NewSystemUnderTest(t) - - // Common EVM env: reth + local DA. Return JWT, genesis, endpoints, and reth node handle. - seqJWT, _, genesisHash, endpoints, rethNode := setupCommonEVMTest(t, sut, false) - - // In-process OTLP collector to receive ev-node spans. - collector := newOTLPCollector(t) - t.Cleanup(func() { collector.close() }) - - // Start sequencer (ev-node) with tracing enabled to our collector. 
- sequencerHome := filepath.Join(t.TempDir(), "sequencer") - // Target: ~1 Ggas/s with 100ms blocks => ~100M gas per block. - // Use tight block_time without lazy mode to enforce cadence. - setupSequencerNode(t, sut, sequencerHome, seqJWT, genesisHash, endpoints, - "--evnode.instrumentation.tracing=true", - "--evnode.instrumentation.tracing_endpoint", collector.endpoint(), - "--evnode.instrumentation.tracing_sample_rate", "1.0", - "--evnode.instrumentation.tracing_service_name", "ev-node-e2e", - "--evnode.node.block_time", "100ms", - ) - t.Log("Sequencer node is up") - - // Launch Spamoor container on the same Docker network; point it at reth INTERNAL ETH RPC. - ni, err := rethNode.GetNetworkInfo(context.Background()) - if err != nil { - t.Fatalf("failed to get reth network info: %v", err) - } - internalRPC := "http://" + ni.Internal.RPCAddress() - - spBuilder := spamoor.NewNodeBuilder(t.Name()). - WithDockerClient(rethNode.DockerClient). - WithDockerNetworkID(rethNode.NetworkID). - WithLogger(rethNode.Logger). - WithRPCHosts(internalRPC). - WithPrivateKey(TestPrivateKey). - WithHostPort(0) - - ctx := context.Background() - spNode, err := spBuilder.Build(ctx) - if err != nil { - t.Skipf("cannot build spamoor container: %v", err) - return - } - t.Cleanup(func() { _ = spNode.Remove(context.Background()) }) - if err := spNode.Start(ctx); err != nil { - t.Skipf("cannot start spamoor container: %v", err) - return - } - - // Discover host-mapped ports and wait for daemon readiness. - spInfo, err := spNode.GetNetworkInfo(ctx) - if err != nil { - t.Fatalf("failed to get spamoor network info: %v", err) - } - apiAddr := "http://127.0.0.1:" + spInfo.External.Ports.HTTP - metricsAddr := "http://127.0.0.1:" + spInfo.External.Ports.Metrics - requireHTTP(t, apiAddr+"/api/spammers", 30*time.Second) - api := NewSpamoorAPI(apiAddr) - - // Gasburner-only configuration below; eoatx disabled for this ceiling test. 
- - // Start with gasburner-only to maximize gas per block and simplify inclusion checks. - // Aim for ~100M gas per 100ms block: set 10M gas/tx and target ~10 tx/block total across spammers. - spammerIDs := make([]int, 0, 4) - - // Add 1-2 heavier-gas spammers using known Spamoor scenarios. We validate each config - // before starting to avoid failures on unsupported images. - // Prefer gasburnertx spammers to maximize gas usage per transaction. - gasBurnerCfg := strings.Join([]string{ - "throughput: 200", // aggressive per-spammer rate (per-second), drives continuous backlog - "total_count: 20000", - "max_pending: 50000", - "max_wallets: 1000", - "gas_units_to_burn: 10000000", // 10M gas/tx - "refill_amount: 5000000000000000000", // 5 ETH - "refill_balance: 2000000000000000000", // 2 ETH - "refill_interval: 300", - "base_fee: 20", // gwei (reduce to avoid fee-cap rejection) - "tip_fee: 5", // gwei - "rebroadcast: 2", - }, "\n") - for i := 0; i < 14; i++ { // start with 14 spammers to create a stronger continuous backlog - name := "benchmark-gasburner-" + strconv.Itoa(i) - if err := api.ValidateScenarioConfig(name+"-probe", "gasburnertx", gasBurnerCfg); err != nil { - t.Logf("gasburnertx not supported or invalid config: %v", err) - break - } - id, err := api.CreateSpammer(name, "gasburnertx", gasBurnerCfg, true) - if err != nil { - t.Logf("failed to start gasburnertx: %v", err) - break - } - spammerIDs = append(spammerIDs, id) - idx := len(spammerIDs) - 1 - t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) - } - - // Wait for any spammer to report running, then proceed. 
- runUntil := time.Now().Add(15 * time.Second) - for time.Now().Before(runUntil) { - ok := false - for _, id := range spammerIDs { - s, _ := api.GetSpammer(id) - if s != nil && s.Status == 1 { - ok = true - break - } - } - if ok { - break - } - time.Sleep(200 * time.Millisecond) - } - - // Let the initial load run for ~30–45s to stabilize block production and metrics, - // but also poll metrics so we can break early once activity is observed. - metricsAPI := NewSpamoorAPI(metricsAddr) - var metricsText string - deadline := time.Now().Add(45 * time.Second) - for { - if time.Now().After(deadline) { - break - } - m, err := metricsAPI.GetMetrics() - if err == nil && strings.Contains(m, "spamoor_transactions_sent_total") { - metricsText = m - if scrapeCounter(m, `^spamoor_transactions_sent_total\{.*\}\s+(\\d+(?:\\.\\d+)?)`) > 0 { - break - } - } - time.Sleep(500 * time.Millisecond) - } - - // If we didn't capture metrics during the loop, fetch once now (best-effort). - if metricsText == "" { - if m, err := metricsAPI.GetMetrics(); err == nil { - metricsText = m - } else { - t.Logf("metrics unavailable (continuing without them): %v", err) - } - } - - // Try to saturate block gas: dynamically ramp up gasburner spammers until utilization approaches ~100M/block or we hit a safe cap. - if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { - defer ec.Close() - rampDeadline := time.Now().Add(60 * time.Second) - for len(spammerIDs) < 20 && time.Now().Before(rampDeadline) { // ramp up further if still underutilized - avg, peak := sampleGasUtilization(t, ec, 5*time.Second) - t.Logf("Ramp check: gas utilization avg=%.1f%% peak=%.1f%% spammers=%d", avg*100, peak*100, len(spammerIDs)) - if peak >= 0.9 { // good enough - break - } - // Add another gasburner spammer to push harder. 
- id, err := api.CreateSpammer("benchmark-gasburner-ramp-"+strconv.Itoa(len(spammerIDs)), "gasburnertx", gasBurnerCfg, true) - if err != nil { - t.Logf("failed to add ramp spammer: %v", err) - break - } - spammerIDs = append(spammerIDs, id) - idx := len(spammerIDs) - 1 - t.Cleanup(func() { _ = api.DeleteSpammer(spammerIDs[idx]) }) - // give it a moment to start before next measurement - time.Sleep(3 * time.Second) - } - } - - if metricsText != "" { - sentTotal := scrapeCounter(metricsText, `^spamoor_transactions_sent_total\{.*\}\s+(\d+(?:\.\d+)?)`) - failures := scrapeCounter(metricsText, `^spamoor_transactions_failed_total\{.*\}\s+(\d+(?:\.\d+)?)`) - pending := scrapeGauge(metricsText, `^spamoor_pending_transactions\{.*\}\s+(\d+(?:\.\d+)?)`) - blockGas := scrapeCounter(metricsText, `^spamoor_block_gas_usage\{.*\}\s+(\d+(?:\.\d+)?)`) - t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sentTotal, failures, pending, blockGas) - if sentTotal < 5 { - t.Logf("warning: low spamoor sent count (%.0f)", sentTotal) - } - } else { - t.Log("Spamoor metrics unavailable; will validate activity via block headers and spans") - } - - // Give ev-node a moment to flush pending span batches, then aggregate spans. - time.Sleep(2 * time.Second) - spans := collector.getSpans() - if len(spans) == 0 { - t.Fatalf("no ev-node spans recorded by OTLP collector") - } - printCollectedTraceReport(t, collector) - - // Optional: sanity-check reth RPC responsiveness (debug tracing optional and best-effort). - // If debug API is enabled in the reth image, a recent tx could be traced here. - // We only check that RPC endpoint responds to a simple request to catch regressions. - requireJSONRPC(t, endpoints.GetSequencerEthURL(), 10*time.Second) - - // Report observed block gas utilization over a short window. - // Query latest headers to compute peak and average gas usage fraction. 
- if ec, err := ethclient.Dial(endpoints.GetSequencerEthURL()); err == nil { - defer ec.Close() - avg, peak := sampleGasUtilization(t, ec, 10*time.Second) - // Also compute a normalized utilization vs a 30M gas cap for readability when chain gasLimit is huge. - navg, npeak := sampleNormalizedUtilization(t, ec, 10*time.Second, 30_000_000) - t.Logf("Block gas utilization: raw avg=%.3f%% peak=%.3f%% | normalized(30M) avg=%.1f%% peak=%.1f%%", avg*100, peak*100, navg*100, npeak*100) - if avg < 0.05 && peak < 0.1 { // very low; dump recent block gas details for debugging - debugLogRecentBlockGas(t, ec, 8) - } - // Additionally compute aggregate gas/sec over a recent time window (by timestamps), - // which is robust even if the chain has a long history. - gsec, gpb, btMs := computeGasThroughputWindow(t, ec, 60) // last ~60s - t.Logf("Throughput window: avg_gas_block=%.0f avg_block_time=%.1fms gas_sec=%.0f", gpb, btMs, gsec) - // Report simple target status for 1 Ggas/s @ 100ms cadence. - const ( - targetGasSec = 1_000_000_000.0 - minGasSec = 0.9 * targetGasSec // 900M gas/sec - minBtMs = 80.0 // acceptable block time window - maxBtMs = 150.0 - minGasPerBlock = 90_000_000.0 // ~100M +/- 10% - maxGasPerBlock = 110_000_000.0 - ) - hit := gsec >= minGasSec && btMs >= minBtMs && btMs <= maxBtMs && gpb >= minGasPerBlock && gpb <= maxGasPerBlock - if hit { - t.Logf("Target 1Ggas/s@100ms: PASS (gas_sec=%.0f, avg_bt=%.1fms, gas_block=%.0f)", gsec, btMs, gpb) - } else { - t.Logf("Target 1Ggas/s@100ms: NOT MET (gas_sec=%.0f, avg_bt=%.1fms, gas_block=%.0f). Thresholds: gas_sec>=900,000,000; 80ms<=avg_bt<=150ms; 90M<=gas_block<=110M.", gsec, btMs, gpb) - } - // If metrics were unavailable earlier, ensure on-chain activity by checking recent tx counts. - // Fail if we observe zero transactions in the last ~8 blocks. 
- if metricsText == "" { - header, herr := ec.HeaderByNumber(context.Background(), nil) - if herr == nil && header != nil { - var anyTx bool - latest := new(big.Int).Set(header.Number) - min := new(big.Int).Sub(latest, big.NewInt(int64(7))) - if min.Sign() < 0 { - min.SetInt64(0) - } - for b := new(big.Int).Set(latest); b.Cmp(min) >= 0; b.Sub(b, big.NewInt(1)) { - blk, e := ec.BlockByNumber(context.Background(), b) - if e == nil && blk != nil && len(blk.Transactions()) > 0 { - anyTx = true - break - } - } - if !anyTx { - t.Fatalf("no on-chain activity observed in recent blocks and metrics unavailable") - } - } - } - } -} - -// --- small helpers to scrape Prometheus text --- - -func scrapeCounter(metrics, pattern string) float64 { - return scrapeFloat(metrics, pattern) -} - -func scrapeGauge(metrics, pattern string) float64 { - return scrapeFloat(metrics, pattern) -} - -func scrapeFloat(metrics, pattern string) float64 { - re := regexp.MustCompile(pattern) - lines := strings.Split(metrics, "\n") - for _, line := range lines { - if m := re.FindStringSubmatch(line); len(m) == 2 { - if v, err := strconv.ParseFloat(m[1], 64); err == nil { - return v - } - } - } - return 0 -} - -// sampleGasUtilization samples latest blocks for the given duration and returns -// average and peak gas used fraction. -func sampleGasUtilization(t *testing.T, ec *ethclient.Client, dur time.Duration) (avg, peak float64) { - t.Helper() - deadline := time.Now().Add(dur) - var sum float64 - var n int - for time.Now().Before(deadline) { - header, err := ec.HeaderByNumber(context.Background(), nil) - if err == nil && header != nil && header.GasLimit > 0 { - frac := float64(header.GasUsed) / float64(header.GasLimit) - sum += frac - n++ - if frac > peak { - peak = frac - } - } - time.Sleep(300 * time.Millisecond) - } - if n > 0 { - avg = sum / float64(n) - } - return -} - -// sampleNormalizedUtilization computes utilization relative to a targetGasLimit rather than the chain's gasLimit. 
-func sampleNormalizedUtilization(t *testing.T, ec *ethclient.Client, dur time.Duration, targetGasLimit uint64) (avg, peak float64) { - t.Helper() - if targetGasLimit == 0 { - return 0, 0 - } - deadline := time.Now().Add(dur) - var sum float64 - var n int - for time.Now().Before(deadline) { - header, err := ec.HeaderByNumber(context.Background(), nil) - if err == nil && header != nil { - frac := float64(header.GasUsed) / float64(targetGasLimit) - if frac > 1 { - frac = 1 // cap at 100% - } - sum += frac - n++ - if frac > peak { - peak = frac - } - } - time.Sleep(300 * time.Millisecond) - } - if n > 0 { - avg = sum / float64(n) - } - return -} - -// debugLogRecentBlockGas logs gasUsed/gasLimit and tx count for the most recent n blocks. -func debugLogRecentBlockGas(t *testing.T, ec *ethclient.Client, n int) { - t.Helper() - // Get latest number - header, err := ec.HeaderByNumber(context.Background(), nil) - if err != nil || header == nil { - t.Logf("debug: failed to fetch latest header: %v", err) - return - } - latest := new(big.Int).Set(header.Number) - min := new(big.Int).Sub(latest, big.NewInt(int64(n-1))) - if min.Sign() < 0 { - min.SetInt64(0) - } - t.Logf("debug: recent block gas (number gasUsed/gasLimit txs)") - for b := new(big.Int).Set(latest); b.Cmp(min) >= 0; b.Sub(b, big.NewInt(1)) { - h, err := ec.HeaderByNumber(context.Background(), b) - if err != nil || h == nil { - t.Logf("debug: block %s header error: %v", b.String(), err) - continue - } - blk, err := ec.BlockByNumber(context.Background(), b) - txc := 0 - if err == nil && blk != nil { - txc = len(blk.Transactions()) - } - t.Logf("debug: %s %d/%d txs=%d", h.Number.String(), h.GasUsed, h.GasLimit, txc) - } -} - -// computeGasThroughputWindow scans backwards from the latest block until it accumulates -// approximately windowSec seconds of chain time, then returns gas/sec, avg gas/block, -// and avg block time (ms) over that window. 
-func computeGasThroughputWindow(t *testing.T, ec *ethclient.Client, windowSec int) (gasPerSec, avgGasPerBlock, avgBlockTimeMs float64) { - t.Helper() - if windowSec <= 0 { windowSec = 60 } - latestHeader, err := ec.HeaderByNumber(context.Background(), nil) - if err != nil || latestHeader == nil { return 0, 0, 0 } - latestNum := new(big.Int).Set(latestHeader.Number) - latestTS := latestHeader.Time - - var sumGas uint64 - var count int - var oldestTS uint64 = latestTS - // Walk backwards up to a cap of blocks to avoid long scans if blocks are sparse. - capBlocks := 5000 - for i := 0; i < capBlocks; i++ { - num := new(big.Int).Sub(latestNum, big.NewInt(int64(i))) - if num.Sign() < 0 { break } - blk, err := ec.BlockByNumber(context.Background(), num) - if err != nil || blk == nil { break } - sumGas += blk.GasUsed() - count++ - oldestTS = blk.Time() - if latestTS - oldestTS >= uint64(windowSec) { break } - } - if count < 2 || latestTS <= oldestTS { return 0, 0, 0 } - elapsed := float64(latestTS - oldestTS) // seconds - gasPerSec = float64(sumGas) / elapsed - avgGasPerBlock = float64(sumGas) / float64(count) - avgBlockTimeMs = (elapsed / float64(count-1)) * 1000.0 - return -} diff --git a/test/e2e/go.mod b/test/e2e/go.mod index c0d8ca2f25..ac8f948788 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -14,6 +14,7 @@ require ( github.com/evstack/ev-node/execution/evm v0.0.0-20250602130019-2a732cf903a5 github.com/evstack/ev-node/execution/evm/test v0.0.0-00010101000000-000000000000 github.com/libp2p/go-libp2p v0.47.0 + github.com/prometheus/client_model v0.6.2 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.40.0 @@ -254,7 +255,6 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.23.2 // indirect - github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect 
github.com/prometheus/procfs v0.17.0 // indirect github.com/quic-go/qpack v0.6.0 // indirect diff --git a/test/e2e/spamoor_api.go b/test/e2e/spamoor_api.go deleted file mode 100644 index 17d0aac7e7..0000000000 --- a/test/e2e/spamoor_api.go +++ /dev/null @@ -1,144 +0,0 @@ -//go:build evm - -package e2e - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "time" -) - -// SpamoorAPI is a thin HTTP client for the spamoor-daemon API -type SpamoorAPI struct { - BaseURL string // e.g., http://127.0.0.1:8080 - client *http.Client -} - -func NewSpamoorAPI(baseURL string) *SpamoorAPI { - return &SpamoorAPI{BaseURL: baseURL, client: &http.Client{Timeout: 2 * time.Second}} -} - -type createSpammerReq struct { - Name string `json:"name"` - Description string `json:"description"` - Scenario string `json:"scenario"` - ConfigYAML string `json:"config"` - StartImmediately bool `json:"startImmediately"` -} - -// CreateSpammer posts a new spammer; returns its ID. -func (api *SpamoorAPI) CreateSpammer(name, scenario, configYAML string, start bool) (int, error) { - reqBody := createSpammerReq{Name: name, Description: name, Scenario: scenario, ConfigYAML: configYAML, StartImmediately: start} - b, _ := json.Marshal(reqBody) - url := fmt.Sprintf("%s/api/spammer", api.BaseURL) - resp, err := api.client.Post(url, "application/json", bytes.NewReader(b)) - if err != nil { - return 0, err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - body, _ := io.ReadAll(resp.Body) - return 0, fmt.Errorf("create spammer failed: %s", string(body)) - } - var id int - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&id); err != nil { - return 0, fmt.Errorf("decode id: %w", err) - } - return id, nil -} - -// ValidateScenarioConfig attempts to create a spammer with the provided scenario/config -// without starting it, and deletes it immediately if creation succeeds. It returns -// a descriptive error when the daemon rejects the config. 
-func (api *SpamoorAPI) ValidateScenarioConfig(name, scenario, configYAML string) error { - id, err := api.CreateSpammer(name, scenario, configYAML, false) - if err != nil { - return fmt.Errorf("invalid scenario config: %w", err) - } - // Best-effort cleanup of the temporary spammer - _ = api.DeleteSpammer(id) - return nil -} - -// DeleteSpammer deletes an existing spammer by ID. -func (api *SpamoorAPI) DeleteSpammer(id int) error { - url := fmt.Sprintf("%s/api/spammer/%d", api.BaseURL, id) - req, _ := http.NewRequest(http.MethodDelete, url, http.NoBody) - resp, err := api.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("delete spammer failed: %s", string(body)) - } - return nil -} - -// StartSpammer sends a start request for a given spammer ID. -func (api *SpamoorAPI) StartSpammer(id int) error { - url := fmt.Sprintf("%s/api/spammer/%d/start", api.BaseURL, id) - req, _ := http.NewRequest(http.MethodPost, url, http.NoBody) - resp, err := api.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("start spammer failed: %s", string(body)) - } - return nil -} - -// GetMetrics fetches the Prometheus /metrics endpoint from the daemon. -func (api *SpamoorAPI) GetMetrics() (string, error) { - url := fmt.Sprintf("%s/metrics", api.BaseURL) - resp, err := api.client.Get(url) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - body, _ := io.ReadAll(resp.Body) - return "", fmt.Errorf("metrics request failed: %s", string(body)) - } - b, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - return string(b), nil -} - -// Spammer represents a spammer resource minimally for status checks. 
-type Spammer struct { - ID int `json:"id"` - Name string `json:"name"` - Scenario string `json:"scenario"` - Status int `json:"status"` -} - -// GetSpammer retrieves a spammer by ID. -func (api *SpamoorAPI) GetSpammer(id int) (*Spammer, error) { - url := fmt.Sprintf("%s/api/spammer/%d", api.BaseURL, id) - resp, err := api.client.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("get spammer failed: %s", string(body)) - } - var s Spammer - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&s); err != nil { - return nil, err - } - return &s, nil -} From a784783518694abc2a4099a107740e4cab795021 Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 19 Feb 2026 10:37:58 +0000 Subject: [PATCH 08/11] chore: remove local pin --- test/e2e/go.mod | 5 ++--- test/e2e/go.sum | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 0b99c30c30..58a75eaa8d 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -5,10 +5,9 @@ go 1.25.6 require ( cosmossdk.io/math v1.5.3 github.com/celestiaorg/go-square/v3 v3.0.2 - github.com/celestiaorg/tastora v0.12.0 + github.com/celestiaorg/tastora v0.14.0 github.com/cosmos/cosmos-sdk v0.53.6 github.com/cosmos/ibc-go/v8 v8.8.0 - github.com/docker/go-connections v0.5.0 github.com/ethereum/go-ethereum v1.16.8 github.com/evstack/ev-node v1.0.0-rc.3 github.com/evstack/ev-node/execution/evm v0.0.0-20250602130019-2a732cf903a5 @@ -22,7 +21,6 @@ require ( ) replace ( - github.com/celestiaorg/tastora => /Users/chatton/checkouts/celestiaorg/tastora github.com/evstack/ev-node => ../../ github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm @@ -102,6 +100,7 @@ require ( github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker 
v28.5.2+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dunglas/httpsfv v1.1.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index e4b1980905..bbf3e2e3f4 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -145,8 +145,8 @@ github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY7 github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= -github.com/celestiaorg/tastora v0.12.0 h1:xs1a/d+/QFbebShoHZxnWj8q3Kr1i6PY5137oxR+h+4= -github.com/celestiaorg/tastora v0.12.0/go.mod h1:ObeKMraNab/xofYZyylnOEiveHvUAPdKP5HiNjnvQoM= +github.com/celestiaorg/tastora v0.14.0 h1:kvcx1MSKTx4DjOW60g9lcndL/LFpLq3H+CbzglaNlA0= +github.com/celestiaorg/tastora v0.14.0/go.mod h1:ObeKMraNab/xofYZyylnOEiveHvUAPdKP5HiNjnvQoM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= From c5306c853c86c816f64b141156890cde4668d37b Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 19 Feb 2026 10:42:13 +0000 Subject: [PATCH 09/11] chore: adding basic assertion --- test/e2e/evm_spamoor_smoke_test.go | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/test/e2e/evm_spamoor_smoke_test.go b/test/e2e/evm_spamoor_smoke_test.go index d75cac3141..0c65fc3bc4 100644 --- a/test/e2e/evm_spamoor_smoke_test.go +++ b/test/e2e/evm_spamoor_smoke_test.go @@ -53,11 +53,11 @@ func TestSpamoorSmoke(t *testing.T) { WithRPCHosts(internalRPC). 
 	WithPrivateKey(TestPrivateKey)
 
-	ctx := context.Background()
+	ctx := t.Context()
 	spNode, err := spBuilder.Build(ctx)
 	require.NoError(t, err, "failed to build sp node")
 
-	t.Cleanup(func() { _ = spNode.Remove(context.Background()) })
+	t.Cleanup(func() { _ = spNode.Remove(context.Background()) })
 	require.NoError(t, spNode.Start(ctx), "failed to start spamoor node")
 
 	// Wait for daemon readiness.
@@ -115,20 +115,16 @@ func TestSpamoorSmoke(t *testing.T) {
 	time.Sleep(60 * time.Second)
 
 	// Fetch parsed metrics and print a concise summary.
-	if mfs, err := api.GetMetrics(); err == nil && mfs != nil {
-		sent := sumCounter(mfs["spamoor_transactions_sent_total"])
-		fail := sumCounter(mfs["spamoor_transactions_failed_total"])
-		pend := sumGauge(mfs["spamoor_pending_transactions"])
-		gas := sumCounter(mfs["spamoor_block_gas_usage"])
-		t.Logf("Spamoor summary: sent=%.0f failed=%.0f pending=%.0f block_gas=%.0f", sent, fail, pend, gas)
-	} else {
-		t.Logf("metrics unavailable or parse error: %v", err)
-	}
+	metrics, err := api.GetMetrics()
+	require.NoError(t, err, "failed to get metrics")
+	sent := sumCounter(metrics["spamoor_transactions_sent_total"])
+	fail := sumCounter(metrics["spamoor_transactions_failed_total"])
 
 	time.Sleep(2 * time.Second)
 	printCollectedTraceReport(t, collector)
 
-	// TODO: test should pass / fail based on results
+	require.Greater(t, sent, float64(0), "at least one transaction should have been sent")
+	require.Zero(t, fail, "no transactions should have failed")
 }
 
 // --- helpers ---

From 6b7a3288c2b1a076686f368cc27fae82bf6afc7b Mon Sep 17 00:00:00 2001
From: chatton
Date: Thu, 19 Feb 2026 11:04:36 +0000
Subject: fix linter

---
 test/e2e/evm_contract_bench_test.go      | 10 +++++-----
 test/e2e/evm_force_inclusion_e2e_test.go |  6 +++---
 test/e2e/evm_full_node_e2e_test.go       |  6 +++---
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/test/e2e/evm_contract_bench_test.go b/test/e2e/evm_contract_bench_test.go
index 6fe83f123c..70b40cd8b1 100644
---
a/test/e2e/evm_contract_bench_test.go +++ b/test/e2e/evm_contract_bench_test.go @@ -47,8 +47,8 @@ func BenchmarkEvmContractRoundtrip(b *testing.B) { sequencerHome := filepath.Join(workDir, "evm-bench-sequencer") // Start an in-process OTLP/HTTP receiver to collect traces from ev-node. - collector := newOTLPCollector(b) - defer collector.close() // nolint: errcheck // test only + collector := newOTLPCollector(b) + defer collector.close() // nolint: errcheck // test only // Start sequencer with tracing enabled, exporting to our in-process collector. client, _, cleanup := setupTestSequencer(b, sequencerHome, @@ -158,9 +158,9 @@ func (c *otlpCollector) endpoint() string { } func (c *otlpCollector) close() error { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - return c.server.Shutdown(ctx) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + return c.server.Shutdown(ctx) } func (c *otlpCollector) handleTraces(w http.ResponseWriter, r *http.Request) { diff --git a/test/e2e/evm_force_inclusion_e2e_test.go b/test/e2e/evm_force_inclusion_e2e_test.go index 8b71e39af8..261944a9ed 100644 --- a/test/e2e/evm_force_inclusion_e2e_test.go +++ b/test/e2e/evm_force_inclusion_e2e_test.go @@ -77,7 +77,7 @@ func setupSequencerWithForceInclusion(t *testing.T, sut *SystemUnderTest, nodeHo t.Helper() // Use common setup (no full node needed initially) - jwtSecret, _, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, false) + jwtSecret, _, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, false) // Create passphrase file passphraseFile := createPassphraseFile(t, nodeHome) @@ -192,7 +192,7 @@ func TestEvmFullNodeForceInclusionE2E(t *testing.T) { // --- Start Sequencer Setup --- // We manually setup sequencer here because we need the force inclusion flag, // and we need to capture variables for full node setup. 
- jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) passphraseFile := createPassphraseFile(t, sequencerHome) jwtSecretFile := createJWTSecretFile(t, sequencerHome, jwtSecret) @@ -284,7 +284,7 @@ func setupMaliciousSequencer(t *testing.T, sut *SystemUnderTest, nodeHome string t.Helper() // Use common setup with full node support - jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) passphraseFile := createPassphraseFile(t, nodeHome) jwtSecretFile := createJWTSecretFile(t, nodeHome, jwtSecret) diff --git a/test/e2e/evm_full_node_e2e_test.go b/test/e2e/evm_full_node_e2e_test.go index 72d0b9c1e2..32577fa82a 100644 --- a/test/e2e/evm_full_node_e2e_test.go +++ b/test/e2e/evm_full_node_e2e_test.go @@ -213,7 +213,7 @@ func setupSequencerWithFullNode(t *testing.T, sut *SystemUnderTest, sequencerHom t.Helper() // Common setup for both sequencer and full node - jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) // Setup sequencer setupSequencerNode(t, sut, sequencerHome, jwtSecret, genesisHash, endpoints) @@ -645,7 +645,7 @@ func setupSequencerWithFullNodeLazy(t *testing.T, sut *SystemUnderTest, sequence t.Helper() // Common setup for both sequencer and full node - jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) t.Logf("Generated test endpoints - Rollkit RPC: %s, P2P: %s, Full Node RPC: %s, P2P: %s, DA Port: %s", endpoints.RollkitRPCPort, endpoints.RollkitP2PPort, endpoints.FullNodeRPCPort, endpoints.FullNodeP2PPort, endpoints.DAPort) @@ 
-1039,7 +1039,7 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode t.Logf("Test mode: initial_lazy=%t, restart_lazy=%t", initialLazyMode, restartLazyMode) // Get JWT secrets and setup common components first - jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, endpoints, _ := setupCommonEVMTest(t, sut, true) // Setup sequencer based on initial mode if initialLazyMode { From fb545051220b5bea78687e37838947ae5537a5cf Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 19 Feb 2026 11:23:54 +0000 Subject: [PATCH 11/11] chore: fix indentation --- test/e2e/failover_e2e_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/failover_e2e_test.go b/test/e2e/failover_e2e_test.go index a7517ca694..7f7bf18ec7 100644 --- a/test/e2e/failover_e2e_test.go +++ b/test/e2e/failover_e2e_test.go @@ -54,7 +54,7 @@ func TestLeaseFailoverE2E(t *testing.T) { workDir := t.TempDir() // Get JWT secrets and setup common components first - jwtSecret, fullNodeJwtSecret, genesisHash, testEndpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, testEndpoints, _ := setupCommonEVMTest(t, sut, true) rethFn := evmtest.SetupTestRethNode(t) jwtSecret3 := rethFn.JWTSecretHex() fnInfo, err := rethFn.GetNetworkInfo(context.Background()) @@ -253,7 +253,7 @@ func TestHASequencerRollingRestartE2E(t *testing.T) { workDir := t.TempDir() // Get JWT secrets and setup common components first - jwtSecret, fullNodeJwtSecret, genesisHash, testEndpoints, _ := setupCommonEVMTest(t, sut, true) + jwtSecret, fullNodeJwtSecret, genesisHash, testEndpoints, _ := setupCommonEVMTest(t, sut, true) rethFn := evmtest.SetupTestRethNode(t) jwtSecret3 := rethFn.JWTSecretHex() fnInfo, err := rethFn.GetNetworkInfo(context.Background())