Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
17fcc48
feat(sequencer): catchup from base
julienrbrt Feb 9, 2026
c963984
fetch DA height
julienrbrt Feb 9, 2026
3e93c9a
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 10, 2026
b01ab73
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 10, 2026
42f0405
cl
julienrbrt Feb 10, 2026
014510b
align timestamping
julienrbrt Feb 10, 2026
98e546c
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 11, 2026
a30ea68
updates
julienrbrt Feb 11, 2026
90b5ee9
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 13, 2026
bc630c2
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 16, 2026
97f055f
updates
julienrbrt Feb 16, 2026
9288b29
fixes
julienrbrt Feb 16, 2026
325f1fc
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 17, 2026
dfdcfb2
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 17, 2026
bef0bef
rm test
julienrbrt Feb 17, 2026
2f05cb9
ai test
julienrbrt Feb 18, 2026
728bd4c
cleanup
julienrbrt Feb 18, 2026
d7b8b2c
fixes
julienrbrt Feb 18, 2026
489fc33
bump
julienrbrt Feb 18, 2026
551c818
base sequencer don't sign
julienrbrt Feb 18, 2026
53dde2d
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 18, 2026
9f83aec
fill in empty blocks
julienrbrt Feb 19, 2026
a83c1ea
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 19, 2026
bfe40c7
fix test
julienrbrt Feb 19, 2026
0d9c0ff
cherry-pick part of #2963
julienrbrt Feb 19, 2026
555f99a
updates
julienrbrt Feb 19, 2026
255c891
improvements
julienrbrt Feb 19, 2026
5e78832
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 19, 2026
8f1217a
Merge branch 'main' into julien/catchup-base
julienrbrt Feb 19, 2026
0ee3484
fix build
julienrbrt Feb 19, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added

- Add disaster recovery for sequencer
- Catch up possible DA-only blocks when restarting. [#3057](https://github.com/evstack/ev-node/pull/3057)
- Verify DA and P2P state on restart (prevent double-signing). [#3061](https://github.com/evstack/ev-node/pull/3061)
- Node pruning support. [#2984](https://github.com/evstack/ev-node/pull/2984)
- Two different sort of pruning implemented:
_Classic pruning_ (`all`): prunes given `HEAD-n` blocks from the databases, including store metadatas.
Expand Down
4 changes: 4 additions & 0 deletions apps/evm/server/force_inclusion_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,10 @@ func (m *mockDA) HasForcedInclusionNamespace() bool {
return true
}

// GetLatestDAHeight satisfies the DA client interface for tests.
// The mock always reports height 0 and no error.
func (m *mockDA) GetLatestDAHeight(_ context.Context) (uint64, error) {
	var latest uint64
	return latest, nil
}

func TestForceInclusionServer_handleSendRawTransaction_Success(t *testing.T) {
testHeight := uint64(100)

Expand Down
17 changes: 17 additions & 0 deletions block/internal/da/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -299,6 +299,23 @@ func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte)
}
}

// GetLatestDAHeight returns the latest height available on the DA layer by
// querying the network head.
// GetLatestDAHeight returns the latest height available on the DA layer by
// querying the network head. The query is bounded by the client's default
// timeout so a slow DA endpoint cannot block the caller indefinitely.
func (c *client) GetLatestDAHeight(ctx context.Context) (uint64, error) {
	headCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout)
	defer cancel()

	head, err := c.headerAPI.NetworkHead(headCtx)
	switch {
	case err != nil:
		return 0, fmt.Errorf("failed to get DA network head: %w", err)
	case head == nil:
		// Defensive: some header APIs can return (nil, nil).
		return 0, fmt.Errorf("DA network head returned nil header")
	}

	return head.Height, nil
}

// RetrieveForcedInclusion retrieves blobs from the forced inclusion namespace at the specified height.
func (c *client) RetrieveForcedInclusion(ctx context.Context, height uint64) datypes.ResultRetrieve {
if !c.hasForcedNamespace {
Expand Down
3 changes: 3 additions & 0 deletions block/internal/da/forced_inclusion_retriever.go
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,9 @@ func (r *forcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context

if result.Code == datypes.StatusNotFound {
r.logger.Debug().Uint64("height", h).Msg("no forced inclusion blobs at height")
syncFetchedBlocks[h] = &BlockData{
Timestamp: result.Timestamp,
}
continue
}

Expand Down
3 changes: 3 additions & 0 deletions block/internal/da/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ type Client interface {
// Get retrieves blobs by their IDs. Used for visualization and fetching specific blobs.
Get(ctx context.Context, ids []datypes.ID, namespace []byte) ([]datypes.Blob, error)

// GetLatestDAHeight returns the latest height available on the DA layer.
GetLatestDAHeight(ctx context.Context) (uint64, error)

// Namespace accessors.
GetHeaderNamespace() []byte
GetDataNamespace() []byte
Expand Down
14 changes: 14 additions & 0 deletions block/internal/da/tracing.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,20 @@ func (t *tracedClient) Validate(ctx context.Context, ids []datypes.ID, proofs []
return res, nil
}

// GetLatestDAHeight wraps the inner client's call in a trace span, recording
// the resulting height as a span attribute on success and the error status on
// failure.
func (t *tracedClient) GetLatestDAHeight(ctx context.Context) (uint64, error) {
	spanCtx, span := t.tracer.Start(ctx, "DA.GetLatestDAHeight")
	defer span.End()

	latest, err := t.inner.GetLatestDAHeight(spanCtx)
	if err == nil {
		span.SetAttributes(attribute.Int64("da.latest_height", int64(latest)))
		return latest, nil
	}

	span.RecordError(err)
	span.SetStatus(codes.Error, err.Error())
	return 0, err
}

func (t *tracedClient) GetHeaderNamespace() []byte { return t.inner.GetHeaderNamespace() }
func (t *tracedClient) GetDataNamespace() []byte { return t.inner.GetDataNamespace() }
func (t *tracedClient) GetForcedInclusionNamespace() []byte {
Expand Down
9 changes: 5 additions & 4 deletions block/internal/da/tracing_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,10 +54,11 @@ func (m *mockFullClient) Validate(ctx context.Context, ids []datypes.ID, proofs
}
return nil, nil
}
func (m *mockFullClient) GetHeaderNamespace() []byte { return []byte{0x01} }
func (m *mockFullClient) GetDataNamespace() []byte { return []byte{0x02} }
func (m *mockFullClient) GetForcedInclusionNamespace() []byte { return []byte{0x03} }
func (m *mockFullClient) HasForcedInclusionNamespace() bool { return true }
func (m *mockFullClient) GetLatestDAHeight(_ context.Context) (uint64, error) { return 0, nil }
func (m *mockFullClient) GetHeaderNamespace() []byte { return []byte{0x01} }
func (m *mockFullClient) GetDataNamespace() []byte { return []byte{0x02} }
func (m *mockFullClient) GetForcedInclusionNamespace() []byte { return []byte{0x03} }
func (m *mockFullClient) HasForcedInclusionNamespace() bool { return true }

// setup a tracer provider + span recorder
func setupDATrace(t *testing.T, inner FullClient) (FullClient, *tracetest.SpanRecorder) {
Expand Down
15 changes: 9 additions & 6 deletions block/internal/executing/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -774,12 +774,15 @@ func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, hea

// ValidateBlock validates the created block.
func (e *Executor) ValidateBlock(_ context.Context, lastState types.State, header *types.SignedHeader, data *types.Data) error {
// Set custom verifier for aggregator node signature
header.SetCustomVerifierForAggregator(e.options.AggregatorNodeSignatureBytesProvider)

// Basic header validation
if err := header.ValidateBasic(); err != nil {
return fmt.Errorf("invalid header: %w", err)
if e.config.Node.BasedSequencer {
if err := header.Header.ValidateBasic(); err != nil {
return fmt.Errorf("invalid header: %w", err)
}
} else {
header.SetCustomVerifierForAggregator(e.options.AggregatorNodeSignatureBytesProvider)
if err := header.ValidateBasic(); err != nil {
return fmt.Errorf("invalid header: %w", err)
}
}

return lastState.AssertValidForNextState(header, data)
Expand Down
2 changes: 1 addition & 1 deletion block/internal/syncing/block_syncer.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,5 +21,5 @@ type BlockSyncer interface {
ValidateBlock(ctx context.Context, currState types.State, data *types.Data, header *types.SignedHeader) error

// VerifyForcedInclusionTxs verifies that forced inclusion transactions are properly handled.
VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error
VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error
}
73 changes: 61 additions & 12 deletions block/internal/syncing/syncer.go
Original file line number Diff line number Diff line change
Expand Up @@ -748,9 +748,18 @@ func (s *Syncer) TrySyncNextBlock(ctx context.Context, event *common.DAHeightEve
return err
}

// Verify forced inclusion transactions if configured
if event.Source == common.SourceDA {
if err := s.VerifyForcedInclusionTxs(ctx, currentState, data); err != nil {
// Verify forced inclusion transactions if configured.
// The check is actually only performed on DA-only enabled nodes, or P2P nodes catching up with the HEAD.
// P2P nodes at HEAD aren't actually able to verify forced inclusions txs as DA inclusion happens later (so DA hints are not available). This is a known limitation described in the ADR.
if event.Source == common.SourceDA || event.DaHeightHints != [2]uint64{0, 0} {
currentDAHeight := currentState.DAHeight
if event.DaHeightHints[0] > currentDAHeight {
currentDAHeight = event.DaHeightHints[0]
} else if event.DaHeightHints[1] > currentDAHeight {
currentDAHeight = event.DaHeightHints[1]
}

if err := s.VerifyForcedInclusionTxs(ctx, currentDAHeight, data); err != nil {
s.logger.Error().Err(err).Uint64("height", nextHeight).Msg("forced inclusion verification failed")
if errors.Is(err, errMaliciousProposer) {
// remove header as da included from cache
Expand All @@ -770,9 +779,49 @@ func (s *Syncer) TrySyncNextBlock(ctx context.Context, event *common.DAHeightEve

// Update DA height if needed
// This height is only updated when a height is processed from DA as P2P
// events do not contain DA height information
// events do not contain DA height information.
//
// When a sequencer restarts after extended downtime, it produces "catch-up"
// blocks containing forced inclusion transactions from missed DA epochs and
// submits them to DA at the current (much higher) DA height. This creates a
// gap between the state's DAHeight (tracking forced inclusion epoch progress)
// and event.DaHeight (the DA submission height).
//
// If we jump state.DAHeight directly to event.DaHeight, subsequent calls to
// VerifyForcedInclusionTxs would check the wrong epoch (the submission epoch
// instead of the next forced-inclusion epoch), causing valid catch-up blocks
// to be incorrectly flagged as malicious.
//
// To handle this, when the gap exceeds one DA epoch, we advance DAHeight by
// exactly one epoch per block. This lets the forced inclusion verifier check
// the correct epoch for each catch-up block. Once the sequencer finishes
// catching up and the gap closes, DAHeight converges to event.DaHeight.
if event.DaHeight > newState.DAHeight {
newState.DAHeight = event.DaHeight
epochSize := s.genesis.DAEpochForcedInclusion
gap := event.DaHeight - newState.DAHeight

if epochSize > 0 && gap > epochSize {
// Large gap detected — likely catch-up blocks from a restarted sequencer.
// Advance DAHeight by one epoch to keep forced inclusion verification
// aligned with the epoch the sequencer is replaying.
_, epochEnd, _ := types.CalculateEpochBoundaries(
newState.DAHeight, s.genesis.DAStartHeight, epochSize,
)
nextEpochStart := epochEnd + 1
if nextEpochStart > event.DaHeight {
// Shouldn't happen, but clamp to event.DaHeight as a safety net.
nextEpochStart = event.DaHeight
}
s.logger.Debug().
Uint64("current_da_height", newState.DAHeight).
Uint64("event_da_height", event.DaHeight).
Uint64("advancing_to", nextEpochStart).
Uint64("gap", gap).
Msg("large DA height gap detected (sequencer catch-up), advancing DA height by one epoch")
newState.DAHeight = nextEpochStart
} else {
newState.DAHeight = event.DaHeight
}
}

batch, err := s.store.NewBatch(ctx)
Expand Down Expand Up @@ -971,7 +1020,7 @@ func (s *Syncer) getEffectiveGracePeriod() uint64 {
// Note: Due to block size constraints (MaxBytes), sequencers may defer forced inclusion transactions
// to future blocks (smoothing). This is legitimate behavior within an epoch.
// However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins or grace boundary (whichever comes later).
func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState types.State, data *types.Data) error {
func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, daHeight uint64, data *types.Data) error {
if s.fiRetriever == nil {
return nil
}
Expand All @@ -981,7 +1030,7 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type
s.updateDynamicGracePeriod(blockFullness)

// Retrieve forced inclusion transactions from DA for current epoch
forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentState.DAHeight)
forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, daHeight)
if err != nil {
if errors.Is(err, da.ErrForceInclusionNotConfigured) {
s.logger.Debug().Msg("forced inclusion namespace not configured, skipping verification")
Expand Down Expand Up @@ -1068,10 +1117,10 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type
effectiveGracePeriod := s.getEffectiveGracePeriod()
graceBoundary := pending.EpochEnd + (effectiveGracePeriod * s.genesis.DAEpochForcedInclusion)

if currentState.DAHeight > graceBoundary {
if daHeight > graceBoundary {
maliciousTxs = append(maliciousTxs, pending)
s.logger.Warn().
Uint64("current_da_height", currentState.DAHeight).
Uint64("current_da_height", daHeight).
Uint64("epoch_end", pending.EpochEnd).
Uint64("grace_boundary", graceBoundary).
Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod).
Expand All @@ -1081,7 +1130,7 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type
Msg("forced inclusion transaction past grace boundary - marking as malicious")
} else {
remainingPending = append(remainingPending, pending)
if currentState.DAHeight > pending.EpochEnd {
if daHeight > pending.EpochEnd {
txsInGracePeriod++
}
}
Expand All @@ -1105,7 +1154,7 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type
effectiveGracePeriod := s.getEffectiveGracePeriod()
s.logger.Error().
Uint64("height", data.Height()).
Uint64("current_da_height", currentState.DAHeight).
Uint64("current_da_height", daHeight).
Int("malicious_count", len(maliciousTxs)).
Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod).
Uint64("effective_grace_periods", effectiveGracePeriod).
Expand All @@ -1125,7 +1174,7 @@ func (s *Syncer) VerifyForcedInclusionTxs(ctx context.Context, currentState type

s.logger.Info().
Uint64("height", data.Height()).
Uint64("da_height", currentState.DAHeight).
Uint64("da_height", daHeight).
Uint64("epoch_start", forcedIncludedTxsEvent.StartDaHeight).
Uint64("epoch_end", forcedIncludedTxsEvent.EndDaHeight).
Int("included_count", includedCount).
Expand Down
Loading
Loading