Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
56 commits
Select commit Hold shift + click to select a range
97ef36d
persist: replace file-per-entry with WAL-based persistence
wen-coding Mar 9, 2026
b3dd4a8
persist: refactor block/commitqc WAL into generic indexedWAL
wen-coding Mar 9, 2026
953cc9e
persist: fix errcheck lint warnings in blocks.go
wen-coding Mar 9, 2026
62c71b8
persist: fix gofmt alignment in wal_test.go
wen-coding Mar 9, 2026
5ce67a7
persist: skip non-lane directories in blocks dir with warning
wen-coding Mar 10, 2026
ed71b43
persist: use Option[LaneID] instead of *LaneID in loadAll
wen-coding Mar 10, 2026
fd5ed2c
persist: derive lane ID from directory name, keep empty WALs open
wen-coding Mar 10, 2026
4276d38
persist: add TestEmptyLaneWALSurvivesReopen
wen-coding Mar 10, 2026
a305f9f
persist: decode lane ID once and add tests for skip paths
wen-coding Mar 10, 2026
d936cb8
persist: replace Replay with ReadAll, remove unnecessary sort
wen-coding Mar 10, 2026
3a0719f
persist: add defense-in-depth checks for WAL index mapping
wen-coding Mar 10, 2026
81c53ff
fix: update state_test to expect write-time sequence check
wen-coding Mar 10, 2026
1dd2020
persist: simplify lazy lane WAL creation in PersistBlock
wen-coding Mar 10, 2026
4b89979
persist: detect gaps in loadAll for blocks and commitQCs
wen-coding Mar 10, 2026
21c995d
persist: demote per-block load log to DEBUG
wen-coding Mar 10, 2026
6c1d172
persist: document why blocks use per-lane WALs
wen-coding Mar 10, 2026
453906d
persist: remove noop flag, use Option for dir/iw instead
wen-coding Mar 10, 2026
73e3633
persist: simplify Close, consolidate loadAll log, drop slog import
wen-coding Mar 10, 2026
0441000
persist: extract directory name string literals into constants
wen-coding Mar 10, 2026
412bc56
persist: handle anchor past all persisted entries, remove ResetNext
wen-coding Mar 10, 2026
69be223
persist: add TODO for WAL Clear/TruncateAll method
wen-coding Mar 10, 2026
c126392
persist: fix crash-recovery bug in CommitQC DeleteBefore with empty WAL
wen-coding Mar 10, 2026
d3b515f
persist: simplify startup re-persist to only anchor's CommitQC
wen-coding Mar 10, 2026
eca2ad6
persist: review feedback — disable fsync, harden ReadAll, fix Close
wen-coding Mar 11, 2026
7d7baf2
persist: use TruncateAll instead of close-remove-reopen for Reset
wen-coding Mar 11, 2026
14d802f
fix stale "reset" references in test comments to say "TruncateAll"/"t…
wen-coding Mar 11, 2026
fe19830
Remove logger arg from NewWAL call after upstream API change
wen-coding Mar 12, 2026
7056256
Add time-based retention for stale lane WAL deletion
wen-coding Mar 13, 2026
f9f4af3
Derive commitQC WAL truncation point from anchor
wen-coding Mar 15, 2026
017da34
Add no-op persister comments to blocks.go and commitqcs.go
wen-coding Mar 15, 2026
9df7f9f
Unexport Close() on BlockPersister and CommitQCPersister
wen-coding Mar 16, 2026
0c2c2bd
Persist block WAL writes concurrently by lane
wen-coding Mar 16, 2026
0365abb
Persist CommitQCs concurrently with block writes
wen-coding Mar 16, 2026
05788cf
Enable fsync on block and commitQC WAL writes
wen-coding Mar 16, 2026
273ab80
Remove stale lane removal — defer to dynamic committee
wen-coding Mar 17, 2026
42595a5
Remove stale comments about DeleteBefore removing missing lanes
wen-coding Mar 17, 2026
8788096
Fix stale comments about DeleteBefore and persistence durability
wen-coding Mar 17, 2026
35bb0d8
Replace errgroup with scope.Parallel for concurrent persistence
wen-coding Mar 18, 2026
1fa7ae3
Add comment explaining why MaybeCreateLane is called outside PersistBlock
wen-coding Mar 18, 2026
4adc9a8
Parallelize per-lane truncation in DeleteBefore
wen-coding Mar 18, 2026
bef0d5b
Clean up test style: range-integer loops and TestDiff with Option
wen-coding Mar 18, 2026
83a43a9
Clarify PARALLEL-SAFE comment: safety is from lane isolation, not lw.…
wen-coding Mar 18, 2026
de6b380
Review fixes: document AllowEmpty invariant, wrap errors, fix style nits
wen-coding Mar 18, 2026
21664a4
Re-anchor nextBlockNum after TruncateAll in DeleteBefore
wen-coding Mar 18, 2026
810b7ba
Enforce contiguity after TruncateAll and add missing edge-case tests
wen-coding Mar 18, 2026
4281811
Harden WAL: FirstIdx() accessor, bounds check, idempotent close, tests
wen-coding Mar 18, 2026
3b0adaf
Remove stale comment about lane retention timeouts and fix gofmt alignment
wen-coding Mar 18, 2026
03dc013
Make BlockPersister and CommitQCPersister internally thread-safe
wen-coding Mar 19, 2026
c1abcde
Document DeleteBefore/PersistBlock ordering constraint on same lane
wen-coding Mar 19, 2026
e9c9b7a
Move anchor CommitQC re-persist into DeleteBefore as implementation detail
wen-coding Mar 19, 2026
f5b3e0f
Merge branch 'main' into wen/use_wal_for_persistence
wen-coding Mar 19, 2026
6b8dc6e
Signal block/commitQC persistence per-entry instead of per-batch
wen-coding Mar 20, 2026
3aa615f
Derive block prune cursors from CommitQC instead of precomputing laneFirsts
wen-coding Mar 20, 2026
270fdc3
Use utils.Mutex/RWMutex for explicit mutex protection in persisters
wen-coding Mar 20, 2026
09664f2
Return error from getOrCreateLane on no-op persister instead of nil
wen-coding Mar 20, 2026
c22fbd4
Merge branch 'main' into wen/use_wal_for_persistence
wen-coding Mar 20, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions sei-tendermint/internal/autobahn/avail/inner.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ import (
"github.com/sei-protocol/sei-chain/sei-tendermint/libs/utils"
)

// TODO: when dynamic committee changes are supported, newly joined members
// must be added to blocks, votes, nextBlockToPersist, and persistedBlockStart.
// Currently all four are initialized once in newInner from c.Lanes().All().
// BlockPersister creates lane WALs lazily inside PersistBlock, but the new
// member must also appear in inner.blocks before the next persist cycle.
type inner struct {
latestAppQC utils.Option[*types.AppQC]
latestCommitQC utils.AtomicSend[utils.Option[*types.CommitQC]]
Expand All @@ -23,8 +28,10 @@ type inner struct {
// reconstructed from the blocks already on disk (see newInner).
//
// TODO: consider giving this its own AtomicSend to avoid waking unrelated
// inner waiters (PushVote, PushCommitQC, etc.) on every markBlockPersisted
// call. Only RecvBatch needs to be notified of cursor changes;
// inner waiters (PushVote, PushCommitQC, etc.) on markBlockPersisted calls.
// Now that blocks are persisted concurrently by lane (one notification per
// lane per batch, not per block), the frequency is lower, but still not
// ideal. Only RecvBatch needs to be notified of cursor changes;
// collectPersistBatch is in the same goroutine and reads it directly.
nextBlockToPersist map[types.LaneID]types.BlockNumber

Expand Down
129 changes: 75 additions & 54 deletions sei-tendermint/internal/autobahn/avail/state.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,20 +157,18 @@ func NewState(key types.SecretKey, data *data.State, stateDir utils.Option[strin
return nil, err
}

// Delete files below the prune anchor that were filtered out by
// loadPersistedState. Also reset the CommitQC persister's cursor to
// match the post-prune range.
laneFirsts := make(map[types.LaneID]types.BlockNumber, len(inner.blocks))
for lane, q := range inner.blocks {
laneFirsts[lane] = q.first
}
if err := pers.blocks.DeleteBefore(laneFirsts); err != nil {
return nil, fmt.Errorf("prune stale block files: %w", err)
}
if err := pers.commitQCs.DeleteBefore(inner.commitQCs.first); err != nil {
return nil, fmt.Errorf("prune stale commitQC files: %w", err)
// Truncate WAL entries below the prune anchor that were filtered out by
// loadPersistedState.
if ls, ok := loaded.Get(); ok {
if anchor, ok := ls.pruneAnchor.Get(); ok {
if err := pers.blocks.DeleteBefore(anchor.CommitQC); err != nil {
return nil, fmt.Errorf("prune stale block WAL entries: %w", err)
}
if err := pers.commitQCs.DeleteBefore(anchor.CommitQC.Proposal().Index(), utils.Some(anchor.CommitQC)); err != nil {
return nil, fmt.Errorf("prune stale commitQC WAL entries: %w", err)
}
}
}
pers.commitQCs.ResetNext(inner.commitQCs.next)

return &State{
key: key,
Expand Down Expand Up @@ -631,18 +629,20 @@ func (s *State) Run(ctx context.Context) error {

// runPersist is the main loop for the persist goroutine.
// Write order:
// 1. Prune anchor (AppQC + CommitQC pair) — the crash-recovery watermark.
// 2. CommitQCs in order, then publish LastCommitQC immediately
// so consensus can advance without waiting for block writes.
// 3. Blocks per lane in order, markBlockPersisted after each.
// 4. Prune old blocks and CommitQCs.
// 1. Prune anchor (AppQC + CommitQC pair) — the crash-recovery watermark —
// followed by WAL truncation of blocks and CommitQCs. Truncation is
// co-located with the anchor write so the pruning point is derived
// directly from the anchor's CommitQC. Truncation must happen before
// writes because the WAL requires contiguous indices — if the anchor
// advanced past all persisted entries, DeleteBefore resets the WAL so
// new writes start clean.
// 2. CommitQCs and blocks concurrently via scope.Parallel: one goroutine for
// CommitQCs, one goroutine per lane for blocks (sequential within each
// lane). Each goroutine publishes its result (markCommitQCsPersisted /
// markBlockPersisted) per entry so voting unblocks ASAP.
//
// The prune anchor is a pruning watermark: on restart we resume from it.
//
// Blocks are persisted one at a time with inner.nextBlockToPersist
// updated after each write, so vote latency equals single-block write
// time regardless of batch size.
//
// TODO: use a single WAL for anchor and CommitQCs to make
// this atomic rather than relying on write order.
func (s *State) runPersist(ctx context.Context, pers persisters) error {
Expand All @@ -654,51 +654,74 @@ func (s *State) runPersist(ctx context.Context, pers persisters) error {
}

// 1. Persist prune anchor first — establishes the crash-recovery watermark.
// All WAL pruning is co-located here so the truncation point is
// derived directly from the anchor, making the safety invariant
// explicit: we only truncate entries the on-disk anchor covers.
// Block WAL pruning must happen before writes because the WAL
// requires contiguous indices — if the anchor advanced past all
// persisted entries, DeleteBefore resets the WAL so new writes
// start clean.
if anchor, ok := batch.pruneAnchor.Get(); ok {
if err := pers.pruneAnchor.Persist(PruneAnchorConv.Encode(anchor)); err != nil {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

afaiu pruneAnchor will still use the 2-file persister. Is this supposed to stay like this? If so, then we perhaps need to productionize the 2-file persister more - like adding a checksum in the file would be nice.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I actually did that on the first draft, but felt WAL is not the best fit here. We only need one most recent snapshot at all times, there is no history at all.
If you agree with that conclusion, I could of course productionize the 2-file persister more, but I feel that should be its own PR, because this PR is mostly about WAL replacing local files.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

return fmt.Errorf("persist prune anchor: %w", err)
}
s.advancePersistedBlockStart(anchor.CommitQC)
lastPersistedAppQCNext = anchor.CommitQC.Proposal().Index() + 1
}

// 2. Persist new CommitQCs, then publish immediately so consensus
// can advance without waiting for block writes or pruning.
for _, qc := range batch.commitQCs {
if err := pers.commitQCs.PersistCommitQC(qc); err != nil {
return fmt.Errorf("persist commitqc %d: %w", qc.Index(), err)
if err := pers.blocks.DeleteBefore(anchor.CommitQC); err != nil {
return fmt.Errorf("block deleteBefore: %w", err)
}
if err := pers.commitQCs.DeleteBefore(anchor.CommitQC.Proposal().Index(), utils.Some(anchor.CommitQC)); err != nil {
Copy link
Contributor

@pompon0 pompon0 Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: I know we are kind of optimizing in the void without benchmarks, but there are still some disk writes here which are unnecessarily sequential: commitQCs.DeleteBefore can be parallel to blocks.DeleteBefore. CommitQCs.persist can be parallel to blocks.DeleteBefore. In fact each DeleteBefore/persist for each WAL is independent and only Anchor write needs to happen sequentially to everything else. Also we might want to persist first, then do DeleteBefore. Also DeleteBefore doesn't require fsync (I'm not sure how WAL behaves there).

Copy link
Contributor Author

@wen-coding wen-coding Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In blocks, we are mainly doing DeleteBefore before persisting for the "after the anchor, the commitQC must be contiguous check" as a defense in depth. If you think that is unnecessary and guaranteed by caller, I can remove it. But then one screw up and our indices calculation would be wrong. (Of course I can also maintain a map of WAL index to commitQC RoadIndex and discard the late commitQC which tried to fill a hole.)
What do you think?

return fmt.Errorf("commitqc deleteBefore: %w", err)
}
}
if len(batch.commitQCs) > 0 {
s.markCommitQCsPersisted(batch.commitQCs[len(batch.commitQCs)-1])
}

// 3. Persist blocks (mark each individually for vote latency).
// 2. Persist CommitQCs and blocks concurrently. CommitQCs go in
// one goroutine; blocks fan out one goroutine per lane (sequential
// within each lane to preserve block-number ordering). Each
// goroutine publishes its result (markCommitQCsPersisted /
// markBlockPersisted) as soon as it finishes.
blocksByLane := make(map[types.LaneID][]*types.Signed[*types.LaneProposal])
for _, proposal := range batch.blocks {
h := proposal.Msg().Block().Header()
if err := pers.blocks.PersistBlock(proposal); err != nil {
return fmt.Errorf("persist block %s/%d: %w", h.Lane(), h.BlockNumber(), err)
lane := proposal.Msg().Block().Header().Lane()
blocksByLane[lane] = append(blocksByLane[lane], proposal)
}
if err := scope.Parallel(func(ps scope.ParallelScope) error {
if len(batch.commitQCs) > 0 {
ps.Spawn(func() error {
for _, qc := range batch.commitQCs {
if err := pers.commitQCs.PersistCommitQC(qc); err != nil {
return fmt.Errorf("persist commitqc %d: %w", qc.Index(), err)
}
s.markCommitQCsPersisted(qc)
}
return nil
})
}
s.markBlockPersisted(h.Lane(), h.BlockNumber()+1)
}

// 4. Prune old data.
if err := pers.blocks.DeleteBefore(batch.laneFirsts); err != nil {
return fmt.Errorf("block deleteBefore: %w", err)
}
if err := pers.commitQCs.DeleteBefore(batch.commitQCFirst); err != nil {
return fmt.Errorf("commitqc deleteBefore: %w", err)
for lane, proposals := range blocksByLane {
ps.Spawn(func() error {
for _, p := range proposals {
if err := pers.blocks.PersistBlock(p); err != nil {
h := p.Msg().Block().Header()
return fmt.Errorf("persist block %s/%d: %w", h.Lane(), h.BlockNumber(), err)
}
s.markBlockPersisted(lane, p.Msg().Block().Header().BlockNumber()+1)
}
return nil
})
}
return nil
}); err != nil {
return err
}
}
}

// persistBatch holds the data collected under lock for one persist iteration.
type persistBatch struct {
blocks []*types.Signed[*types.LaneProposal]
commitQCs []*types.CommitQC
pruneAnchor utils.Option[*PruneAnchor]
laneFirsts map[types.LaneID]types.BlockNumber
commitQCFirst types.RoadIndex
blocks []*types.Signed[*types.LaneProposal]
commitQCs []*types.CommitQC
pruneAnchor utils.Option[*PruneAnchor]
}

// advancePersistedBlockStart updates the per-lane block admission watermark
Expand All @@ -717,8 +740,9 @@ func (s *State) advancePersistedBlockStart(commitQC *types.CommitQC) {
}

// markBlockPersisted advances the per-lane block persistence cursor.
// Called after each individual block write so that RecvBatch (and therefore
// voting) unblocks with single-block latency regardless of batch size.
// Called after each block is persisted so that RecvBatch (and therefore
// voting) can unblock as soon as the block is durable. Safe for concurrent
// callers (acquires s.inner lock internally).
func (s *State) markBlockPersisted(lane types.LaneID, next types.BlockNumber) {
for inner, ctrl := range s.inner.Lock() {
inner.nextBlockToPersist[lane] = next
Expand Down Expand Up @@ -759,16 +783,13 @@ func (s *State) collectPersistBatch(ctx context.Context, lastPersistedAppQCNext
}); err != nil {
return b, err
}
b.laneFirsts = make(map[types.LaneID]types.BlockNumber, len(inner.blocks))
for lane, q := range inner.blocks {
start := max(inner.nextBlockToPersist[lane], q.first)
for n := start; n < q.next; n++ {
b.blocks = append(b.blocks, q.q[n])
}
b.laneFirsts[lane] = q.first
}
commitQCNext = max(commitQCNext, inner.commitQCs.first)
b.commitQCFirst = inner.commitQCs.first
for n := commitQCNext; n < inner.commitQCs.next; n++ {
b.commitQCs = append(b.commitQCs, inner.commitQCs.q[n])
}
Expand Down
122 changes: 104 additions & 18 deletions sei-tendermint/internal/autobahn/avail/state_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,9 +291,10 @@ func TestStateRestartFromPersisted(t *testing.T) {
wantAppQCIdx = appProposal.RoadIndex()
}

// Wait for persistence to complete. markCommitQCsPersisted fires
// after all blocks, commitQCs, the prune anchor, and cleanup in the
// batch are on disk, so this confirms all data is durable.
// Wait for commitQC persistence. markCommitQCsPersisted fires after
// all commitQCs in the batch are on disk. Block goroutines may still
// be in flight, but g.Wait() in runPersist ensures they complete
// before the next batch, so the data is durable by scope exit.
if err := state.waitForCommitQC(ctx, wantAppQCIdx); err != nil {
return fmt.Errorf("waitForCommitQC: %w", err)
}
Expand Down Expand Up @@ -582,9 +583,7 @@ func TestNewStateWithPersistence(t *testing.T) {
// All 3 commitQCs should be loaded (no AppQC to skip past).
require.Equal(t, types.RoadIndex(0), state.FirstCommitQC())
// LastCommitQC should be set to the last loaded one.
latest, ok := state.LastCommitQC().Load().Get()
require.True(t, ok)
require.NoError(t, utils.TestDiff(qcs[2], latest))
require.NoError(t, utils.TestDiff(utils.Some(qcs[2]), state.LastCommitQC().Load()))
})

t.Run("loads persisted commitQCs with AppQC", func(t *testing.T) {
Expand Down Expand Up @@ -622,14 +621,11 @@ func TestNewStateWithPersistence(t *testing.T) {

// inner.prune(appQC@1, commitQC@1) sets commitQCs.first = 1.
require.Equal(t, types.RoadIndex(1), state.FirstCommitQC())
latest, ok := state.LastCommitQC().Load().Get()
require.True(t, ok)
require.NoError(t, utils.TestDiff(qcs[4], latest))
require.NoError(t, utils.TestDiff(utils.Some(qcs[4]), state.LastCommitQC().Load()))
})

t.Run("non-contiguous commitQC files return error", func(t *testing.T) {
dir := t.TempDir()
ds := data.NewState(&data.Config{Committee: committee}, utils.None[data.BlockStore]())

// Build 6 sequential CommitQCs (indices 0-5).
allQCs := make([]*types.CommitQC, 6)
Expand All @@ -649,19 +645,109 @@ func TestNewStateWithPersistence(t *testing.T) {
CommitQc: types.CommitQCConv.Encode(allQCs[0]),
}))

// Persist QCs 0, 1, 2 contiguously, then skip to 5 (simulating
// corruption or manual tampering). Since the anchor is persisted
// first, a gap should never occur normally — treat it as an error.
// Persist QCs 0, 1, 2 contiguously, then try to skip to 5.
// PersistCommitQC enforces strict sequential order, so the gap
// is caught at write time rather than at load time.
cp, _, err := persist.NewCommitQCPersister(utils.Some(dir))
require.NoError(t, err)
for i := 0; i < 3; i++ {
for i := range 3 {
require.NoError(t, cp.PersistCommitQC(allQCs[i]))
}
require.NoError(t, cp.PersistCommitQC(allQCs[5]))

_, err = NewState(keys[0], ds, utils.Some(dir))
err = cp.PersistCommitQC(allQCs[5])
require.Error(t, err)
require.Contains(t, err.Error(), "non-contiguous")
require.Contains(t, err.Error(), "out of sequence")
require.NoError(t, cp.Close())
})

t.Run("anchor past all persisted commitQCs truncates WAL", func(t *testing.T) {
dir := t.TempDir()
ds := data.NewState(&data.Config{Committee: committee}, utils.None[data.BlockStore]())

// Build a chain of 10 CommitQCs (indices 0-9).
qcs := make([]*types.CommitQC, 10)
prev := utils.None[*types.CommitQC]()
for i := range qcs {
qcs[i] = makeCommitQC(rng, committee, keys, prev, nil, utils.None[*types.AppQC]())
prev = utils.Some(qcs[i])
}

// Persist only indices 0-4 to the CommitQC WAL.
cp, _, err := persist.NewCommitQCPersister(utils.Some(dir))
require.NoError(t, err)
for i := range 5 {
require.NoError(t, cp.PersistCommitQC(qcs[i]))
}
require.NoError(t, cp.Close())

// Persist a prune anchor at index 9 — well past the persisted range.
appProposal := types.NewAppProposal(50, 9, types.GenAppHash(rng))
appQC := types.NewAppQC(makeAppVotes(keys, appProposal))
prunePers, _, err := persist.NewPersister[*pb.PersistedAvailPruneAnchor](utils.Some(dir), innerFile)
require.NoError(t, err)
require.NoError(t, prunePers.Persist(&pb.PersistedAvailPruneAnchor{
AppQc: types.AppQCConv.Encode(appQC),
CommitQc: types.CommitQCConv.Encode(qcs[9]),
}))

// NewState should succeed: DeleteBefore truncates the stale WAL,
// then the re-persist loop writes the anchor's CommitQC back.
state, err := NewState(keys[0], ds, utils.Some(dir))
require.NoError(t, err)

require.Equal(t, types.RoadIndex(9), state.FirstCommitQC())
require.NoError(t, utils.TestDiff(utils.Some(qcs[9]), state.LastCommitQC().Load()))

got, ok := state.LastAppQC().Get()
require.True(t, ok)
require.Equal(t, types.RoadIndex(9), got.Proposal().RoadIndex())
})

t.Run("anchor past all persisted blocks truncates lane WAL", func(t *testing.T) {
dir := t.TempDir()
ds := data.NewState(&data.Config{Committee: committee}, utils.None[data.BlockStore]())
lane := keys[0].Public()

// Persist commitQCs 0-9 and blocks 0-2 for one lane.
qcs := make([]*types.CommitQC, 10)
prev := utils.None[*types.CommitQC]()
cp, _, err := persist.NewCommitQCPersister(utils.Some(dir))
require.NoError(t, err)
for i := range qcs {
qcs[i] = makeCommitQC(rng, committee, keys, prev, nil, utils.None[*types.AppQC]())
prev = utils.Some(qcs[i])
require.NoError(t, cp.PersistCommitQC(qcs[i]))
}
require.NoError(t, cp.Close())

bp, _, err := persist.NewBlockPersister(utils.Some(dir))
require.NoError(t, err)
var parent types.BlockHeaderHash
for n := types.BlockNumber(0); n < 3; n++ {
block := types.NewBlock(lane, n, parent, types.GenPayload(rng))
signed := types.Sign(keys[0], types.NewLaneProposal(block))
parent = block.Header().Hash()
require.NoError(t, bp.PersistBlock(signed))
}

// Persist a prune anchor at index 9 with a laneRange that starts past
// all persisted blocks — DeleteBefore will TruncateAll the block WAL.
appProposal := types.NewAppProposal(50, 9, types.GenAppHash(rng))
appQC := types.NewAppQC(makeAppVotes(keys, appProposal))
prunePers, _, err := persist.NewPersister[*pb.PersistedAvailPruneAnchor](utils.Some(dir), innerFile)
require.NoError(t, err)
require.NoError(t, prunePers.Persist(&pb.PersistedAvailPruneAnchor{
AppQc: types.AppQCConv.Encode(appQC),
CommitQc: types.CommitQCConv.Encode(qcs[9]),
}))

// NewState should succeed: block WAL gets truncated, lane starts clean.
state, err := NewState(keys[0], ds, utils.Some(dir))
require.NoError(t, err)

require.Equal(t, types.RoadIndex(9), state.FirstCommitQC())
got, ok := state.LastAppQC().Get()
require.True(t, ok)
require.Equal(t, types.RoadIndex(9), got.Proposal().RoadIndex())
})

t.Run("corrupt AppQC data returns error", func(t *testing.T) {
Expand Down
Loading
Loading