From 2e769b531ed2d0deebd0f5239fc16e94852f02ad Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Wed, 3 Dec 2025 21:32:35 +0100 Subject: [PATCH 01/19] refactor(sequencers): persist prepended batch --- sequencers/single/queue.go | 23 +++++++++++++++- sequencers/single/queue_test.go | 49 +++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/sequencers/single/queue.go b/sequencers/single/queue.go index a208aa109b..b9f3ee25a1 100644 --- a/sequencers/single/queue.go +++ b/sequencers/single/queue.go @@ -85,11 +85,32 @@ func (bq *BatchQueue) AddBatch(ctx context.Context, batch coresequencer.Batch) e // Prepend adds a batch to the front of the queue (before head position). // This is used to return transactions that couldn't fit in the current batch. -// TODO(@julienrbrt): The batch is currently NOT persisted to the DB since these are transactions that were already in the queue or were just processed. -- FI txs are lost, this should be tackled. +// The batch is persisted to the DB to ensure durability in case of crashes. func (bq *BatchQueue) Prepend(ctx context.Context, batch coresequencer.Batch) error { bq.mu.Lock() defer bq.mu.Unlock() + hash, err := batch.Hash() + if err != nil { + return err + } + key := hex.EncodeToString(hash) + + pbBatch := &pb.Batch{ + Txs: batch.Transactions, + } + + encodedBatch, err := proto.Marshal(pbBatch) + if err != nil { + return err + } + + // First write to DB for durability + if err := bq.db.Put(ctx, ds.NewKey(key), encodedBatch); err != nil { + return err + } + + // Then add to in-memory queue // If we have room before head, use it if bq.head > 0 { bq.head-- diff --git a/sequencers/single/queue_test.go b/sequencers/single/queue_test.go index b7665ee67f..9c029e779d 100644 --- a/sequencers/single/queue_test.go +++ b/sequencers/single/queue_test.go @@ -720,4 +720,53 @@ func TestBatchQueue_Prepend(t *testing.T) { require.NoError(t, err) assert.Equal(t, []byte("tx1"), nextBatch.Transactions[0]) }) + + t.Run("prepend persistence across restarts", func(t *testing.T) { + prefix := "test-prepend-persistence" + queue := NewBatchQueue(db, prefix, 0) + err := queue.Load(ctx) + require.NoError(t, err) + + // Add some batches + batch1 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}} + batch2 := coresequencer.Batch{Transactions: [][]byte{[]byte("tx2")}} + err = queue.AddBatch(ctx, batch1) + require.NoError(t, err) + err = queue.AddBatch(ctx, batch2) + require.NoError(t, err) + + // Consume first batch + _, err = queue.Next(ctx) + require.NoError(t, err) + + // Prepend a batch (simulating transactions that couldn't fit) + prependedBatch := coresequencer.Batch{Transactions: [][]byte{[]byte("prepended")}} + err = queue.Prepend(ctx, prependedBatch) + require.NoError(t, err) + + assert.Equal(t, 2, queue.Size()) + + // Simulate restart by creating a new queue with same prefix + queue2 := NewBatchQueue(db, prefix, 0) + err = queue2.Load(ctx) + require.NoError(t, err) + + // Should have both the prepended batch and tx2 + assert.Equal(t, 2, queue2.Size()) + + // First should be prepended batch + nextBatch, err := queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 1, len(nextBatch.Transactions)) + assert.Equal(t, []byte("prepended"), nextBatch.Transactions[0]) + + // Then tx2 + nextBatch, err = queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 1, len(nextBatch.Transactions)) + assert.Equal(t, []byte("tx2"), nextBatch.Transactions[0]) + + // Queue should be empty now + assert.Equal(t, 0, queue2.Size()) + }) } 
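To make the durability guarantee above concrete, the following is a minimal sketch of the crash-recovery flow this patch enables. It reuses the BatchQueue API from the diff (NewBatchQueue, Load, AddBatch, Next, Prepend); the in-memory datastore mirrors the tests, while the coresequencer import path and Next returning a batch pointer are assumptions from the surrounding repository layout.

package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	syncds "github.com/ipfs/go-datastore/sync"

	coresequencer "github.com/evstack/ev-node/core/sequencer" // assumed import path
	"github.com/evstack/ev-node/sequencers/single"            // assumed import path
)

func main() {
	ctx := context.Background()
	db := syncds.MutexWrap(ds.NewMapDatastore())

	// Fill a queue with one batch and consume it.
	q := single.NewBatchQueue(db, "demo", 0)
	if err := q.Load(ctx); err != nil {
		panic(err)
	}
	if err := q.AddBatch(ctx, coresequencer.Batch{Transactions: [][]byte{[]byte("tx1")}}); err != nil {
		panic(err)
	}
	batch, err := q.Next(ctx)
	if err != nil {
		panic(err)
	}

	// The transactions did not fit into the current block, so they are returned
	// to the front of the queue. With this patch, Prepend also writes the batch
	// back to the datastore instead of keeping it only in memory.
	if err := q.Prepend(ctx, coresequencer.Batch{Transactions: batch.Transactions}); err != nil {
		panic(err)
	}

	// Simulated restart: a fresh queue over the same datastore recovers the batch.
	q2 := single.NewBatchQueue(db, "demo", 0)
	if err := q2.Load(ctx); err != nil {
		panic(err)
	}
	recovered, _ := q2.Next(ctx)
	fmt.Printf("recovered tx: %s\n", recovered.Transactions[0]) // -> tx1
}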
From e38887b9e7940b85e4602150329ea0b3c209e6a3 Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Wed, 3 Dec 2025 22:58:35 +0100
Subject: [PATCH 02/19] refactor(sequencers): implement tx queue persistence for based seq

---
 apps/evm/cmd/run.go                |   5 +-
 apps/grpc/cmd/run.go               |   5 +-
 apps/testapp/cmd/run.go            |   5 +-
 proto/evnode/v1/batch.proto        |   5 +
 sequencers/based/queue.go          | 328 ++++++++++++++++++++++
 sequencers/based/queue_test.go     | 419 +++++++++++++++++++++++++++++
 sequencers/based/sequencer.go      |  76 ++++--
 sequencers/based/sequencer_test.go |  95 +++----
 types/pb/evnode/v1/batch.pb.go     |  54 +++-
 9 files changed, 904 insertions(+), 88 deletions(-)
 create mode 100644 sequencers/based/queue.go
 create mode 100644 sequencers/based/queue_test.go

diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go
index b22599a705..75fadb3cd1 100644
--- a/apps/evm/cmd/run.go
+++ b/apps/evm/cmd/run.go
@@ -125,7 +125,10 @@ func createSequencer(
 		return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled")
 	}

-	basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger)
+	basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create based sequencer: %w", err)
+	}

 	logger.Info().
 		Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()).
diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go
index 942940dde5..eb78a4125b 100644
--- a/apps/grpc/cmd/run.go
+++ b/apps/grpc/cmd/run.go
@@ -131,7 +131,10 @@ func createSequencer(
 		return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled")
 	}

-	basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger)
+	basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create based sequencer: %w", err)
+	}

 	logger.Info().
 		Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()).
diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go
index b2035b561d..b9b87e99c8 100644
--- a/apps/testapp/cmd/run.go
+++ b/apps/testapp/cmd/run.go
@@ -131,7 +131,10 @@ func createSequencer(
 		return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled")
 	}

-	basedSeq := based.NewBasedSequencer(fiRetriever, da, nodeConfig, genesis, logger)
+	basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create based sequencer: %w", err)
+	}

 	logger.Info().
 		Str("forced_inclusion_namespace", nodeConfig.DA.GetForcedInclusionNamespace()).
diff --git a/proto/evnode/v1/batch.proto b/proto/evnode/v1/batch.proto
index 85fe466310..c050d7bd5c 100644
--- a/proto/evnode/v1/batch.proto
+++ b/proto/evnode/v1/batch.proto
@@ -7,3 +7,8 @@ option go_package = "github.com/evstack/ev-node/types/pb/evnode/v1";
 message Batch {
   repeated bytes txs = 1;
 }
+
+// Tx is a single transaction.
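+// It wraps raw transaction bytes so the queue can persist each entry as one datastore record.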
+message Tx {
+  bytes data = 1;
+}
diff --git a/sequencers/based/queue.go b/sequencers/based/queue.go
new file mode 100644
index 0000000000..63e6d8cad1
--- /dev/null
+++ b/sequencers/based/queue.go
@@ -0,0 +1,328 @@
+package based
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+
+	ds "github.com/ipfs/go-datastore"
+	ktds "github.com/ipfs/go-datastore/keytransform"
+	"github.com/ipfs/go-datastore/query"
+	"google.golang.org/protobuf/proto"
+
+	pb "github.com/evstack/ev-node/types/pb/evnode/v1"
+)
+
+// ErrQueueFull is returned when the transaction queue has reached its maximum size
+var ErrQueueFull = errors.New("transaction queue is full")
+
+func newPrefixKV(kvStore ds.Batching, prefix string) ds.Batching {
+	return ktds.Wrap(kvStore, ktds.PrefixTransform{Prefix: ds.NewKey(prefix)})
+}
+
+// TxQueue implements a persistent queue for transactions
+type TxQueue struct {
+	queue        [][]byte
+	head         int // index of the first element in the queue
+	maxQueueSize int // maximum number of transactions allowed in queue (0 = unlimited)
+	mu           sync.Mutex
+	db           ds.Batching
+}
+
+// NewTxQueue creates a new TxQueue with the specified maximum size.
+// If maxSize is 0, the queue will be unlimited.
+func NewTxQueue(db ds.Batching, prefix string, maxSize int) *TxQueue {
+	return &TxQueue{
+		queue:        make([][]byte, 0),
+		head:         0,
+		maxQueueSize: maxSize,
+		db:           newPrefixKV(db, prefix),
+	}
+}
+
+// Add adds a new transaction to the queue and writes it to the DB.
+// Returns ErrQueueFull if the queue has reached its maximum size.
+func (tq *TxQueue) Add(ctx context.Context, tx []byte) error {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	// Check if queue is full (maxQueueSize of 0 means unlimited)
+	// Use effective queue size (total length minus processed head items)
+	effectiveSize := len(tq.queue) - tq.head
+	if tq.maxQueueSize > 0 && effectiveSize >= tq.maxQueueSize {
+		return ErrQueueFull
+	}
+
+	// Generate a unique key for this transaction: a zero-padded queue position
+	// (so the lexicographic sort in Load preserves FIFO order) plus the tx hash
+	key := fmt.Sprintf("tx_%020d_%s", len(tq.queue), hex.EncodeToString(tx[:min(32, len(tx))]))
+
+	pbTx := &pb.Tx{
+		Data: tx,
+	}
+
+	encodedTx, err := proto.Marshal(pbTx)
+	if err != nil {
+		return err
+	}
+
+	// First write to DB for durability
+	if err := tq.db.Put(ctx, ds.NewKey(key), encodedTx); err != nil {
+		return err
+	}
+
+	// Then add to in-memory queue
+	tq.queue = append(tq.queue, tx)
+
+	return nil
+}
+
+// AddBatch adds multiple transactions to the queue in a single operation
+func (tq *TxQueue) AddBatch(ctx context.Context, txs [][]byte) error {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	// Check if adding these transactions would exceed the queue size
+	effectiveSize := len(tq.queue) - tq.head
+	if tq.maxQueueSize > 0 && effectiveSize+len(txs) > tq.maxQueueSize {
+		return ErrQueueFull
+	}
+
+	// Use a batch operation for efficiency
+	batch, err := tq.db.Batch(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to create batch: %w", err)
+	}
+
+	for i, tx := range txs {
+		// Generate a unique key for this transaction (zero-padded, see Add)
+		key := fmt.Sprintf("tx_%020d_%s", len(tq.queue)+i, hex.EncodeToString(tx[:min(32, len(tx))]))
+
+		pbTx := &pb.Tx{
+			Data: tx,
+		}
+
+		encodedTx, err := proto.Marshal(pbTx)
+		if err != nil {
+			return err
+		}
+
+		if err := batch.Put(ctx, ds.NewKey(key), encodedTx); err != nil {
+			return err
+		}
+	}
+
+	// Commit the batch
+	if err := batch.Commit(ctx); err != nil {
+		return fmt.Errorf("failed to commit batch: %w", err)
+	}
+
+	// Then add to in-memory queue
+	tq.queue = append(tq.queue, txs...)
+
+	return nil
+}
+
+// Next extracts a transaction from the queue and marks it as processed in the DB
+func (tq *TxQueue) Next(ctx context.Context) ([]byte, error) {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	// Check if queue is empty
+	if tq.head >= len(tq.queue) {
+		return nil, nil
+	}
+
+	tx := tq.queue[tq.head]
+	key := fmt.Sprintf("tx_%020d_%s", tq.head, hex.EncodeToString(tx[:min(32, len(tx))]))
+
+	tq.queue[tq.head] = nil // Release memory for the dequeued element
+	tq.head++
+
+	// Compact when head gets too large to prevent memory leaks
+	// Only compact when we have significant waste (more than half processed)
+	// and when we have a reasonable number of processed items to avoid
+	// frequent compactions on small queues
+	if tq.head > len(tq.queue)/2 && tq.head > 100 {
+		remaining := copy(tq.queue, tq.queue[tq.head:])
+		// Zero out the rest of the slice to release memory
+		for i := remaining; i < len(tq.queue); i++ {
+			tq.queue[i] = nil
+		}
+		tq.queue = tq.queue[:remaining]
+		tq.head = 0
+	}
+
+	// Delete the transaction from the DB since it's been processed
+	err := tq.db.Delete(ctx, ds.NewKey(key))
+	if err != nil {
+		// Log the error but continue
+		fmt.Printf("Error deleting processed transaction: %v\n", err)
+	}
+
+	return tx, nil
+}
+
+// Peek returns transactions from the queue without removing them
+// This is useful for creating batches without committing to dequeue
+func (tq *TxQueue) Peek(maxBytes uint64) [][]byte {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	if tq.head >= len(tq.queue) {
+		return nil
+	}
+
+	var result [][]byte
+	var totalBytes uint64
+
+	for i := tq.head; i < len(tq.queue); i++ {
+		tx := tq.queue[i]
+		txSize := uint64(len(tx))
+
+		if totalBytes+txSize > maxBytes {
+			break
+		}
+
+		result = append(result, tx)
+		totalBytes += txSize
+	}
+
+	return result
+}
+
+// Consume removes the first n transactions from the queue
+// This should be called after successfully processing transactions returned by Peek
+func (tq *TxQueue) Consume(ctx context.Context, n int) error {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	if tq.head+n > len(tq.queue) {
+		return errors.New("cannot consume more transactions than available")
+	}
+
+	// Delete from DB
+	for i := 0; i < n; i++ {
+		tx := tq.queue[tq.head+i]
+		key := fmt.Sprintf("tx_%020d_%s", tq.head+i, hex.EncodeToString(tx[:min(32, len(tx))]))
+
+		if err := tq.db.Delete(ctx, ds.NewKey(key)); err != nil {
+			fmt.Printf("Error deleting consumed transaction: %v\n", err)
+		}
+
+		tq.queue[tq.head+i] = nil // Release memory
+	}
+
+	tq.head += n
+
+	// Compact if needed
+	if tq.head > len(tq.queue)/2 && tq.head > 100 {
+		remaining := copy(tq.queue, tq.queue[tq.head:])
+		for i := remaining; i < len(tq.queue); i++ {
+			tq.queue[i] = nil
+		}
+		tq.queue = tq.queue[:remaining]
+		tq.head = 0
+	}
+
+	return nil
+}
+
+// Load reloads all transactions from DB into the in-memory queue after a crash or restart
+func (tq *TxQueue) Load(ctx context.Context) error {
+	tq.mu.Lock()
+	defer tq.mu.Unlock()
+
+	// Clear the current queue
+	tq.queue = make([][]byte, 0)
+	tq.head = 0
+
+	q := query.Query{}
+	results, err := tq.db.Query(ctx, q)
+	if err != nil {
+		return fmt.Errorf("error querying datastore: %w", err)
+	}
+	defer results.Close()
+
+	// Collect all entries with their keys
+	type entry struct {
+		key string
+		tx  []byte
+	}
+	var entries []entry
+
+	// Load each transaction
+	for result := range results.Next() {
+		if result.Error != nil {
+			fmt.Printf("Error reading entry from datastore: %v\n", result.Error)
continue + } + pbTx := &pb.Tx{} + err := proto.Unmarshal(result.Value, pbTx) + if err != nil { + fmt.Printf("Error decoding transaction for key '%s': %v. Skipping entry.\n", result.Key, err) + continue + } + entries = append(entries, entry{key: result.Key, tx: pbTx.Data}) + } + + // Sort entries by key to maintain FIFO order + sort.Slice(entries, func(i, j int) bool { + return entries[i].key < entries[j].key + }) + + // Add sorted transactions to queue + for _, e := range entries { + tq.queue = append(tq.queue, e.tx) + } + + return nil +} + +// Size returns the effective number of transactions in the queue +// This method is primarily for testing and monitoring purposes +func (tq *TxQueue) Size() int { + tq.mu.Lock() + defer tq.mu.Unlock() + return len(tq.queue) - tq.head +} + +// Clear removes all transactions from the queue and DB +func (tq *TxQueue) Clear(ctx context.Context) error { + tq.mu.Lock() + defer tq.mu.Unlock() + + // Delete all entries from DB + q := query.Query{KeysOnly: true} + results, err := tq.db.Query(ctx, q) + if err != nil { + return fmt.Errorf("error querying datastore: %w", err) + } + defer results.Close() + + for result := range results.Next() { + if result.Error != nil { + fmt.Printf("Error reading key from datastore: %v\n", result.Error) + continue + } + if err := tq.db.Delete(ctx, ds.NewKey(result.Key)); err != nil { + fmt.Printf("Error deleting key '%s': %v\n", result.Key, err) + } + } + + // Clear in-memory queue + tq.queue = make([][]byte, 0) + tq.head = 0 + + return nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/sequencers/based/queue_test.go b/sequencers/based/queue_test.go new file mode 100644 index 0000000000..45de771f0d --- /dev/null +++ b/sequencers/based/queue_test.go @@ -0,0 +1,419 @@ +package based + +import ( + "context" + "testing" + + ds "github.com/ipfs/go-datastore" + syncds "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTxQueue_AddAndNext(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + + assert.Equal(t, 2, queue.Size()) + + // Get next transaction + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), tx) + assert.Equal(t, 1, queue.Size()) + + // Get second transaction + tx, err = queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), tx) + assert.Equal(t, 0, queue.Size()) + + // Queue should be empty + tx, err = queue.Next(ctx) + require.NoError(t, err) + assert.Nil(t, tx) +} + +func TestTxQueue_AddBatch(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + txs := [][]byte{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + err := queue.AddBatch(ctx, txs) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Verify all transactions + for i, expectedTx := range txs { + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, expectedTx, tx, "transaction %d should match", i) + } + + assert.Equal(t, 0, queue.Size()) +} + +func TestTxQueue_MaxSize(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 2) // Max 2 transactions + + ctx := context.Background() + + // Add first 
transaction + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + + // Add second transaction + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + + // Third transaction should fail + err = queue.Add(ctx, []byte("tx3")) + assert.ErrorIs(t, err, ErrQueueFull) + + // Size should still be 2 + assert.Equal(t, 2, queue.Size()) + + // After removing one, we should be able to add again + _, err = queue.Next(ctx) + require.NoError(t, err) + + err = queue.Add(ctx, []byte("tx3")) + require.NoError(t, err) + assert.Equal(t, 2, queue.Size()) +} + +func TestTxQueue_AddBatchMaxSize(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 3) + + ctx := context.Background() + + // Add one transaction + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + + // Try to add 3 more (would exceed limit) + txs := [][]byte{ + []byte("tx2"), + []byte("tx3"), + []byte("tx4"), + } + err = queue.AddBatch(ctx, txs) + assert.ErrorIs(t, err, ErrQueueFull) + + // Size should still be 1 + assert.Equal(t, 1, queue.Size()) +} + +func TestTxQueue_Persistence(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add some transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx3")) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Create a new queue with the same datastore + queue2 := NewTxQueue(db, "test", 0) + + // Load from persistence + err = queue2.Load(ctx) + require.NoError(t, err) + + // Should have all transactions + assert.Equal(t, 3, queue2.Size()) + + // Verify transactions are in order + tx, err := queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), tx) + + tx, err = queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), tx) + + tx, err = queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), tx) + + assert.Equal(t, 0, queue2.Size()) +} + +func TestTxQueue_PersistenceAfterPartialConsumption(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx3")) + require.NoError(t, err) + + // Consume first transaction + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), tx) + + // Create new queue and load + queue2 := NewTxQueue(db, "test", 0) + err = queue2.Load(ctx) + require.NoError(t, err) + + // Should only have remaining transactions + assert.Equal(t, 2, queue2.Size()) + + tx, err = queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), tx) + + tx, err = queue2.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), tx) +} + +func TestTxQueue_Peek(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions of different sizes + err := queue.Add(ctx, make([]byte, 50)) // 50 bytes + require.NoError(t, err) + err = queue.Add(ctx, make([]byte, 60)) // 60 bytes + require.NoError(t, err) + err = queue.Add(ctx, make([]byte, 100)) // 100 bytes + require.NoError(t, err) + + // Peek with 100 bytes limit - should get first tx only + txs := 
queue.Peek(100) + assert.Equal(t, 1, len(txs)) + assert.Equal(t, 50, len(txs[0])) + + // Queue size should not change + assert.Equal(t, 3, queue.Size()) + + // Peek with 120 bytes limit - should get first two txs + txs = queue.Peek(120) + assert.Equal(t, 2, len(txs)) + assert.Equal(t, 50, len(txs[0])) + assert.Equal(t, 60, len(txs[1])) + + // Queue size should still not change + assert.Equal(t, 3, queue.Size()) + + // Peek with 300 bytes limit - should get all txs + txs = queue.Peek(300) + assert.Equal(t, 3, len(txs)) +} + +func TestTxQueue_Consume(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx3")) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Consume first 2 transactions + err = queue.Consume(ctx, 2) + require.NoError(t, err) + + assert.Equal(t, 1, queue.Size()) + + // Next transaction should be tx3 + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx3"), tx) +} + +func TestTxQueue_PeekAndConsume(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions + err := queue.Add(ctx, make([]byte, 50)) + require.NoError(t, err) + err = queue.Add(ctx, make([]byte, 60)) + require.NoError(t, err) + err = queue.Add(ctx, make([]byte, 100)) + require.NoError(t, err) + + // Peek to see what fits in 120 bytes + txs := queue.Peek(120) + assert.Equal(t, 2, len(txs)) + + // Consume those transactions + err = queue.Consume(ctx, len(txs)) + require.NoError(t, err) + + // Should have 1 transaction left + assert.Equal(t, 1, queue.Size()) + + // Next transaction should be the 100-byte one + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, 100, len(tx)) +} + +func TestTxQueue_ConsumeMoreThanAvailable(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add 2 transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + + // Try to consume 3 transactions + err = queue.Consume(ctx, 3) + assert.Error(t, err) + + // Size should be unchanged + assert.Equal(t, 2, queue.Size()) +} + +func TestTxQueue_Clear(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add transactions + err := queue.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx2")) + require.NoError(t, err) + err = queue.Add(ctx, []byte("tx3")) + require.NoError(t, err) + + assert.Equal(t, 3, queue.Size()) + + // Clear the queue + err = queue.Clear(ctx) + require.NoError(t, err) + + assert.Equal(t, 0, queue.Size()) + + // Queue should be empty + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Nil(t, tx) + + // Verify persistence is also cleared + queue2 := NewTxQueue(db, "test", 0) + err = queue2.Load(ctx) + require.NoError(t, err) + assert.Equal(t, 0, queue2.Size()) +} + +func TestTxQueue_PrefixIsolation(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + + ctx := context.Background() + + // Create two queues with different prefixes + queue1 := NewTxQueue(db, "queue1", 0) + queue2 := NewTxQueue(db, "queue2", 0) 
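+	// Both queues share one datastore; newPrefixKV scopes their keys under "/queue1" and "/queue2".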
+ + // Add different transactions to each + err := queue1.Add(ctx, []byte("tx1")) + require.NoError(t, err) + err = queue2.Add(ctx, []byte("tx2")) + require.NoError(t, err) + + assert.Equal(t, 1, queue1.Size()) + assert.Equal(t, 1, queue2.Size()) + + // Load each queue separately + queue1New := NewTxQueue(db, "queue1", 0) + err = queue1New.Load(ctx) + require.NoError(t, err) + + queue2New := NewTxQueue(db, "queue2", 0) + err = queue2New.Load(ctx) + require.NoError(t, err) + + // Each should have its own transaction + assert.Equal(t, 1, queue1New.Size()) + assert.Equal(t, 1, queue2New.Size()) + + tx, err := queue1New.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx1"), tx) + + tx, err = queue2New.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte("tx2"), tx) +} + +func TestTxQueue_MemoryCompaction(t *testing.T) { + db := syncds.MutexWrap(ds.NewMapDatastore()) + queue := NewTxQueue(db, "test", 0) + + ctx := context.Background() + + // Add more than 100 transactions to trigger compaction + for i := 0; i < 150; i++ { + err := queue.Add(ctx, []byte{byte(i)}) + require.NoError(t, err) + } + + // Consume 100 transactions to trigger compaction + for i := 0; i < 100; i++ { + _, err := queue.Next(ctx) + require.NoError(t, err) + } + + // Size should be 50 + assert.Equal(t, 50, queue.Size()) + + // Remaining transactions should be correct + for i := 100; i < 150; i++ { + tx, err := queue.Next(ctx) + require.NoError(t, err) + assert.Equal(t, []byte{byte(i)}, tx) + } +} diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index e209365db4..3e508c5a4e 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -3,9 +3,11 @@ package based import ( "context" "errors" + "fmt" "sync/atomic" "time" + ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" "github.com/evstack/ev-node/block" @@ -33,28 +35,37 @@ type BasedSequencer struct { logger zerolog.Logger daHeight atomic.Uint64 - txQueue [][]byte + txQueue *TxQueue } // NewBasedSequencer creates a new based sequencer instance func NewBasedSequencer( + ctx context.Context, fiRetriever ForcedInclusionRetriever, da coreda.DA, + db ds.Batching, config config.Config, genesis genesis.Genesis, logger zerolog.Logger, -) *BasedSequencer { + maxQueueSize int, +) (*BasedSequencer, error) { bs := &BasedSequencer{ fiRetriever: fiRetriever, da: da, config: config, genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), - txQueue: make([][]byte, 0), + txQueue: NewTxQueue(db, "based_txs", maxQueueSize), } bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor - return bs + loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := bs.txQueue.Load(loadCtx); err != nil { + return nil, fmt.Errorf("failed to load transaction queue from DB: %w", err) + } + + return bs, nil } // SubmitBatchTxs does nothing for a based sequencer as it only pulls from DA @@ -96,7 +107,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get } // Add forced inclusion transactions to the queue with validation - validTxs := 0 + validTxs := make([][]byte, 0, len(forcedTxsEvent.Txs)) skippedTxs := 0 for _, tx := range forcedTxsEvent.Txs { // Validate blob size against absolute maximum @@ -108,14 +119,26 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get skippedTxs++ continue } - s.txQueue = append(s.txQueue, tx) - validTxs++ + validTxs = append(validTxs, tx) + } + + // 
Add valid transactions to the persistent queue + if len(validTxs) > 0 { + if err := s.txQueue.AddBatch(ctx, validTxs); err != nil { + if errors.Is(err, ErrQueueFull) { + s.logger.Warn(). + Int("tx_count", len(validTxs)). + Msg("Transaction queue is full, rejecting forced inclusion transactions") + return nil, fmt.Errorf("transaction queue is full: %w", err) + } + return nil, fmt.Errorf("failed to add transactions to queue: %w", err) + } } s.logger.Info(). - Int("valid_tx_count", validTxs). + Int("valid_tx_count", len(validTxs)). Int("skipped_tx_count", skippedTxs). - Int("queue_size", len(s.txQueue)). + Int("queue_size", s.txQueue.Size()). Uint64("da_height_start", forcedTxsEvent.StartDaHeight). Uint64("da_height_end", forcedTxsEvent.EndDaHeight). Msg("processed forced inclusion transactions from DA") @@ -131,32 +154,29 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get // createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch { - if len(s.txQueue) == 0 { + if s.txQueue.Size() == 0 { return &coresequencer.Batch{Transactions: nil} } - var batch [][]byte - var totalBytes uint64 - - for i, tx := range s.txQueue { - txSize := uint64(len(tx)) - // Always respect maxBytes, even for the first transaction - if totalBytes+txSize > maxBytes { - // Would exceed max bytes, stop here - s.txQueue = s.txQueue[i:] - break - } + // Peek at transactions without removing them + txs := s.txQueue.Peek(maxBytes) + if len(txs) == 0 { + return &coresequencer.Batch{Transactions: nil} + } - batch = append(batch, tx) - totalBytes += txSize + // Consume the transactions we're including in the batch + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() - // If this is the last transaction, clear the queue - if i == len(s.txQueue)-1 { - s.txQueue = s.txQueue[:0] - } + if err := s.txQueue.Consume(ctx, len(txs)); err != nil { + s.logger.Error().Err(err). + Int("tx_count", len(txs)). 
+ Msg("failed to consume transactions from queue") + // Return empty batch on error to avoid data inconsistency + return &coresequencer.Batch{Transactions: nil} } - return &coresequencer.Batch{Transactions: batch} + return &coresequencer.Batch{Transactions: txs} } // VerifyBatch verifies a batch of transactions diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index 0f994f91f5..27688af720 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + ds "github.com/ipfs/go-datastore" + syncds "github.com/ipfs/go-datastore/sync" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -79,6 +81,20 @@ func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ( return args.Get(0).([][]byte), args.Error(1) } +// createTestSequencer is a helper function to create a sequencer for testing +func createTestSequencer(t *testing.T, mockDA *MockDA, cfg config.Config, gen genesis.Genesis) *BasedSequencer { + t.Helper() + daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) + fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + // Create in-memory datastore + db := syncds.MutexWrap(ds.NewMapDatastore()) + + seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop(), 0) + require.NoError(t, err) + return seq +} + func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { mockDA := new(MockDA) gen := genesis.Genesis{ @@ -91,10 +107,7 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) // Submit should succeed but be ignored req := coresequencer.SubmitBatchTxsRequest{ @@ -109,7 +122,7 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { require.NoError(t, err) require.NotNil(t, resp) // Transactions should not be added to queue for based sequencer - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, 0, seq.txQueue.Size()) } func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { @@ -133,10 +146,7 @@ func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -172,10 +182,7 @@ func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -202,10 +209,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { // Create config without forced inclusion namespace cfgNoFI 
:= config.DefaultConfig() cfgNoFI.DA.ForcedInclusionNamespace = "" - daClient := block.NewDAClient(mockDA, cfgNoFI, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfgNoFI, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfgNoFI, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -247,10 +251,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) // First call with max 100 bytes - should get first 2 txs (50 + 60 = 110, but logic allows if batch has content) req := coresequencer.GetNextBatchRequest{ @@ -264,7 +265,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp.Batch) // Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100) assert.Equal(t, 1, len(resp.Batch.Transactions)) - assert.Equal(t, 2, len(seq.txQueue)) // 2 remaining in queue + assert.Equal(t, 2, seq.txQueue.Size()) // 2 remaining in queue // Second call should get next tx from queue resp2, err := seq.GetNextBatch(context.Background(), req) @@ -272,7 +273,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp2) require.NotNil(t, resp2.Batch) assert.Equal(t, 1, len(resp2.Batch.Transactions)) - assert.Equal(t, 1, len(seq.txQueue)) // 1 remaining in queue + assert.Equal(t, 1, seq.txQueue.Size()) // 1 remaining in queue // Third call with larger maxBytes to get the 100-byte tx req3 := coresequencer.GetNextBatchRequest{ @@ -284,7 +285,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { require.NotNil(t, resp3) require.NotNil(t, resp3.Batch) assert.Equal(t, 1, len(resp3.Batch.Transactions)) - assert.Equal(t, 0, len(seq.txQueue)) // Queue should be empty + assert.Equal(t, 0, seq.txQueue.Size()) // Queue should be empty mockDA.AssertExpectations(t) } @@ -304,13 +305,14 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) // Pre-populate the queue - seq.txQueue = [][]byte{[]byte("queued_tx1"), []byte("queued_tx2")} + ctx := context.Background() + err := seq.txQueue.Add(ctx, []byte("queued_tx1")) + require.NoError(t, err) + err = seq.txQueue.Add(ctx, []byte("queued_tx2")) + require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -326,7 +328,7 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) { assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1]) // Queue should be empty now - assert.Equal(t, 0, len(seq.txQueue)) + assert.Equal(t, 0, seq.txQueue.Size()) } func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) { @@ -354,10 +356,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = 
"test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) // First call with maxBytes = 100 // Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes @@ -373,7 +372,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes") // Verify forced tx is in queue - assert.Equal(t, 1, len(seq.txQueue), "Forced tx should be in queue") + assert.Equal(t, 1, seq.txQueue.Size(), "Forced tx should be in queue") // Second call with larger maxBytes = 200 // Should process tx from queue @@ -390,7 +389,7 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) // Queue should now be empty - assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + assert.Equal(t, 0, seq.txQueue.Size(), "Queue should be empty") mockDA.AssertExpectations(t) } @@ -421,10 +420,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) // First call with maxBytes = 120 // Should get only first forced tx (100 bytes), second stays in queue @@ -441,7 +437,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T assert.Equal(t, 100, len(resp1.Batch.Transactions[0])) // Verify second tx is still in queue - assert.Equal(t, 1, len(seq.txQueue), "Second tx should be in queue") + assert.Equal(t, 1, seq.txQueue.Size(), "Second tx should be in queue") // Second call - should get the second tx from queue req2 := coresequencer.GetNextBatchRequest{ @@ -457,7 +453,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) // Queue should now be empty - assert.Equal(t, 0, len(seq.txQueue), "Queue should be empty") + assert.Equal(t, 0, seq.txQueue.Size(), "Queue should be empty") mockDA.AssertExpectations(t) } @@ -474,10 +470,7 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) req := coresequencer.VerifyBatchRequest{ Id: []byte("test-chain"), @@ -502,10 +495,7 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) assert.Equal(t, uint64(100), seq.GetDAHeight()) @@ -528,10 +518,7 @@ func 
TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { cfg.DA.DataNamespace = "test-data-ns" cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq := NewBasedSequencer(fiRetriever, mockDA, cfg, gen, zerolog.Nop()) + seq := createTestSequencer(t, mockDA, cfg, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, diff --git a/types/pb/evnode/v1/batch.pb.go b/types/pb/evnode/v1/batch.pb.go index 576f1edfcc..d5902516d5 100644 --- a/types/pb/evnode/v1/batch.pb.go +++ b/types/pb/evnode/v1/batch.pb.go @@ -66,13 +66,60 @@ func (x *Batch) GetTxs() [][]byte { return nil } +// Tx is a single transaction. +type Tx struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Tx) Reset() { + *x = Tx{} + mi := &file_evnode_v1_batch_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tx) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tx) ProtoMessage() {} + +func (x *Tx) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_batch_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tx.ProtoReflect.Descriptor instead. +func (*Tx) Descriptor() ([]byte, []int) { + return file_evnode_v1_batch_proto_rawDescGZIP(), []int{1} +} + +func (x *Tx) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + var File_evnode_v1_batch_proto protoreflect.FileDescriptor const file_evnode_v1_batch_proto_rawDesc = "" + "\n" + "\x15evnode/v1/batch.proto\x12\tevnode.v1\"\x19\n" + "\x05Batch\x12\x10\n" + - "\x03txs\x18\x01 \x03(\fR\x03txsB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\x03txs\x18\x01 \x03(\fR\x03txs\"\x18\n" + + "\x02Tx\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04dataB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_batch_proto_rawDescOnce sync.Once @@ -86,9 +133,10 @@ func file_evnode_v1_batch_proto_rawDescGZIP() []byte { return file_evnode_v1_batch_proto_rawDescData } -var file_evnode_v1_batch_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_evnode_v1_batch_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_evnode_v1_batch_proto_goTypes = []any{ (*Batch)(nil), // 0: evnode.v1.Batch + (*Tx)(nil), // 1: evnode.v1.Tx } var file_evnode_v1_batch_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -109,7 +157,7 @@ func file_evnode_v1_batch_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_batch_proto_rawDesc), len(file_evnode_v1_batch_proto_rawDesc)), NumEnums: 0, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, From 0bd6be4b048f782d79748c9ca88c3e4c69926be4 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 4 Dec 2025 17:07:49 +0100 Subject: [PATCH 03/19] ai one shot --- apps/evm/cmd/run.go | 2 +- apps/grpc/cmd/run.go | 2 +- apps/testapp/cmd/run.go | 2 +- proto/evnode/v1/batch.proto | 5 - proto/evnode/v1/state.proto | 13 + sequencers/based/checkpoint.go | 148 ++++++++++++ sequencers/based/checkpoint_test.go | 197 
+++++++++++++++ sequencers/based/sequencer.go | 203 ++++++++++------ sequencers/based/sequencer_test.go | 359 +++++++++++++++++----------- types/pb/evnode/v1/batch.pb.go | 54 +---- types/pb/evnode/v1/state.pb.go | 121 +++++++++- 11 files changed, 826 insertions(+), 280 deletions(-) create mode 100644 sequencers/based/checkpoint.go create mode 100644 sequencers/based/checkpoint_test.go diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index 75fadb3cd1..0be7bf19d7 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -125,7 +125,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index eb78a4125b..7d884ae91c 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index b9b87e99c8..f6d4c92d9a 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger, 1000) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/proto/evnode/v1/batch.proto b/proto/evnode/v1/batch.proto index c050d7bd5c..85fe466310 100644 --- a/proto/evnode/v1/batch.proto +++ b/proto/evnode/v1/batch.proto @@ -7,8 +7,3 @@ option go_package = "github.com/evstack/ev-node/types/pb/evnode/v1"; message Batch { repeated bytes txs = 1; } - -// Tx is a single transaction. 
-message Tx { - bytes data = 1; -} diff --git a/proto/evnode/v1/state.proto b/proto/evnode/v1/state.proto index 2aa3025676..6c5dbe4bed 100644 --- a/proto/evnode/v1/state.proto +++ b/proto/evnode/v1/state.proto @@ -19,3 +19,16 @@ message State { reserved 7; } + +// BasedCheckpoint tracks the position in the DA where transactions were last processed +message BasedCheckpoint { + // DA block height being processed + uint64 da_height = 1; + // Index of the next transaction to process within the DA block's forced inclusion batch + uint64 tx_index = 2; +} + +// Tx represents a transaction with its raw data +message Tx { + bytes data = 1; +} diff --git a/sequencers/based/checkpoint.go b/sequencers/based/checkpoint.go new file mode 100644 index 0000000000..8b3f120603 --- /dev/null +++ b/sequencers/based/checkpoint.go @@ -0,0 +1,148 @@ +package based + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +var ( + // checkpointKey is the datastore key for persisting the checkpoint + checkpointKey = ds.NewKey("/based/checkpoint") + + // ErrCheckpointNotFound is returned when no checkpoint exists in the datastore + ErrCheckpointNotFound = errors.New("checkpoint not found") +) + +// Checkpoint tracks the position in the DA where we last processed transactions +type Checkpoint struct { + // DAHeight is the DA block height we're currently processing or have just finished + DAHeight uint64 + // TxIndex is the index of the next transaction to process within the DA block's forced inclusion batch + // If TxIndex == 0, it means we've finished processing the previous DA block and should fetch the next one + TxIndex uint64 +} + +// CheckpointStore manages persistence of the checkpoint +type CheckpointStore struct { + db ds.Batching +} + +// NewCheckpointStore creates a new checkpoint store +func NewCheckpointStore(db ds.Batching) *CheckpointStore { + return &CheckpointStore{ + db: db, + } +} + +// Load loads the checkpoint from the datastore +// Returns ErrCheckpointNotFound if no checkpoint exists +func (cs *CheckpointStore) Load(ctx context.Context) (*Checkpoint, error) { + data, err := cs.db.Get(ctx, checkpointKey) + if err != nil { + if errors.Is(err, ds.ErrNotFound) { + return nil, ErrCheckpointNotFound + } + return nil, fmt.Errorf("failed to load checkpoint: %w", err) + } + + pbCheckpoint := &pb.BasedCheckpoint{} + if err := proto.Unmarshal(data, pbCheckpoint); err != nil { + return nil, fmt.Errorf("failed to unmarshal checkpoint: %w", err) + } + + return &Checkpoint{ + DAHeight: pbCheckpoint.DaHeight, + TxIndex: pbCheckpoint.TxIndex, + }, nil +} + +// Save persists the checkpoint to the datastore +func (cs *CheckpointStore) Save(ctx context.Context, checkpoint *Checkpoint) error { + pbCheckpoint := &pb.BasedCheckpoint{ + DaHeight: checkpoint.DAHeight, + TxIndex: checkpoint.TxIndex, + } + + data, err := proto.Marshal(pbCheckpoint) + if err != nil { + return fmt.Errorf("failed to marshal checkpoint: %w", err) + } + + if err := cs.db.Put(ctx, checkpointKey, data); err != nil { + return fmt.Errorf("failed to save checkpoint: %w", err) + } + + return nil +} + +// Delete removes the checkpoint from the datastore +func (cs *CheckpointStore) Delete(ctx context.Context) error { + if err := cs.db.Delete(ctx, checkpointKey); err != nil { + if errors.Is(err, ds.ErrNotFound) { + return nil // Already deleted + } + return fmt.Errorf("failed 
to delete checkpoint: %w", err)
+	}
+	return nil
+}
+
+// Legacy key format for migration from old queue-based implementation
+// This allows us to detect and clean up old queue data
+func isLegacyQueueKey(key ds.Key) bool {
+	// Old queue keys had format "/based_txs/tx_..."
+	return ds.NewKey("/based_txs").IsAncestorOf(key)
+}
+
+// CleanupLegacyQueue removes all legacy queue entries from the datastore
+// This should be called during migration from the old queue-based implementation
+func (cs *CheckpointStore) CleanupLegacyQueue(ctx context.Context) error {
+	// Query all keys in the datastore
+	results, err := cs.db.Query(ctx, query.Query{KeysOnly: true})
+	if err != nil {
+		return fmt.Errorf("failed to query datastore: %w", err)
+	}
+	defer results.Close()
+
+	for result := range results.Next() {
+		if result.Error != nil {
+			continue
+		}
+
+		key := ds.NewKey(result.Key)
+		// Only delete keys that belong to the legacy queue, never the checkpoint
+		if isLegacyQueueKey(key) {
+			if err := cs.db.Delete(ctx, key); err != nil {
+				// Log but continue - best effort cleanup
+				continue
+			}
+		}
+	}
+
+	return nil
+}
+
+// Helper function to encode uint64 to bytes for potential future use
+func encodeUint64(v uint64) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, v)
+	return b
+}
+
+// Helper function to decode bytes to uint64 for potential future use
+func decodeUint64(b []byte) (uint64, error) {
+	if len(b) != 8 {
+		return 0, fmt.Errorf("invalid length for uint64: got %d, expected 8", len(b))
+	}
+	return binary.BigEndian.Uint64(b), nil
+}
diff --git a/sequencers/based/checkpoint_test.go b/sequencers/based/checkpoint_test.go
new file mode 100644
index 0000000000..9a9eee049f
--- /dev/null
+++ b/sequencers/based/checkpoint_test.go
@@ -0,0 +1,197 @@
+package based
+
+import (
+	"context"
+	"testing"
+
+	ds "github.com/ipfs/go-datastore"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCheckpointStore_SaveAndLoad(t *testing.T) {
+	ctx := context.Background()
+	db := ds.NewMapDatastore()
+	store := NewCheckpointStore(db)
+
+	// Test loading when no checkpoint exists
+	_, err := store.Load(ctx)
+	require.ErrorIs(t, err, ErrCheckpointNotFound)
+
+	// Test saving a checkpoint
+	checkpoint := &Checkpoint{
+		DAHeight: 100,
+		TxIndex:  5,
+	}
+	err = store.Save(ctx, checkpoint)
+	require.NoError(t, err)
+
+	// Test loading the saved checkpoint
+	loaded, err := store.Load(ctx)
+	require.NoError(t, err)
+	require.Equal(t, checkpoint.DAHeight, loaded.DAHeight)
+	require.Equal(t, checkpoint.TxIndex, loaded.TxIndex)
+
+	// Test updating the checkpoint
+	checkpoint.DAHeight = 200
+	checkpoint.TxIndex = 10
+	err = store.Save(ctx, checkpoint)
+	require.NoError(t, err)
+
+	loaded, err = store.Load(ctx)
+	require.NoError(t, err)
+	require.Equal(t, uint64(200), loaded.DAHeight)
+	require.Equal(t, uint64(10), loaded.TxIndex)
+}
+
+func TestCheckpointStore_Delete(t *testing.T) {
+	ctx := context.Background()
+	db := ds.NewMapDatastore()
+	store := NewCheckpointStore(db)
+
+	// Save a checkpoint
+	checkpoint := &Checkpoint{
+		DAHeight: 100,
+		TxIndex:  5,
+	}
+	err := store.Save(ctx, checkpoint)
+	require.NoError(t, err)
+
+	// Delete it
+	err = store.Delete(ctx)
+	require.NoError(t, err)
+
+	// Verify it's gone
+	_, err = store.Load(ctx)
+	require.ErrorIs(t, err, ErrCheckpointNotFound)
+
+	// Delete again should not error
+	err = store.Delete(ctx)
+	require.NoError(t, err)
+}
+
+func TestCheckpointStore_CleanupLegacyQueue(t *testing.T) {
+	ctx :=
context.Background() + db := ds.NewMapDatastore() + store := NewCheckpointStore(db) + + // Add some legacy queue keys (simulating old implementation) + legacyKeys := []string{ + "/based_txs/tx_0_abc123", + "/based_txs/tx_1_def456", + "/based_txs/tx_2_ghi789", + } + for _, key := range legacyKeys { + err := db.Put(ctx, ds.NewKey(key), []byte("dummy data")) + require.NoError(t, err) + } + + // Save a checkpoint (should not be cleaned up) + checkpoint := &Checkpoint{ + DAHeight: 100, + TxIndex: 5, + } + err := store.Save(ctx, checkpoint) + require.NoError(t, err) + + // Cleanup legacy queue + err = store.CleanupLegacyQueue(ctx) + require.NoError(t, err) + + // Verify legacy keys are gone + for _, key := range legacyKeys { + has, err := db.Has(ctx, ds.NewKey(key)) + require.NoError(t, err) + require.False(t, has, "legacy key should be deleted: %s", key) + } + + // Verify checkpoint still exists + loaded, err := store.Load(ctx) + require.NoError(t, err) + require.Equal(t, checkpoint.DAHeight, loaded.DAHeight) + require.Equal(t, checkpoint.TxIndex, loaded.TxIndex) +} + +func TestCheckpoint_EdgeCases(t *testing.T) { + ctx := context.Background() + db := ds.NewMapDatastore() + store := NewCheckpointStore(db) + + // Test with zero values + checkpoint := &Checkpoint{ + DAHeight: 0, + TxIndex: 0, + } + err := store.Save(ctx, checkpoint) + require.NoError(t, err) + + loaded, err := store.Load(ctx) + require.NoError(t, err) + require.Equal(t, uint64(0), loaded.DAHeight) + require.Equal(t, uint64(0), loaded.TxIndex) + + // Test with max uint64 values + checkpoint = &Checkpoint{ + DAHeight: ^uint64(0), + TxIndex: ^uint64(0), + } + err = store.Save(ctx, checkpoint) + require.NoError(t, err) + + loaded, err = store.Load(ctx) + require.NoError(t, err) + require.Equal(t, ^uint64(0), loaded.DAHeight) + require.Equal(t, ^uint64(0), loaded.TxIndex) +} + +func TestCheckpointStore_ConcurrentAccess(t *testing.T) { + ctx := context.Background() + db := ds.NewMapDatastore() + store := NewCheckpointStore(db) + + // Save initial checkpoint + checkpoint := &Checkpoint{ + DAHeight: 100, + TxIndex: 0, + } + err := store.Save(ctx, checkpoint) + require.NoError(t, err) + + // Test concurrent reads + done := make(chan bool, 10) + for i := 0; i < 10; i++ { + go func() { + defer func() { done <- true }() + loaded, err := store.Load(ctx) + require.NoError(t, err) + require.NotNil(t, loaded) + }() + } + + for i := 0; i < 10; i++ { + <-done + } +} + +func TestEncodeDecodeUint64(t *testing.T) { + testCases := []uint64{ + 0, + 1, + 100, + 1000000, + ^uint64(0), // max uint64 + } + + for _, tc := range testCases { + encoded := encodeUint64(tc) + require.Equal(t, 8, len(encoded), "encoded length should be 8 bytes") + + decoded, err := decodeUint64(encoded) + require.NoError(t, err) + require.Equal(t, tc, decoded) + } + + // Test invalid length + _, err := decodeUint64([]byte{1, 2, 3}) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid length") +} diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 3e508c5a4e..15b4aff9ec 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -27,6 +27,7 @@ var _ coresequencer.Sequencer = (*BasedSequencer)(nil) // BasedSequencer is a sequencer that only retrieves transactions from the DA layer // via the forced inclusion mechanism. It does not accept transactions from the reaper. +// It uses DA as a queue and only persists a checkpoint of where it is in processing. 
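+// On restart it resumes from the persisted (DAHeight, TxIndex) position instead of re-persisting transactions.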
diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go
index 3e508c5a4e..15b4aff9ec 100644
--- a/sequencers/based/sequencer.go
+++ b/sequencers/based/sequencer.go
@@ -27,6 +27,7 @@ var _ coresequencer.Sequencer = (*BasedSequencer)(nil)
 
 // BasedSequencer is a sequencer that only retrieves transactions from the DA layer
 // via the forced inclusion mechanism. It does not accept transactions from the reaper.
+// It uses DA as a queue and only persists a checkpoint of where it is in processing.
 type BasedSequencer struct {
 	fiRetriever ForcedInclusionRetriever
 	da          coreda.DA
@@ -34,8 +35,13 @@ type BasedSequencer struct {
 	genesis genesis.Genesis
 	logger  zerolog.Logger
 
-	daHeight atomic.Uint64
-	txQueue  *TxQueue
+	daHeight        atomic.Uint64
+	checkpointStore *CheckpointStore
+	checkpoint      *Checkpoint
+
+	// Cached transactions from the current DA block being processed
+	currentBatchTxs [][]byte
+	currentBatchDA  *block.ForcedInclusionEvent
 }
 
 // NewBasedSequencer creates a new based sequencer instance
@@ -47,22 +53,46 @@ func NewBasedSequencer(
 	config config.Config,
 	genesis genesis.Genesis,
 	logger zerolog.Logger,
-	maxQueueSize int,
 ) (*BasedSequencer, error) {
 	bs := &BasedSequencer{
-		fiRetriever: fiRetriever,
-		da:          da,
-		config:      config,
-		genesis:     genesis,
-		logger:      logger.With().Str("component", "based_sequencer").Logger(),
-		txQueue:     NewTxQueue(db, "based_txs", maxQueueSize),
+		fiRetriever:     fiRetriever,
+		da:              da,
+		config:          config,
+		genesis:         genesis,
+		logger:          logger.With().Str("component", "based_sequencer").Logger(),
+		checkpointStore: NewCheckpointStore(db),
 	}
 
 	bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor
 
+	// Load checkpoint from DB, or initialize if none exists
 	loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
-	if err := bs.txQueue.Load(loadCtx); err != nil {
-		return nil, fmt.Errorf("failed to load transaction queue from DB: %w", err)
+
+	checkpoint, err := bs.checkpointStore.Load(loadCtx)
+	if err != nil {
+		if errors.Is(err, ErrCheckpointNotFound) {
+			// No checkpoint exists, initialize with genesis DA height
+			bs.checkpoint = &Checkpoint{
+				DAHeight: genesis.DAStartHeight,
+				TxIndex:  0,
+			}
+			bs.logger.Info().
+				Uint64("da_height", genesis.DAStartHeight).
+				Msg("initialized checkpoint from genesis")
+		} else {
+			return nil, fmt.Errorf("failed to load checkpoint from DB: %w", err)
+		}
+	} else {
+		bs.checkpoint = checkpoint
+		bs.logger.Info().
+			Uint64("da_height", checkpoint.DAHeight).
+			Uint64("tx_index", checkpoint.TxIndex).
+			Msg("loaded checkpoint from DB")
+	}
+
+	// Clean up any legacy queue data from previous implementation
+	if err := bs.checkpointStore.CleanupLegacyQueue(loadCtx); err != nil {
+		bs.logger.Warn().Err(err).Msg("failed to cleanup legacy queue data, continuing anyway")
 	}
 
 	return bs, nil
@@ -75,38 +105,77 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.S
 	return &coresequencer.SubmitBatchTxsResponse{}, nil
 }
 
-// GetNextBatch retrieves the next batch of transactions from the DA layer
-// It fetches forced inclusion transactions and returns them as the next batch
+// GetNextBatch retrieves the next batch of transactions from the DA layer using the checkpoint
+// It treats DA as a queue and only persists where it is in processing
 func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) {
-	currentDAHeight := s.GetDAHeight()
+	// If we have no cached transactions or we've consumed all from the current DA block,
+	// fetch the next DA block
+	if len(s.currentBatchTxs) == 0 || s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) {
+		if err := s.fetchNextDABatch(ctx); err != nil {
+			return nil, err
+		}
+	}
+
+	// Create batch from current position up to MaxBytes
+	batch := s.createBatchFromCheckpoint(req.MaxBytes)
 
-	s.logger.Debug().Uint64("da_height", currentDAHeight).Msg("fetching forced inclusion transactions from DA")
+	// Update checkpoint with how many transactions we consumed
+	txCount := uint64(len(batch.Transactions))
+	if txCount > 0 {
+		s.checkpoint.TxIndex += txCount
+
+		// If we've consumed all transactions from this DA block, move to next
+		if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) {
+			s.checkpoint.DAHeight++
+			s.checkpoint.TxIndex = 0
+			s.currentBatchTxs = nil
+			s.currentBatchDA = nil
+
+			// Update the global DA height
+			s.SetDAHeight(s.checkpoint.DAHeight)
+		}
+
+		// Persist checkpoint
+		if err := s.checkpointStore.Save(ctx, s.checkpoint); err != nil {
+			s.logger.Error().Err(err).Msg("failed to save checkpoint")
+			return nil, fmt.Errorf("failed to save checkpoint: %w", err)
+		}
+	}
+
+	return &coresequencer.GetNextBatchResponse{
+		Batch:     batch,
+		Timestamp: time.Time{}, // TODO(@julienrbrt): we need to use DA block timestamp for determinism
+		BatchData: req.LastBatchData,
+	}, nil
+}
+
+// fetchNextDABatch fetches transactions from the next DA block
+func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error {
+	currentDAHeight := s.checkpoint.DAHeight
+
+	s.logger.Debug().
+		Uint64("da_height", currentDAHeight).
+		Uint64("tx_index", s.checkpoint.TxIndex).
+		Msg("fetching forced inclusion transactions from DA")
 
 	forcedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight)
 	if err != nil {
 		// Check if forced inclusion is not configured
 		if errors.Is(err, block.ErrForceInclusionNotConfigured) {
-			return nil, errors.New("forced inclusion not configured")
+			return errors.New("forced inclusion not configured")
 		} else if errors.Is(err, coreda.ErrHeightFromFuture) {
-			// If we get a height from future error, keep the current DA height and return batch
+			// If we get a height from future error, stay at current position
 			// We'll retry the same height on the next call until DA produces that block
 			s.logger.Debug().
 				Uint64("da_height", currentDAHeight).
 				Msg("DA height from future, waiting for DA to produce block")
-		} else {
-			s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions")
-			return nil, err
-		}
-	} else {
-		// Update DA height.
-		// If we are in between epochs, we still need to bump the da height.
-		// At the end of an epoch, we need to bump to go to the next epoch.
-		if forcedTxsEvent.EndDaHeight >= currentDAHeight {
-			s.SetDAHeight(forcedTxsEvent.EndDaHeight + 1)
+			return nil
 		}
+		s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions")
+		return err
 	}
 
-	// Add forced inclusion transactions to the queue with validation
+	// Validate and filter transactions
 	validTxs := make([][]byte, 0, len(forcedTxsEvent.Txs))
 	skippedTxs := 0
 	for _, tx := range forcedTxsEvent.Txs {
@@ -122,61 +191,51 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get
 		validTxs = append(validTxs, tx)
 	}
 
-	// Add valid transactions to the persistent queue
-	if len(validTxs) > 0 {
-		if err := s.txQueue.AddBatch(ctx, validTxs); err != nil {
-			if errors.Is(err, ErrQueueFull) {
-				s.logger.Warn().
-					Int("tx_count", len(validTxs)).
-					Msg("Transaction queue is full, rejecting forced inclusion transactions")
-				return nil, fmt.Errorf("transaction queue is full: %w", err)
-			}
-			return nil, fmt.Errorf("failed to add transactions to queue: %w", err)
-		}
-	}
-
 	s.logger.Info().
 		Int("valid_tx_count", len(validTxs)).
 		Int("skipped_tx_count", skippedTxs).
-		Int("queue_size", s.txQueue.Size()).
 		Uint64("da_height_start", forcedTxsEvent.StartDaHeight).
 		Uint64("da_height_end", forcedTxsEvent.EndDaHeight).
-		Msg("processed forced inclusion transactions from DA")
+		Msg("fetched forced inclusion transactions from DA")
 
-	batch := s.createBatchFromQueue(req.MaxBytes)
+	// Cache the transactions for this DA block
+	s.currentBatchTxs = validTxs
+	s.currentBatchDA = forcedTxsEvent
 
-	return &coresequencer.GetNextBatchResponse{
-		Batch:     batch,
-		Timestamp: time.Time{}, // TODO(@julienrbrt): we need to use DA block timestamp for determinism
-		BatchData: req.LastBatchData,
-	}, nil
+	// If we had a non-zero tx index, we're resuming from a crash mid-block
+	// The transactions starting from that index are what we need
+	if s.checkpoint.TxIndex > 0 {
+		s.logger.Info().
+			Uint64("tx_index", s.checkpoint.TxIndex).
+			Msg("resuming from checkpoint within DA block")
+	}
+
+	return nil
 }
 
-// createBatchFromQueue creates a batch from the transaction queue respecting MaxBytes
-func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *coresequencer.Batch {
-	if s.txQueue.Size() == 0 {
+// createBatchFromCheckpoint creates a batch from the current checkpoint position respecting MaxBytes
+func (s *BasedSequencer) createBatchFromCheckpoint(maxBytes uint64) *coresequencer.Batch {
+	if len(s.currentBatchTxs) == 0 || s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) {
 		return &coresequencer.Batch{Transactions: nil}
 	}
 
-	// Peek at transactions without removing them
-	txs := s.txQueue.Peek(maxBytes)
-	if len(txs) == 0 {
-		return &coresequencer.Batch{Transactions: nil}
-	}
+	var result [][]byte
+	var totalBytes uint64
 
-	// Consume the transactions we're including in the batch
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	defer cancel()
+	// Start from the checkpoint index
+	for i := s.checkpoint.TxIndex; i < uint64(len(s.currentBatchTxs)); i++ {
+		tx := s.currentBatchTxs[i]
+		txSize := uint64(len(tx))
 
-	if err := s.txQueue.Consume(ctx, len(txs)); err != nil {
-		s.logger.Error().Err(err).
-			Int("tx_count", len(txs)).
-			Msg("failed to consume transactions from queue")
-		// Return empty batch on error to avoid data inconsistency
-		return &coresequencer.Batch{Transactions: nil}
+		if totalBytes+txSize > maxBytes {
+			break
+		}
+
+		result = append(result, tx)
+		totalBytes += txSize
 	}
 
-	return &coresequencer.Batch{Transactions: txs}
+	return &coresequencer.Batch{Transactions: result}
 }
 
 // VerifyBatch verifies a batch of transactions
@@ -189,12 +248,12 @@ func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.Veri
 
 // SetDAHeight sets the current DA height for the sequencer
 // This should be called when the sequencer needs to sync to a specific DA height
-func (c *BasedSequencer) SetDAHeight(height uint64) {
-	c.daHeight.Store(height)
-	c.logger.Debug().Uint64("da_height", height).Msg("DA height updated")
+func (s *BasedSequencer) SetDAHeight(height uint64) {
+	s.daHeight.Store(height)
+	s.logger.Debug().Uint64("da_height", height).Msg("DA height updated")
 }
 
 // GetDAHeight returns the current DA height
-func (c *BasedSequencer) GetDAHeight() uint64 {
-	return c.daHeight.Load()
+func (s *BasedSequencer) GetDAHeight() uint64 {
+	return s.daHeight.Load()
 }
diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go
index 27688af720..7870bb56a7 100644
--- a/sequencers/based/sequencer_test.go
+++ b/sequencers/based/sequencer_test.go
@@ -90,7 +90,7 @@ func createTestSequencer(t *testing.T, mockDA *MockDA, cfg config.Config, gen ge
 	// Create in-memory datastore
 	db := syncds.MutexWrap(ds.NewMapDatastore())
 
-	seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop(), 0)
+	seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop())
 	require.NoError(t, err)
 	return seq
 }
@@ -121,8 +121,8 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) {
 	require.NoError(t, err)
 	require.NotNil(t, resp)
 
-	// Transactions should not be added to queue for based sequencer
-	assert.Equal(t, 0, seq.txQueue.Size())
+	// Transactions should not be processed for based sequencer
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
 }
 
 func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) {
@@ -161,15 +161,19 @@
 	assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0])
 	assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1])
 
-	// DA height should be updated to epochEnd + 1
-	assert.Equal(t, uint64(101), seq.GetDAHeight())
+	// Checkpoint should have moved to next DA height
+	assert.Equal(t, uint64(101), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
 
 	mockDA.AssertExpectations(t)
 }
 
 func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) {
 	mockDA := new(MockDA)
-	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrBlobNotFound)
+	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
+		IDs:       []coreda.ID{},
+		Timestamp: time.Now(),
+	}, nil)
 
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
@@ -193,6 +197,7 @@ func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) {
 	require.NoError(t, err)
 	require.NotNil(t, resp)
 	require.NotNil(t, resp.Batch)
+	// Should return empty batch when DA has no transactions
 	assert.Equal(t, 0, len(resp.Batch.Transactions))
 
 	mockDA.AssertExpectations(t)
@@ -200,45 +205,50 @@
 func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) {
 	mockDA := new(MockDA)
+
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
 		DAStartHeight:          100,
-		DAEpochForcedInclusion: 1,
+		DAEpochForcedInclusion: 0, // Not configured
 	}
 
-	// Create config without forced inclusion namespace
-	cfgNoFI := config.DefaultConfig()
-	cfgNoFI.DA.ForcedInclusionNamespace = ""
-	seq := createTestSequencer(t, mockDA, cfgNoFI, gen)
+	cfg := config.DefaultConfig()
+	cfg.DA.Namespace = "test-ns"
+	cfg.DA.DataNamespace = "test-data-ns"
+	cfg.DA.ForcedInclusionNamespace = "" // Empty to trigger not configured
+
+	daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop())
+	fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop())
+
+	// Create in-memory datastore
+	db := syncds.MutexWrap(ds.NewMapDatastore())
+
+	seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop())
+	require.NoError(t, err)
 
 	req := coresequencer.GetNextBatchRequest{
 		MaxBytes:      1000000,
 		LastBatchData: nil,
 	}
 
-	resp, err := seq.GetNextBatch(context.Background(), req)
+	_, err = seq.GetNextBatch(context.Background(), req)
 	require.Error(t, err)
-	require.Nil(t, resp)
+	assert.Contains(t, err.Error(), "forced inclusion not configured")
 }
 
 func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) {
-	testBlobs := [][]byte{
-		make([]byte, 50),  // 50 bytes
-		make([]byte, 60),  // 60 bytes
-		make([]byte, 100), // 100 bytes
-	}
+	// Create transactions of known sizes
+	tx1 := make([]byte, 100)
+	tx2 := make([]byte, 150)
+	tx3 := make([]byte, 200)
+	testBlobs := [][]byte{tx1, tx2, tx3}
 
 	mockDA := new(MockDA)
-	// First call returns forced txs at height 100
 	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
 		IDs:       []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")},
 		Timestamp: time.Now(),
-	}, nil).Once()
-	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil).Once()
-
-	// Subsequent calls at height 101 and 102 (after DA height bumps) should return no new forced txs
-	mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once()
-	mockDA.On("GetIDs", mock.Anything, uint64(102), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once()
+	}, nil)
+	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil)
 
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
@@ -253,9 +263,9 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) {
 
 	seq := createTestSequencer(t, mockDA, cfg, gen)
 
-	// First call with max 100 bytes - should get first 2 txs (50 + 60 = 110, but logic allows if batch has content)
+	// First call with MaxBytes that fits only first 2 transactions
 	req := coresequencer.GetNextBatchRequest{
-		MaxBytes:      100,
+		MaxBytes:      250,
 		LastBatchData: nil,
 	}
 
@@ -263,36 +273,49 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) {
 	require.NoError(t, err)
 	require.NotNil(t, resp)
 	require.NotNil(t, resp.Batch)
-	// Should get first tx (50 bytes), second tx would exceed limit (50+60=110 > 100)
-	assert.Equal(t, 1, len(resp.Batch.Transactions))
-	assert.Equal(t, 2, seq.txQueue.Size()) // 2 remaining in queue
+	// Should only get first 2 transactions (100 + 150 = 250 bytes)
+	assert.Equal(t, 2, len(resp.Batch.Transactions))
+	assert.Equal(t, uint64(100), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(2), seq.checkpoint.TxIndex)
 
-	// Second call should get next tx from queue
-	resp2, err := seq.GetNextBatch(context.Background(), req)
-	require.NoError(t, err)
-	require.NotNil(t, resp2)
-	require.NotNil(t, resp2.Batch)
-	assert.Equal(t, 1, len(resp2.Batch.Transactions))
-	assert.Equal(t, 1, seq.txQueue.Size()) // 1 remaining in queue
-
-	// Third call with larger maxBytes to get the 100-byte tx
-	req3 := coresequencer.GetNextBatchRequest{
-		MaxBytes:      200,
+	// Second call should get the remaining transaction
+	req = coresequencer.GetNextBatchRequest{
+		MaxBytes:      1000,
 		LastBatchData: nil,
 	}
-	resp3, err := seq.GetNextBatch(context.Background(), req3)
+
+	resp, err = seq.GetNextBatch(context.Background(), req)
 	require.NoError(t, err)
-	require.NotNil(t, resp3)
-	require.NotNil(t, resp3.Batch)
-	assert.Equal(t, 1, len(resp3.Batch.Transactions))
-	assert.Equal(t, 0, seq.txQueue.Size()) // Queue should be empty
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Batch)
+	assert.Equal(t, 1, len(resp.Batch.Transactions))
+	assert.Equal(t, 200, len(resp.Batch.Transactions[0]))
+
+	// After consuming all transactions, checkpoint should move to next DA height
+	assert.Equal(t, uint64(101), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
 
 	mockDA.AssertExpectations(t)
 }
 
-func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) {
+func TestBasedSequencer_GetNextBatch_MultipleDABlocks(t *testing.T) {
+	testBlobs1 := [][]byte{[]byte("tx1"), []byte("tx2")}
+	testBlobs2 := [][]byte{[]byte("tx3"), []byte("tx4")}
+
 	mockDA := new(MockDA)
-	mockDA.On("GetIDs", mock.Anything, mock.Anything, mock.Anything).Return(nil, coreda.ErrBlobNotFound)
+	// First DA block
+	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
+		IDs:       []coreda.ID{[]byte("id1"), []byte("id2")},
+		Timestamp: time.Now(),
+	}, nil).Once()
+	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs1, nil).Once()
+
+	// Second DA block
+	mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(&coreda.GetIDsResult{
+		IDs:       []coreda.ID{[]byte("id3"), []byte("id4")},
+		Timestamp: time.Now(),
+	}, nil).Once()
+	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs2, nil).Once()
 
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
@@ -307,43 +330,37 @@ func TestBasedSequencer_GetNextBatch_FromQueue(t *testing.T) {
 
 	seq := createTestSequencer(t, mockDA, cfg, gen)
 
-	// Pre-populate the queue
-	ctx := context.Background()
-	err := seq.txQueue.Add(ctx, []byte("queued_tx1"))
-	require.NoError(t, err)
-	err = seq.txQueue.Add(ctx, []byte("queued_tx2"))
-	require.NoError(t, err)
-
 	req := coresequencer.GetNextBatchRequest{
 		MaxBytes:      1000000,
 		LastBatchData: nil,
 	}
 
+	// First batch from first DA block
 	resp, err := seq.GetNextBatch(context.Background(), req)
 	require.NoError(t, err)
 	require.NotNil(t, resp)
-	require.NotNil(t, resp.Batch)
 	assert.Equal(t, 2, len(resp.Batch.Transactions))
-	assert.Equal(t, []byte("queued_tx1"), resp.Batch.Transactions[0])
-	assert.Equal(t, []byte("queued_tx2"), resp.Batch.Transactions[1])
+	assert.Equal(t, []byte("tx1"), resp.Batch.Transactions[0])
+	assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[1])
+	assert.Equal(t, uint64(101), seq.checkpoint.DAHeight)
 
-	// Queue should be empty now
-	assert.Equal(t, 0, seq.txQueue.Size())
+	// Second batch from second DA block
+	resp, err = seq.GetNextBatch(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	assert.Equal(t, 2, len(resp.Batch.Transactions))
+	assert.Equal(t, []byte("tx3"), resp.Batch.Transactions[0])
+	assert.Equal(t, []byte("tx4"), resp.Batch.Transactions[1])
+	assert.Equal(t, uint64(102), seq.checkpoint.DAHeight)
+
+	mockDA.AssertExpectations(t)
 }
 
-func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) {
-	mockDA := new(MockDA)
+func TestBasedSequencer_GetNextBatch_ResumesFromCheckpoint(t *testing.T) {
+	testBlobs := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")}
 
-	// First call: return a forced tx that will be added to queue
-	forcedTx := make([]byte, 150)
-	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
-		IDs:       []coreda.ID{[]byte("id1")},
-		Timestamp: time.Now(),
-	}, nil).Once()
-	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx}, nil).Once()
-
-	// Second call: no new forced txs at height 101 (after first call bumped DA height to epochEnd + 1)
-	mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once()
+	mockDA := new(MockDA)
+	// No DA calls expected since we manually set the state
 
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
@@ -358,56 +375,45 @@ func TestBasedSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testin
 
 	seq := createTestSequencer(t, mockDA, cfg, gen)
 
-	// First call with maxBytes = 100
-	// Forced tx (150 bytes) is added to queue, but batch will be empty since it exceeds maxBytes
-	req1 := coresequencer.GetNextBatchRequest{
-		MaxBytes:      100,
-		LastBatchData: nil,
+	// Simulate processing first transaction (resuming from checkpoint after restart)
+	seq.checkpoint.DAHeight = 100
+	seq.checkpoint.TxIndex = 1
+	seq.currentBatchTxs = testBlobs
+	seq.currentBatchDA = &block.ForcedInclusionEvent{
+		StartDaHeight: 100,
+		EndDaHeight:   100,
+		Txs:           testBlobs,
 	}
 
-	resp1, err := seq.GetNextBatch(context.Background(), req1)
-	require.NoError(t, err)
-	require.NotNil(t, resp1)
-	require.NotNil(t, resp1.Batch)
-	assert.Equal(t, 0, len(resp1.Batch.Transactions), "Should have no txs as forced tx exceeds maxBytes")
-
-	// Verify forced tx is in queue
-	assert.Equal(t, 1, seq.txQueue.Size(), "Forced tx should be in queue")
-
-	// Second call with larger maxBytes = 200
-	// Should process tx from queue
-	req2 := coresequencer.GetNextBatchRequest{
-		MaxBytes:      200,
+	req := coresequencer.GetNextBatchRequest{
+		MaxBytes:      1000000,
 		LastBatchData: nil,
 	}
 
-	resp2, err := seq.GetNextBatch(context.Background(), req2)
+	// Should resume from index 1, getting tx2 and tx3
+	resp, err := seq.GetNextBatch(context.Background(), req)
 	require.NoError(t, err)
-	require.NotNil(t, resp2)
-	require.NotNil(t, resp2.Batch)
-	assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include tx from queue")
-	assert.Equal(t, 150, len(resp2.Batch.Transactions[0]))
-
-	// Queue should now be empty
-	assert.Equal(t, 0, seq.txQueue.Size(), "Queue should be empty")
+	require.NotNil(t, resp)
+	assert.Equal(t, 2, len(resp.Batch.Transactions))
+	assert.Equal(t, []byte("tx2"), resp.Batch.Transactions[0])
+	assert.Equal(t, []byte("tx3"), resp.Batch.Transactions[1])
 
-	mockDA.AssertExpectations(t)
+	// Should have moved to next DA height
+	assert.Equal(t, uint64(101), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
 }
 
 func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) {
-	mockDA := new(MockDA)
+	// Create a transaction larger than max bytes
+	largeTx := make([]byte, 2000000) // 2MB
+	testBlobs := [][]byte{largeTx}
 
-	// Return forced txs where combined they exceed maxBytes
-	forcedTx1 := make([]byte, 100)
-	forcedTx2 := make([]byte, 80)
+	mockDA := new(MockDA)
 	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
-		IDs:       []coreda.ID{[]byte("id1"), []byte("id2")},
+		IDs:       []coreda.ID{[]byte("id1")},
 		Timestamp: time.Now(),
-	}, nil).Once()
-	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return([][]byte{forcedTx1, forcedTx2}, nil).Once()
-
-	// Second call at height 101 (after first call bumped DA height to epochEnd + 1)
-	mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(nil, coreda.ErrBlobNotFound).Once()
+	}, nil)
+	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil)
 
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
@@ -422,38 +428,17 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T
 
 	seq := createTestSequencer(t, mockDA, cfg, gen)
 
-	// First call with maxBytes = 120
-	// Should get only first forced tx (100 bytes), second stays in queue
-	req1 := coresequencer.GetNextBatchRequest{
-		MaxBytes:      120,
-		LastBatchData: nil,
-	}
-
-	resp1, err := seq.GetNextBatch(context.Background(), req1)
-	require.NoError(t, err)
-	require.NotNil(t, resp1)
-	require.NotNil(t, resp1.Batch)
-	assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should only include first forced tx")
-	assert.Equal(t, 100, len(resp1.Batch.Transactions[0]))
-
-	// Verify second tx is still in queue
-	assert.Equal(t, 1, seq.txQueue.Size(), "Second tx should be in queue")
-
-	// Second call - should get the second tx from queue
-	req2 := coresequencer.GetNextBatchRequest{
-		MaxBytes:      120,
+	req := coresequencer.GetNextBatchRequest{
+		MaxBytes:      1000, // Much smaller than the transaction
 		LastBatchData: nil,
 	}
 
-	resp2, err := seq.GetNextBatch(context.Background(), req2)
+	resp, err := seq.GetNextBatch(context.Background(), req)
 	require.NoError(t, err)
-	require.NotNil(t, resp2)
-	require.NotNil(t, resp2.Batch)
-	assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include second tx from queue")
-	assert.Equal(t, 80, len(resp2.Batch.Transactions[0]))
-
-	// Queue should now be empty
-	assert.Equal(t, 0, seq.txQueue.Size(), "Queue should be empty")
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Batch)
+	// Should return empty batch since transaction exceeds max bytes
+	assert.Equal(t, 0, len(resp.Batch.Transactions))
 
 	mockDA.AssertExpectations(t)
 }
 
@@ -462,7 +447,7 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) {
 	mockDA := new(MockDA)
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
-		DAEpochForcedInclusion: 1,
+		DAEpochForcedInclusion: 10,
 	}
 
 	cfg := config.DefaultConfig()
@@ -474,11 +459,13 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) {
 
 	req := coresequencer.VerifyBatchRequest{
 		Id:        []byte("test-chain"),
-		BatchData: [][]byte{[]byte("tx1")},
+		BatchData: [][]byte{[]byte("tx1"), []byte("tx2")},
 	}
 
 	resp, err := seq.VerifyBatch(context.Background(), req)
 	require.NoError(t, err)
+	require.NotNil(t, resp)
+	// Based sequencer always verifies as true since all txs come from DA
 	assert.True(t, resp.Status)
 }
 
@@ -487,7 +474,7 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) {
 	gen := genesis.Genesis{
 		ChainID:                "test-chain",
 		DAStartHeight:          100,
-		DAEpochForcedInclusion: 1,
+		DAEpochForcedInclusion: 10,
 	}
 
 	cfg := config.DefaultConfig()
@@ -497,8 +484,10 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) {
 
 	seq := createTestSequencer(t, mockDA, cfg, gen)
 
+	// Initial height from genesis
 	assert.Equal(t, uint64(100), seq.GetDAHeight())
 
+	// Set new height
 	seq.SetDAHeight(200)
 	assert.Equal(t, uint64(200), seq.GetDAHeight())
 }
@@ -525,12 +514,98 @@ func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) {
 		LastBatchData: nil,
 	}
 
-	// With new error handling, errors during blob processing return empty batch instead of error
+	// DA errors are handled gracefully by returning empty batch and retrying
 	resp, err := seq.GetNextBatch(context.Background(), req)
 	require.NoError(t, err)
 	require.NotNil(t, resp)
-	require.NotNil(t, resp.Batch)
 	assert.Equal(t, 0, len(resp.Batch.Transactions), "Should return empty batch on DA error")
 
 	mockDA.AssertExpectations(t)
 }
+
+func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) {
+	mockDA := new(MockDA)
+	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrHeightFromFuture)
+
+	gen := genesis.Genesis{
+		ChainID:                "test-chain",
+		DAStartHeight:          100,
+		DAEpochForcedInclusion: 1,
+	}
+
+	cfg := config.DefaultConfig()
+	cfg.DA.Namespace = "test-ns"
+	cfg.DA.DataNamespace = "test-data-ns"
+	cfg.DA.ForcedInclusionNamespace = "test-fi-ns"
+
+	seq := createTestSequencer(t, mockDA, cfg, gen)
+
+	req := coresequencer.GetNextBatchRequest{
+		MaxBytes:      1000000,
+		LastBatchData: nil,
+	}
+
+	// Should not error, but return empty batch
+	resp, err := seq.GetNextBatch(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	assert.Equal(t, 0, len(resp.Batch.Transactions))
+
+	// DA height should stay the same
+	assert.Equal(t, uint64(100), seq.checkpoint.DAHeight)
+
+	mockDA.AssertExpectations(t)
+}
+
+func TestBasedSequencer_CheckpointPersistence(t *testing.T) {
+	testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")}
+
+	mockDA := new(MockDA)
+	mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{
+		IDs:       []coreda.ID{[]byte("id1"), []byte("id2")},
+		Timestamp: time.Now(),
+	}, nil)
+	mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil)
+
+	gen := genesis.Genesis{
+		ChainID:                "test-chain",
+		DAStartHeight:          100,
+		DAEpochForcedInclusion: 1,
+	}
+
+	cfg := config.DefaultConfig()
+	cfg.DA.Namespace = "test-ns"
+	cfg.DA.DataNamespace = "test-data-ns"
+	cfg.DA.ForcedInclusionNamespace = "test-fi-ns"
+
+	// Create persistent datastore
+	db := syncds.MutexWrap(ds.NewMapDatastore())
+
+	// Create first sequencer
+	daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop())
+	fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop())
+
+	seq1, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop())
+	require.NoError(t, err)
+
+	req := coresequencer.GetNextBatchRequest{
+		MaxBytes:      1000000,
+		LastBatchData: nil,
+	}
+
+	// Process a batch
+	resp, err := seq1.GetNextBatch(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	assert.Equal(t, 2, len(resp.Batch.Transactions))
+
+	// Create a new sequencer with the same datastore (simulating restart)
+	seq2, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop())
+	require.NoError(t, err)
+
+	// Checkpoint should be loaded from DB
+	assert.Equal(t, uint64(101), seq2.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq2.checkpoint.TxIndex)
+
+	mockDA.AssertExpectations(t)
+}
diff --git a/types/pb/evnode/v1/batch.pb.go b/types/pb/evnode/v1/batch.pb.go
index d5902516d5..576f1edfcc 100644
--- a/types/pb/evnode/v1/batch.pb.go
+++ b/types/pb/evnode/v1/batch.pb.go
@@ -66,60 +66,13 @@ func (x *Batch) GetTxs() [][]byte {
 	return nil
 }
 
-// Tx is a single transaction.
-type Tx struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
-}
-
-func (x *Tx) Reset() {
-	*x = Tx{}
-	mi := &file_evnode_v1_batch_proto_msgTypes[1]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
-}
-
-func (x *Tx) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Tx) ProtoMessage() {}
-
-func (x *Tx) ProtoReflect() protoreflect.Message {
-	mi := &file_evnode_v1_batch_proto_msgTypes[1]
-	if x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Tx.ProtoReflect.Descriptor instead.
-func (*Tx) Descriptor() ([]byte, []int) {
-	return file_evnode_v1_batch_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Tx) GetData() []byte {
-	if x != nil {
-		return x.Data
-	}
-	return nil
-}
-
 var File_evnode_v1_batch_proto protoreflect.FileDescriptor
 
 const file_evnode_v1_batch_proto_rawDesc = "" +
 	"\n" +
 	"\x15evnode/v1/batch.proto\x12\tevnode.v1\"\x19\n" +
 	"\x05Batch\x12\x10\n" +
-	"\x03txs\x18\x01 \x03(\fR\x03txs\"\x18\n" +
-	"\x02Tx\x12\x12\n" +
-	"\x04data\x18\x01 \x01(\fR\x04dataB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3"
+	"\x03txs\x18\x01 \x03(\fR\x03txsB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3"
 
 var (
 	file_evnode_v1_batch_proto_rawDescOnce sync.Once
@@ -133,10 +86,9 @@ func file_evnode_v1_batch_proto_rawDescGZIP() []byte {
 	return file_evnode_v1_batch_proto_rawDescData
 }
 
-var file_evnode_v1_batch_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_evnode_v1_batch_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_evnode_v1_batch_proto_goTypes = []any{
 	(*Batch)(nil), // 0: evnode.v1.Batch
-	(*Tx)(nil),    // 1: evnode.v1.Tx
 }
 var file_evnode_v1_batch_proto_depIdxs = []int32{
 	0, // [0:0] is the sub-list for method output_type
@@ -157,7 +109,7 @@ func file_evnode_v1_batch_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_batch_proto_rawDesc), len(file_evnode_v1_batch_proto_rawDesc)),
 			NumEnums:      0,
-			NumMessages:   2,
+			NumMessages:   1,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/types/pb/evnode/v1/state.pb.go b/types/pb/evnode/v1/state.pb.go
index ea7610b7d9..aa0f23155e 100644
--- a/types/pb/evnode/v1/state.pb.go
+++ b/types/pb/evnode/v1/state.pb.go
@@ -123,6 +123,106 @@ func (x *State) GetLastHeaderHash() []byte {
 	return nil
 }
 
+// BasedCheckpoint tracks the position in the DA where transactions were last processed
+type BasedCheckpoint struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// DA block height being processed
+	DaHeight uint64 `protobuf:"varint,1,opt,name=da_height,json=daHeight,proto3" json:"da_height,omitempty"`
+	// Index of the next transaction to process within the DA block's forced inclusion batch
+	TxIndex       uint64 `protobuf:"varint,2,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *BasedCheckpoint) Reset() {
+	*x = BasedCheckpoint{}
+	mi := &file_evnode_v1_state_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *BasedCheckpoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasedCheckpoint) ProtoMessage() {}
+
+func (x *BasedCheckpoint) ProtoReflect() protoreflect.Message {
+	mi := &file_evnode_v1_state_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasedCheckpoint.ProtoReflect.Descriptor instead.
+func (*BasedCheckpoint) Descriptor() ([]byte, []int) {
+	return file_evnode_v1_state_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BasedCheckpoint) GetDaHeight() uint64 {
+	if x != nil {
+		return x.DaHeight
+	}
+	return 0
+}
+
+func (x *BasedCheckpoint) GetTxIndex() uint64 {
+	if x != nil {
+		return x.TxIndex
+	}
+	return 0
+}
+
+// Tx represents a transaction with its raw data
+type Tx struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Tx) Reset() {
+	*x = Tx{}
+	mi := &file_evnode_v1_state_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Tx) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tx) ProtoMessage() {}
+
+func (x *Tx) ProtoReflect() protoreflect.Message {
+	mi := &file_evnode_v1_state_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Tx.ProtoReflect.Descriptor instead.
+func (*Tx) Descriptor() ([]byte, []int) {
+	return file_evnode_v1_state_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Tx) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
 var File_evnode_v1_state_proto protoreflect.FileDescriptor
 
 const file_evnode_v1_state_proto_rawDesc = "" +
@@ -136,7 +236,12 @@ const file_evnode_v1_state_proto_rawDesc = "" +
 	"\x0flast_block_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\rlastBlockTime\x12\x1b\n" +
 	"\tda_height\x18\x06 \x01(\x04R\bdaHeight\x12\x19\n" +
 	"\bapp_hash\x18\b \x01(\fR\aappHash\x12(\n" +
-	"\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\bB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3"
+	"\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"I\n" +
+	"\x0fBasedCheckpoint\x12\x1b\n" +
+	"\tda_height\x18\x01 \x01(\x04R\bdaHeight\x12\x19\n" +
+	"\btx_index\x18\x02 \x01(\x04R\atxIndex\"\x18\n" +
+	"\x02Tx\x12\x12\n" +
+	"\x04data\x18\x01 \x01(\fR\x04dataB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3"
 
 var (
 	file_evnode_v1_state_proto_rawDescOnce sync.Once
@@ -150,15 +255,17 @@ func file_evnode_v1_state_proto_rawDescGZIP() []byte {
 	return file_evnode_v1_state_proto_rawDescData
 }
 
-var file_evnode_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_evnode_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
 var file_evnode_v1_state_proto_goTypes = []any{
 	(*State)(nil),                 // 0: evnode.v1.State
-	(*Version)(nil),               // 1: evnode.v1.Version
-	(*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
+	(*BasedCheckpoint)(nil),       // 1: evnode.v1.BasedCheckpoint
+	(*Tx)(nil),                    // 2: evnode.v1.Tx
+	(*Version)(nil),               // 3: evnode.v1.Version
+	(*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp
 }
 var file_evnode_v1_state_proto_depIdxs = []int32{
-	1, // 0: evnode.v1.State.version:type_name -> evnode.v1.Version
-	2, // 1: evnode.v1.State.last_block_time:type_name -> google.protobuf.Timestamp
+	3, // 0: evnode.v1.State.version:type_name -> evnode.v1.Version
+	4, // 1: evnode.v1.State.last_block_time:type_name -> google.protobuf.Timestamp
 	2, // [2:2] is the sub-list for method output_type
 	2, // [2:2] is the sub-list for method input_type
 	2, // [2:2] is the sub-list for extension type_name
@@ -178,7 +285,7 @@ func file_evnode_v1_state_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_state_proto_rawDesc), len(file_evnode_v1_state_proto_rawDesc)),
 			NumEnums:      0,
-			NumMessages:   1,
+			NumMessages:   3,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
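Review note: the new `BasedCheckpoint` message keeps the based sequencer's entire persistent state in two varint fields, so a checkpoint typically serializes to just a few bytes. A round-trip sketch (the import path is assumed from the `go_package` option above):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	pb "github.com/evstack/ev-node/types/pb/evnode/v1"
)

func main() {
	// Two varint fields: tag 0x08 for da_height, tag 0x10 for tx_index.
	cp := &pb.BasedCheckpoint{DaHeight: 100, TxIndex: 5}

	raw, err := proto.Marshal(cp)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded checkpoint: %x (%d bytes)\n", raw, len(raw)) // 08641005 (4 bytes)

	var back pb.BasedCheckpoint
	if err := proto.Unmarshal(raw, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.GetDaHeight(), back.GetTxIndex()) // 100 5
}
```

A checkpoint this small keeps the Save-per-batch write in `GetNextBatch` cheap, which is what makes dropping the per-transaction queue (next patch) viable.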
From 95e06b7ed6af0fb84f00f8725e50952c1a5277e5 Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Fri, 5 Dec 2025 10:50:51 +0100
Subject: [PATCH 04/19] refactor(sequencers): simplify based sequencer
 checkpointing

---
 docs/guides/migrating-to-ev-abci.md |   6 +-
 pkg/blob/README.md                  |   3 +
 sequencers/based/checkpoint.go      |  55 ----
 sequencers/based/checkpoint_test.go |  66 -----
 sequencers/based/queue.go           | 328 ----------------------
 sequencers/based/queue_test.go      | 419 ----------------------------
 sequencers/based/sequencer.go       |  16 +-
 sequencers/based/sequencer_test.go  |   5 -
 8 files changed, 8 insertions(+), 890 deletions(-)
 delete mode 100644 sequencers/based/queue.go
 delete mode 100644 sequencers/based/queue_test.go

diff --git a/docs/guides/migrating-to-ev-abci.md b/docs/guides/migrating-to-ev-abci.md
index f49ba6df6f..eb6abcd9e0 100644
--- a/docs/guides/migrating-to-ev-abci.md
+++ b/docs/guides/migrating-to-ev-abci.md
@@ -41,9 +41,9 @@ import (
 )
 ```
 
-2. Add the migration manager keeper to your app struct
-3. Register the module in your module manager
-4. Configure the migration manager in your app initialization
+1. Add the migration manager keeper to your app struct
+2. Register the module in your module manager
+3. Configure the migration manager in your app initialization
 
 ### Step 2: Replace Staking Module with Wrapper

diff --git a/pkg/blob/README.md b/pkg/blob/README.md
index 3424342da5..837dc6c314 100644
--- a/pkg/blob/README.md
+++ b/pkg/blob/README.md
@@ -3,14 +3,17 @@
 This package is a **trimmed copy** of code from `celestia-node` to stay JSON-compatible with the blob RPC without importing the full Cosmos/Celestia dependency set.
 
 ## Upstream source
+
 - `blob.go` comes from `celestia-node/blob/blob.go` @ tag `v0.28.4` (release v0.28.4), with unused pieces removed (blob v1, proof helpers, share length calc, appconsts dependency, etc.).
 - `submit_options.go` mirrors the exported JSON fields of `celestia-node/state/tx_config.go` @ the same tag, leaving out functional options, defaults, and Cosmos keyring helpers.
 
 ## Why copy instead of import?
+
 - Avoids pulling Cosmos SDK / celestia-app dependencies into ev-node for the small surface we need (blob JSON and commitment for v0).
 - Keeps binary size and module graph smaller while remaining wire-compatible with celestia-node's blob service.
 
 ## Keeping it in sync
+
 - When celestia-node changes blob JSON or tx config fields, update this package manually:
   1. `diff -u pkg/blob/blob.go ../Celestia/celestia-node/blob/blob.go`
   2. `diff -u pkg/blob/submit_options.go ../Celestia/celestia-node/state/tx_config.go`
diff --git a/sequencers/based/checkpoint.go b/sequencers/based/checkpoint.go
index 8b3f120603..539e26251b 100644
--- a/sequencers/based/checkpoint.go
+++ b/sequencers/based/checkpoint.go
@@ -2,12 +2,10 @@ package based
 
 import (
 	"context"
-	"encoding/binary"
 	"errors"
 	"fmt"
 
 	ds "github.com/ipfs/go-datastore"
-	"github.com/ipfs/go-datastore/query"
 	"google.golang.org/protobuf/proto"
 
 	pb "github.com/evstack/ev-node/types/pb/evnode/v1"
@@ -93,56 +91,3 @@ func (cs *CheckpointStore) Delete(ctx context.Context) error {
 	}
 	return nil
 }
-
-// Legacy key format for migration from old queue-based implementation
-// This allows us to detect and clean up old queue data
-func isLegacyQueueKey(key ds.Key) bool {
-	// Old queue keys had format "/based_txs/tx_..."
-	return key.String() != checkpointKey.String() &&
-		len(key.String()) > 0
-}
-
-// CleanupLegacyQueue removes all legacy queue entries from the datastore
-// This should be called during migration from the old queue-based implementation
-func (cs *CheckpointStore) CleanupLegacyQueue(ctx context.Context) error {
-	// Query all keys in the datastore
-	results, err := cs.db.Query(ctx, query.Query{KeysOnly: true})
-	if err != nil {
-		return fmt.Errorf("failed to query datastore: %w", err)
-	}
-	defer results.Close()
-
-	deletedCount := 0
-	for result := range results.Next() {
-		if result.Error != nil {
-			continue
-		}
-
-		key := ds.NewKey(result.Key)
-		// Only delete keys that are not the checkpoint
-		if key.String() != checkpointKey.String() {
-			if err := cs.db.Delete(ctx, key); err != nil {
-				// Log but continue - best effort cleanup
-				continue
-			}
-			deletedCount++
-		}
-	}
-
-	return nil
-}
-
-// Helper function to encode uint64 to bytes for potential future use
-func encodeUint64(v uint64) []byte {
-	b := make([]byte, 8)
-	binary.BigEndian.PutUint64(b, v)
-	return b
-}
-
-// Helper function to decode bytes to uint64 for potential future use
-func decodeUint64(b []byte) (uint64, error) {
-	if len(b) != 8 {
-		return 0, fmt.Errorf("invalid length for uint64: got %d, expected 8", len(b))
-	}
-	return binary.BigEndian.Uint64(b), nil
-}
diff --git a/sequencers/based/checkpoint_test.go b/sequencers/based/checkpoint_test.go
index 9a9eee049f..489233926b 100644
--- a/sequencers/based/checkpoint_test.go
+++ b/sequencers/based/checkpoint_test.go
@@ -69,48 +69,6 @@ func TestCheckpointStore_Delete(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestCheckpointStore_CleanupLegacyQueue(t *testing.T) {
-	ctx := context.Background()
-	db := ds.NewMapDatastore()
-	store := NewCheckpointStore(db)
-
-	// Add some legacy queue keys (simulating old implementation)
-	legacyKeys := []string{
-		"/based_txs/tx_0_abc123",
-		"/based_txs/tx_1_def456",
-		"/based_txs/tx_2_ghi789",
-	}
-	for _, key := range legacyKeys {
-		err := db.Put(ctx, ds.NewKey(key), []byte("dummy data"))
-		require.NoError(t, err)
-	}
-
-	// Save a checkpoint (should not be cleaned up)
-	checkpoint := &Checkpoint{
-		DAHeight: 100,
-		TxIndex:  5,
-	}
-	err := store.Save(ctx, checkpoint)
-	require.NoError(t, err)
-
-	// Cleanup legacy queue
-	err = store.CleanupLegacyQueue(ctx)
-	require.NoError(t, err)
-
-	// Verify legacy keys are gone
-	for _, key := range legacyKeys {
-		has, err := db.Has(ctx, ds.NewKey(key))
-		require.NoError(t, err)
-		require.False(t, has, "legacy key should be deleted: %s", key)
-	}
-
-	// Verify checkpoint still exists
-	loaded, err := store.Load(ctx)
-	require.NoError(t, err)
-	require.Equal(t, checkpoint.DAHeight, loaded.DAHeight)
-	require.Equal(t, checkpoint.TxIndex, loaded.TxIndex)
-}
-
 func TestCheckpoint_EdgeCases(t *testing.T) {
 	ctx := context.Background()
 	db := ds.NewMapDatastore()
@@ -171,27 +129,3 @@ func TestCheckpointStore_ConcurrentAccess(t *testing.T) {
 		<-done
 	}
 }
-
-func TestEncodeDecodeUint64(t *testing.T) {
-	testCases := []uint64{
-		0,
-		1,
-		100,
-		1000000,
-		^uint64(0), // max uint64
-	}
-
-	for _, tc := range testCases {
-		encoded := encodeUint64(tc)
-		require.Equal(t, 8, len(encoded), "encoded length should be 8 bytes")
-
-		decoded, err := decodeUint64(encoded)
-		require.NoError(t, err)
-		require.Equal(t, tc, decoded)
-	}
-
-	// Test invalid length
-	_, err := decodeUint64([]byte{1, 2, 3})
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "invalid length")
-}
diff --git a/sequencers/based/queue.go b/sequencers/based/queue.go
deleted file mode 100644
index 63e6d8cad1..0000000000
--- a/sequencers/based/queue.go
+++ /dev/null
@@ -1,328 +0,0 @@
-package based
-
-import (
-	"context"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"sort"
-	"sync"
-
-	ds "github.com/ipfs/go-datastore"
-	ktds "github.com/ipfs/go-datastore/keytransform"
-	"github.com/ipfs/go-datastore/query"
-	"google.golang.org/protobuf/proto"
-
-	pb "github.com/evstack/ev-node/types/pb/evnode/v1"
-)
-
-// ErrQueueFull is returned when the transaction queue has reached its maximum size
-var ErrQueueFull = errors.New("transaction queue is full")
-
-func newPrefixKV(kvStore ds.Batching, prefix string) ds.Batching {
-	return ktds.Wrap(kvStore, ktds.PrefixTransform{Prefix: ds.NewKey(prefix)})
-}
-
-// TxQueue implements a persistent queue for transactions
-type TxQueue struct {
-	queue        [][]byte
-	head         int // index of the first element in the queue
-	maxQueueSize int // maximum number of transactions allowed in queue (0 = unlimited)
-	mu           sync.Mutex
-	db           ds.Batching
-}
-
-// NewTxQueue creates a new TxQueue with the specified maximum size.
-// If maxSize is 0, the queue will be unlimited.
-func NewTxQueue(db ds.Batching, prefix string, maxSize int) *TxQueue {
-	return &TxQueue{
-		queue:        make([][]byte, 0),
-		head:         0,
-		maxQueueSize: maxSize,
-		db:           newPrefixKV(db, prefix),
-	}
-}
-
-// Add adds a new transaction to the queue and writes it to the DB.
-// Returns ErrQueueFull if the queue has reached its maximum size.
-func (tq *TxQueue) Add(ctx context.Context, tx []byte) error {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	// Check if queue is full (maxQueueSize of 0 means unlimited)
-	// Use effective queue size (total length minus processed head items)
-	effectiveSize := len(tq.queue) - tq.head
-	if tq.maxQueueSize > 0 && effectiveSize >= tq.maxQueueSize {
-		return ErrQueueFull
-	}
-
-	// Generate a unique key for this transaction
-	// Use a combination of queue position and transaction hash
-	key := fmt.Sprintf("tx_%d_%s", len(tq.queue), hex.EncodeToString(tx[:min(32, len(tx))]))
-
-	pbTx := &pb.Tx{
-		Data: tx,
-	}
-
-	encodedTx, err := proto.Marshal(pbTx)
-	if err != nil {
-		return err
-	}
-
-	// First write to DB for durability
-	if err := tq.db.Put(ctx, ds.NewKey(key), encodedTx); err != nil {
-		return err
-	}
-
-	// Then add to in-memory queue
-	tq.queue = append(tq.queue, tx)
-
-	return nil
-}
-
-// AddBatch adds multiple transactions to the queue in a single operation
-func (tq *TxQueue) AddBatch(ctx context.Context, txs [][]byte) error {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	// Check if adding these transactions would exceed the queue size
-	effectiveSize := len(tq.queue) - tq.head
-	if tq.maxQueueSize > 0 && effectiveSize+len(txs) > tq.maxQueueSize {
-		return ErrQueueFull
-	}
-
-	// Use a batch operation for efficiency
-	batch, err := tq.db.Batch(ctx)
-	if err != nil {
-		return fmt.Errorf("failed to create batch: %w", err)
-	}
-
-	for i, tx := range txs {
-		// Generate a unique key for this transaction
-		key := fmt.Sprintf("tx_%d_%s", len(tq.queue)+i, hex.EncodeToString(tx[:min(32, len(tx))]))
-
-		pbTx := &pb.Tx{
-			Data: tx,
-		}
-
-		encodedTx, err := proto.Marshal(pbTx)
-		if err != nil {
-			return err
-		}
-
-		if err := batch.Put(ctx, ds.NewKey(key), encodedTx); err != nil {
-			return err
-		}
-	}
-
-	// Commit the batch
-	if err := batch.Commit(ctx); err != nil {
-		return fmt.Errorf("failed to commit batch: %w", err)
-	}
-
-	// Then add to in-memory queue
-	tq.queue = append(tq.queue, txs...)
-
-	return nil
-}
-
-// Next extracts a transaction from the queue and marks it as processed in the DB
-func (tq *TxQueue) Next(ctx context.Context) ([]byte, error) {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	// Check if queue is empty
-	if tq.head >= len(tq.queue) {
-		return nil, nil
-	}
-
-	tx := tq.queue[tq.head]
-	key := fmt.Sprintf("tx_%d_%s", tq.head, hex.EncodeToString(tx[:min(32, len(tx))]))
-
-	tq.queue[tq.head] = nil // Release memory for the dequeued element
-	tq.head++
-
-	// Compact when head gets too large to prevent memory leaks
-	// Only compact when we have significant waste (more than half processed)
-	// and when we have a reasonable number of processed items to avoid
-	// frequent compactions on small queues
-	if tq.head > len(tq.queue)/2 && tq.head > 100 {
-		remaining := copy(tq.queue, tq.queue[tq.head:])
-		// Zero out the rest of the slice to release memory
-		for i := remaining; i < len(tq.queue); i++ {
-			tq.queue[i] = nil
-		}
-		tq.queue = tq.queue[:remaining]
-		tq.head = 0
-	}
-
-	// Delete the transaction from the DB since it's been processed
-	err := tq.db.Delete(ctx, ds.NewKey(key))
-	if err != nil {
-		// Log the error but continue
-		fmt.Printf("Error deleting processed transaction: %v\n", err)
-	}
-
-	return tx, nil
-}
-
-// Peek returns transactions from the queue without removing them
-// This is useful for creating batches without committing to dequeue
-func (tq *TxQueue) Peek(maxBytes uint64) [][]byte {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	if tq.head >= len(tq.queue) {
-		return nil
-	}
-
-	var result [][]byte
-	var totalBytes uint64
-
-	for i := tq.head; i < len(tq.queue); i++ {
-		tx := tq.queue[i]
-		txSize := uint64(len(tx))
-
-		if totalBytes+txSize > maxBytes {
-			break
-		}
-
-		result = append(result, tx)
-		totalBytes += txSize
-	}
-
-	return result
-}
-
-// Consume removes the first n transactions from the queue
-// This should be called after successfully processing transactions returned by Peek
-func (tq *TxQueue) Consume(ctx context.Context, n int) error {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	if tq.head+n > len(tq.queue) {
-		return errors.New("cannot consume more transactions than available")
-	}
-
-	// Delete from DB
-	for i := 0; i < n; i++ {
-		tx := tq.queue[tq.head+i]
-		key := fmt.Sprintf("tx_%d_%s", tq.head+i, hex.EncodeToString(tx[:min(32, len(tx))]))
-
-		if err := tq.db.Delete(ctx, ds.NewKey(key)); err != nil {
-			fmt.Printf("Error deleting consumed transaction: %v\n", err)
-		}
-
-		tq.queue[tq.head+i] = nil // Release memory
-	}
-
-	tq.head += n
-
-	// Compact if needed
-	if tq.head > len(tq.queue)/2 && tq.head > 100 {
-		remaining := copy(tq.queue, tq.queue[tq.head:])
-		for i := remaining; i < len(tq.queue); i++ {
-			tq.queue[i] = nil
-		}
-		tq.queue = tq.queue[:remaining]
-		tq.head = 0
-	}
-
-	return nil
-}
-
-// Load reloads all transactions from DB into the in-memory queue after a crash or restart
-func (tq *TxQueue) Load(ctx context.Context) error {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	// Clear the current queue
-	tq.queue = make([][]byte, 0)
-	tq.head = 0
-
-	q := query.Query{}
-	results, err := tq.db.Query(ctx, q)
-	if err != nil {
-		return fmt.Errorf("error querying datastore: %w", err)
-	}
-	defer results.Close()
-
-	// Collect all entries with their keys
-	type entry struct {
-		key string
-		tx  []byte
-	}
-	var entries []entry
-
-	// Load each transaction
-	for result := range results.Next() {
-		if result.Error != nil {
-			fmt.Printf("Error reading entry from datastore: %v\n", result.Error)
-			continue
-		}
-		pbTx := &pb.Tx{}
-		err := proto.Unmarshal(result.Value, pbTx)
-		if err != nil {
-			fmt.Printf("Error decoding transaction for key '%s': %v. Skipping entry.\n", result.Key, err)
-			continue
-		}
-		entries = append(entries, entry{key: result.Key, tx: pbTx.Data})
-	}
-
-	// Sort entries by key to maintain FIFO order
-	sort.Slice(entries, func(i, j int) bool {
-		return entries[i].key < entries[j].key
-	})
-
-	// Add sorted transactions to queue
-	for _, e := range entries {
-		tq.queue = append(tq.queue, e.tx)
-	}
-
-	return nil
-}
-
-// Size returns the effective number of transactions in the queue
-// This method is primarily for testing and monitoring purposes
-func (tq *TxQueue) Size() int {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-	return len(tq.queue) - tq.head
-}
-
-// Clear removes all transactions from the queue and DB
-func (tq *TxQueue) Clear(ctx context.Context) error {
-	tq.mu.Lock()
-	defer tq.mu.Unlock()
-
-	// Delete all entries from DB
-	q := query.Query{KeysOnly: true}
-	results, err := tq.db.Query(ctx, q)
-	if err != nil {
-		return fmt.Errorf("error querying datastore: %w", err)
-	}
-	defer results.Close()
-
-	for result := range results.Next() {
-		if result.Error != nil {
-			fmt.Printf("Error reading key from datastore: %v\n", result.Error)
-			continue
-		}
-		if err := tq.db.Delete(ctx, ds.NewKey(result.Key)); err != nil {
-			fmt.Printf("Error deleting key '%s': %v\n", result.Key, err)
-		}
-	}
-
-	// Clear in-memory queue
-	tq.queue = make([][]byte, 0)
-	tq.head = 0
-
-	return nil
-}
-
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
diff --git a/sequencers/based/queue_test.go b/sequencers/based/queue_test.go
deleted file mode 100644
index 45de771f0d..0000000000
--- a/sequencers/based/queue_test.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package based
-
-import (
-	"context"
-	"testing"
-
-	ds "github.com/ipfs/go-datastore"
-	syncds "github.com/ipfs/go-datastore/sync"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestTxQueue_AddAndNext(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-
-	assert.Equal(t, 2, queue.Size())
-
-	// Get next transaction
-	tx, err := queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx1"), tx)
-	assert.Equal(t, 1, queue.Size())
-
-	// Get second transaction
-	tx, err = queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx2"), tx)
-	assert.Equal(t, 0, queue.Size())
-
-	// Queue should be empty
-	tx, err = queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Nil(t, tx)
-}
-
-func TestTxQueue_AddBatch(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	txs := [][]byte{
-		[]byte("tx1"),
-		[]byte("tx2"),
-		[]byte("tx3"),
-	}
-
-	err := queue.AddBatch(ctx, txs)
-	require.NoError(t, err)
-
-	assert.Equal(t, 3, queue.Size())
-
-	// Verify all transactions
-	for i, expectedTx := range txs {
-		tx, err := queue.Next(ctx)
-		require.NoError(t, err)
-		assert.Equal(t, expectedTx, tx, "transaction %d should match", i)
-	}
-
-	assert.Equal(t, 0, queue.Size())
-}
-
-func TestTxQueue_MaxSize(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 2) // Max 2 transactions
-
-	ctx := context.Background()
-
-	// Add first transaction
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-
-	// Add second transaction
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-
-	// Third transaction should fail
-	err = queue.Add(ctx, []byte("tx3"))
-	assert.ErrorIs(t, err, ErrQueueFull)
-
-	// Size should still be 2
-	assert.Equal(t, 2, queue.Size())
-
-	// After removing one, we should be able to add again
-	_, err = queue.Next(ctx)
-	require.NoError(t, err)
-
-	err = queue.Add(ctx, []byte("tx3"))
-	require.NoError(t, err)
-	assert.Equal(t, 2, queue.Size())
-}
-
-func TestTxQueue_AddBatchMaxSize(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 3)
-
-	ctx := context.Background()
-
-	// Add one transaction
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-
-	// Try to add 3 more (would exceed limit)
-	txs := [][]byte{
-		[]byte("tx2"),
-		[]byte("tx3"),
-		[]byte("tx4"),
-	}
-	err = queue.AddBatch(ctx, txs)
-	assert.ErrorIs(t, err, ErrQueueFull)
-
-	// Size should still be 1
-	assert.Equal(t, 1, queue.Size())
-}
-
-func TestTxQueue_Persistence(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add some transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx3"))
-	require.NoError(t, err)
-
-	assert.Equal(t, 3, queue.Size())
-
-	// Create a new queue with the same datastore
-	queue2 := NewTxQueue(db, "test", 0)
-
-	// Load from persistence
-	err = queue2.Load(ctx)
-	require.NoError(t, err)
-
-	// Should have all transactions
-	assert.Equal(t, 3, queue2.Size())
-
-	// Verify transactions are in order
-	tx, err := queue2.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx1"), tx)
-
-	tx, err = queue2.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx2"), tx)
-
-	tx, err = queue2.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx3"), tx)
-
-	assert.Equal(t, 0, queue2.Size())
-}
-
-func TestTxQueue_PersistenceAfterPartialConsumption(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx3"))
-	require.NoError(t, err)
-
-	// Consume first transaction
-	tx, err := queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx1"), tx)
-
-	// Create new queue and load
-	queue2 := NewTxQueue(db, "test", 0)
-	err = queue2.Load(ctx)
-	require.NoError(t, err)
-
-	// Should only have remaining transactions
-	assert.Equal(t, 2, queue2.Size())
-
-	tx, err = queue2.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx2"), tx)
-
-	tx, err = queue2.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx3"), tx)
-}
-
-func TestTxQueue_Peek(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions of different sizes
-	err := queue.Add(ctx, make([]byte, 50)) // 50 bytes
-	require.NoError(t, err)
-	err = queue.Add(ctx, make([]byte, 60)) // 60 bytes
-	require.NoError(t, err)
-	err = queue.Add(ctx, make([]byte, 100)) // 100 bytes
-	require.NoError(t, err)
-
-	// Peek with 100 bytes limit - should get first tx only
-	txs := queue.Peek(100)
-	assert.Equal(t, 1, len(txs))
-	assert.Equal(t, 50, len(txs[0]))
-
-	// Queue size should not change
-	assert.Equal(t, 3, queue.Size())
-
-	// Peek with 120 bytes limit - should get first two txs
-	txs = queue.Peek(120)
-	assert.Equal(t, 2, len(txs))
-	assert.Equal(t, 50, len(txs[0]))
-	assert.Equal(t, 60, len(txs[1]))
-
-	// Queue size should still not change
-	assert.Equal(t, 3, queue.Size())
-
-	// Peek with 300 bytes limit - should get all txs
-	txs = queue.Peek(300)
-	assert.Equal(t, 3, len(txs))
-}
-
-func TestTxQueue_Consume(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx3"))
-	require.NoError(t, err)
-
-	assert.Equal(t, 3, queue.Size())
-
-	// Consume first 2 transactions
-	err = queue.Consume(ctx, 2)
-	require.NoError(t, err)
-
-	assert.Equal(t, 1, queue.Size())
-
-	// Next transaction should be tx3
-	tx, err := queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx3"), tx)
-}
-
-func TestTxQueue_PeekAndConsume(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions
-	err := queue.Add(ctx, make([]byte, 50))
-	require.NoError(t, err)
-	err = queue.Add(ctx, make([]byte, 60))
-	require.NoError(t, err)
-	err = queue.Add(ctx, make([]byte, 100))
-	require.NoError(t, err)
-
-	// Peek to see what fits in 120 bytes
-	txs := queue.Peek(120)
-	assert.Equal(t, 2, len(txs))
-
-	// Consume those transactions
-	err = queue.Consume(ctx, len(txs))
-	require.NoError(t, err)
-
-	// Should have 1 transaction left
-	assert.Equal(t, 1, queue.Size())
-
-	// Next transaction should be the 100-byte one
-	tx, err := queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, 100, len(tx))
-}
-
-func TestTxQueue_ConsumeMoreThanAvailable(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add 2 transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-
-	// Try to consume 3 transactions
-	err = queue.Consume(ctx, 3)
-	assert.Error(t, err)
-
-	// Size should be unchanged
-	assert.Equal(t, 2, queue.Size())
-}
-
-func TestTxQueue_Clear(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add transactions
-	err := queue.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-	err = queue.Add(ctx, []byte("tx3"))
-	require.NoError(t, err)
-
-	assert.Equal(t, 3, queue.Size())
-
-	// Clear the queue
-	err = queue.Clear(ctx)
-	require.NoError(t, err)
-
-	assert.Equal(t, 0, queue.Size())
-
-	// Queue should be empty
-	tx, err := queue.Next(ctx)
-	require.NoError(t, err)
-	assert.Nil(t, tx)
-
-	// Verify persistence is also cleared
-	queue2 := NewTxQueue(db, "test", 0)
-	err = queue2.Load(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, 0, queue2.Size())
-}
-
-func TestTxQueue_PrefixIsolation(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-
-	ctx := context.Background()
-
-	// Create two queues with different prefixes
-	queue1 := NewTxQueue(db, "queue1", 0)
-	queue2 := NewTxQueue(db, "queue2", 0)
-
-	// Add different transactions to each
-	err := queue1.Add(ctx, []byte("tx1"))
-	require.NoError(t, err)
-	err = queue2.Add(ctx, []byte("tx2"))
-	require.NoError(t, err)
-
-	assert.Equal(t, 1, queue1.Size())
-	assert.Equal(t, 1, queue2.Size())
-
-	// Load each queue separately
-	queue1New := NewTxQueue(db, "queue1", 0)
-	err = queue1New.Load(ctx)
-	require.NoError(t, err)
-
-	queue2New := NewTxQueue(db, "queue2", 0)
-	err = queue2New.Load(ctx)
-	require.NoError(t, err)
-
-	// Each should have its own transaction
-	assert.Equal(t, 1, queue1New.Size())
-	assert.Equal(t, 1, queue2New.Size())
-
-	tx, err := queue1New.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx1"), tx)
-
-	tx, err = queue2New.Next(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, []byte("tx2"), tx)
-}
-
-func TestTxQueue_MemoryCompaction(t *testing.T) {
-	db := syncds.MutexWrap(ds.NewMapDatastore())
-	queue := NewTxQueue(db, "test", 0)
-
-	ctx := context.Background()
-
-	// Add more than 100 transactions to trigger compaction
-	for i := 0; i < 150; i++ {
-		err := queue.Add(ctx, []byte{byte(i)})
-		require.NoError(t, err)
-	}
-
-	// Consume 100 transactions to trigger compaction
-	for i := 0; i < 100; i++ {
-		_, err := queue.Next(ctx)
-		require.NoError(t, err)
-	}
-
-	// Size should be 50
-	assert.Equal(t, 50, queue.Size())
-
-	// Remaining transactions should be correct
-	for i := 100; i < 150; i++ {
-		tx, err := queue.Next(ctx)
-		require.NoError(t, err)
-		assert.Equal(t, []byte{byte(i)}, tx)
-	}
-}
diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go
index 605c670046..435cec91b9 100644
--- a/sequencers/based/sequencer.go
+++ b/sequencers/based/sequencer.go
@@ -41,7 +41,6 @@ type BasedSequencer struct {
 
 	// Cached transactions from the current DA block being processed
 	currentBatchTxs [][]byte
-	currentBatchDA  *block.ForcedInclusionEvent
 }
 
 // NewBasedSequencer creates a new based sequencer instance
@@ -73,12 +72,9 @@ func NewBasedSequencer(
 		if errors.Is(err, ErrCheckpointNotFound) {
 			// No checkpoint exists, initialize with genesis DA height
 			bs.checkpoint = &Checkpoint{
-				DAHeight: genesis.DAStartHeight,
+				DAHeight: bs.GetDAHeight(),
 				TxIndex:  0,
 			}
-			bs.logger.Info().
-				Uint64("da_height", genesis.DAStartHeight).
-				Msg("initialized checkpoint from genesis")
 		} else {
 			return nil, fmt.Errorf("failed to load checkpoint from DB: %w", err)
 		}
@@ -87,12 +83,7 @@ func NewBasedSequencer(
 		bs.checkpoint = checkpoint
 		bs.logger.Info().
 			Uint64("da_height", checkpoint.DAHeight).
 			Uint64("tx_index", checkpoint.TxIndex).
- Msg("loaded checkpoint from DB") - } - - // Clean up any legacy queue data from previous implementation - if err := bs.checkpointStore.CleanupLegacyQueue(loadCtx); err != nil { - bs.logger.Warn().Err(err).Msg("failed to cleanup legacy queue data, continuing anyway") + Msg("loaded based sequencer checkpoint from DB") } return bs, nil @@ -129,7 +120,6 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get s.checkpoint.DAHeight++ s.checkpoint.TxIndex = 0 s.currentBatchTxs = nil - s.currentBatchDA = nil // Update the global DA height s.SetDAHeight(s.checkpoint.DAHeight) @@ -200,7 +190,6 @@ func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error { // Cache the transactions for this DA block s.currentBatchTxs = validTxs - s.currentBatchDA = forcedTxsEvent // If we had a non-zero tx index, we're resuming from a crash mid-block // The transactions starting from that index are what we need @@ -259,7 +248,6 @@ func (s *BasedSequencer) VerifyBatch(ctx context.Context, req coresequencer.Veri // This should be called when the sequencer needs to sync to a specific DA height func (s *BasedSequencer) SetDAHeight(height uint64) { s.daHeight.Store(height) - s.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } // GetDAHeight returns the current DA height diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index 7870bb56a7..eae1f93cc3 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -379,11 +379,6 @@ func TestBasedSequencer_GetNextBatch_ResumesFromCheckpoint(t *testing.T) { seq.checkpoint.DAHeight = 100 seq.checkpoint.TxIndex = 1 seq.currentBatchTxs = testBlobs - seq.currentBatchDA = &block.ForcedInclusionEvent{ - StartDaHeight: 100, - EndDaHeight: 100, - Txs: testBlobs, - } req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, From 87f9da596fa3e81f2e13bfc4ba1444c5d22502d8 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 11:10:47 +0100 Subject: [PATCH 05/19] describe checkpoint design and generalize --- proto/evnode/v1/state.proto | 4 +- sequencers/based/README.md | 481 ++++++++++++++++++ sequencers/based/sequencer.go | 12 +- sequencers/{based => common}/checkpoint.go | 29 +- .../{based => common}/checkpoint_test.go | 14 +- sequencers/single/sequencer.go | 3 +- types/pb/evnode/v1/state.pb.go | 28 +- 7 files changed, 526 insertions(+), 45 deletions(-) create mode 100644 sequencers/based/README.md rename sequencers/{based => common}/checkpoint.go (77%) rename sequencers/{based => common}/checkpoint_test.go (91%) diff --git a/proto/evnode/v1/state.proto b/proto/evnode/v1/state.proto index 6c5dbe4bed..c545408dbc 100644 --- a/proto/evnode/v1/state.proto +++ b/proto/evnode/v1/state.proto @@ -20,8 +20,8 @@ message State { reserved 7; } -// BasedCheckpoint tracks the position in the DA where transactions were last processed -message BasedCheckpoint { +// SequencerDACheckpoint tracks the position in the DA where transactions were last processed +message SequencerDACheckpoint { // DA block height being processed uint64 da_height = 1; // Index of the next transaction to process within the DA block's forced inclusion batch diff --git a/sequencers/based/README.md b/sequencers/based/README.md new file mode 100644 index 0000000000..05b1f888c6 --- /dev/null +++ b/sequencers/based/README.md @@ -0,0 +1,481 @@ +# Based Sequencer + +## Overview + +The Based Sequencer is a sequencer implementation that exclusively retrieves transactions from the Data Availability (DA) 
layer via the forced inclusion mechanism. Unlike other sequencer types, it does not accept transactions from a mempool or reaper - it treats the DA layer as a transaction queue. + +This design ensures that all transactions are force-included from DA, making the sequencer completely "based" on the DA layer's transaction ordering. + +## Architecture + +### Core Components + +1. **ForcedInclusionRetriever**: Fetches transactions from DA at epoch boundaries +2. **CheckpointStore**: Persists processing position to enable crash recovery +3. **BasedSequencer**: Orchestrates transaction retrieval and batch creation + +### Key Interfaces + +The Based Sequencer implements the `Sequencer` interface from `core/sequencer.go`: + +- `SubmitBatchTxs()` - No-op for based sequencer (transactions are not accepted) +- `GetNextBatch()` - Retrieves the next batch from DA via forced inclusion +- `VerifyBatch()` - Always returns true (all transactions come from DA) + +## Epoch-Based Transaction Retrieval + +### How Epochs Work + +Transactions are retrieved from DA in **epochs**, not individual DA blocks. An epoch is a range of DA blocks defined by `DAEpochForcedInclusion` in the genesis configuration. + +**Example**: If `DAStartHeight = 100` and `DAEpochForcedInclusion = 10`: +- Epoch 1: DA heights 100-109 +- Epoch 2: DA heights 110-119 +- Epoch 3: DA heights 120-129 + +### Epoch Boundary Fetching + +The `ForcedInclusionRetriever` only returns transactions when queried at the **epoch end** (the last DA height in an epoch): + +```go +// When NOT at epoch end -> returns empty transactions +if daHeight != epochEnd { + return &ForcedInclusionEvent{ + StartDaHeight: daHeight, + EndDaHeight: daHeight, + Txs: [][]byte{}, + }, nil +} + +// When AT epoch end -> fetches entire epoch +// Retrieves ALL transactions from epochStart to epochEnd (inclusive) +``` + +When at an epoch end, the retriever fetches transactions from **all DA blocks in that epoch**: + +1. Fetches forced inclusion blobs from `epochStart` +2. Fetches forced inclusion blobs from each height between start and end +3. Fetches forced inclusion blobs from `epochEnd` +4. Returns all transactions as a single `ForcedInclusionEvent` + +### Why Epoch-Based? + +- **Efficiency**: Reduces the number of DA queries +- **Batching**: Allows processing multiple DA blocks worth of transactions together +- **Determinism**: Clear boundaries for when to fetch from DA +- **Gas optimization**: Fewer DA reads means lower operational costs + +## Checkpoint System + +### Purpose + +The checkpoint system tracks the exact position in the transaction stream to enable crash recovery and ensure no transactions are lost or duplicated. + +### Checkpoint Structure + +```go +type Checkpoint struct { + // DAHeight is the DA block height currently being processed + DAHeight uint64 + + // TxIndex is the index of the next transaction to process + // within the DA block's forced inclusion batch + TxIndex uint64 +} +``` + +### How Checkpoints Work + +#### 1. Initial State +``` +Checkpoint: (DAHeight: 100, TxIndex: 0) +- Ready to fetch epoch starting at DA height 100 +``` + +#### 2. Fetching Transactions +When `GetNextBatch()` is called and we're at an epoch end: +``` +Request: GetNextBatch(maxBytes: 1MB) +Action: Fetch all transactions from epoch (DA heights 100-109) +Result: currentBatchTxs = [tx1, tx2, tx3, ..., txN] (from entire epoch) +``` + +#### 3. 
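Epoch Boundary Arithmetic (aside)

Before stepping through processing, it helps to see how the epoch end used above is derived. A minimal sketch with an illustrative function name; the canonical helpers live in `types/epoch.go`:

```go
// epochEnd returns the last DA height of the epoch containing daHeight.
// Assumes daHeight >= daStartHeight and epochSize > 0
// (epochSize == 0 disables epoch boundaries entirely).
func epochEnd(daHeight, daStartHeight, epochSize uint64) uint64 {
	epochNumber := (daHeight-daStartHeight)/epochSize + 1
	return daStartHeight + epochNumber*epochSize - 1
}

// With DAStartHeight = 100 and DAEpochForcedInclusion = 10:
//   epochEnd(105, 100, 10) == 109  // mid-epoch: return empty, keep waiting
//   epochEnd(109, 100, 10) == 109  // at epoch end: fetch heights 100-109
```

The retriever compares the queried DA height against this boundary; only a query at exactly the epoch end triggers a fetch of the whole epoch.

#### 3. 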
Processing Transactions +Transactions are processed incrementally, respecting `maxBytes`: +``` +Batch 1: [tx1, tx2] (fits in maxBytes) +Checkpoint: (DAHeight: 100, TxIndex: 2) + +Batch 2: [tx3, tx4, tx5] +Checkpoint: (DAHeight: 100, TxIndex: 5) + +... continue until all transactions from DA height 100 are consumed + +Checkpoint: (DAHeight: 101, TxIndex: 0) +- Moved to next DA height within the same epoch +``` + +#### 4. Checkpoint Persistence + +**Critical**: The checkpoint is persisted to disk **after every batch** of transactions is processed: + +```go +if txCount > 0 { + s.checkpoint.TxIndex += txCount + + // Move to next DA height when current one is exhausted + if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { + s.checkpoint.DAHeight++ + s.checkpoint.TxIndex = 0 + s.currentBatchTxs = nil + s.SetDAHeight(s.checkpoint.DAHeight) + } + + // Persist checkpoint to disk + if err := s.checkpointStore.Save(ctx, s.checkpoint); err != nil { + return nil, fmt.Errorf("failed to save checkpoint: %w", err) + } +} +``` + +### Crash Recovery Behavior + +#### Scenario: Crash Mid-Epoch + +**Setup**: +- Epoch 1 spans DA heights 100-109 +- At DA height 109, fetched all transactions from the epoch +- Processed transactions up to DA height 105, TxIndex 3 +- **Crash occurs** + +**On Restart**: + +1. **Load Checkpoint**: `(DAHeight: 105, TxIndex: 3)` +2. **Lost Cache**: `currentBatchTxs` is empty (in-memory only) +3. **Attempt Fetch**: `RetrieveForcedIncludedTxs(105)` +4. **Result**: Empty (105 is not an epoch end) +5. **Continue**: Increment DA height, keep trying +6. **Eventually**: Reach DA height 109 (epoch end) +7. **Re-fetch**: Retrieve **entire epoch** again (DA heights 100-109) +8. **Resume**: Use checkpoint to skip already-processed transactions + +#### Important Implications + +**The entire epoch will be re-fetched after a crash**, even with fine-grained checkpoints. + +**Why?** +- Transactions are only available at epoch boundaries +- In-memory cache (`currentBatchTxs`) is lost on restart +- Must wait until the next epoch end to fetch transactions again + +**What the checkpoint prevents**: +- ✅ Re-execution of already processed transactions +- ✅ Correct resumption within a DA block's transaction list +- ✅ No transaction loss or duplication + +**What the checkpoint does NOT prevent**: +- ❌ Re-fetching the entire epoch from DA +- ❌ Re-validation of previously fetched transactions + +### Checkpoint Storage + +The checkpoint is stored using a key-value datastore: + +```go +// Checkpoint key in the datastore +checkpointKey = ds.NewKey("/based/checkpoint") + +// Operations +checkpoint, err := checkpointStore.Load(ctx) // Load from disk +err := checkpointStore.Save(ctx, checkpoint) // Save to disk +err := checkpointStore.Delete(ctx) // Delete from disk +``` + +The checkpoint is serialized using Protocol Buffers (`pb.BasedCheckpoint`) for efficient storage and cross-version compatibility. + +## Transaction Processing Flow + +### Full Flow Diagram + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 1. GetNextBatch() called │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 2. Check: Do we have cached transactions? │ +│ - currentBatchTxs empty OR all consumed? │ +└──────────────────────┬──────────────────────────────────────┘ + │ YES + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 3. 
fetchNextDABatch(checkpoint.DAHeight) │ +│ - Calls RetrieveForcedIncludedTxs() │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 4. Is current DAHeight an epoch end? │ +└──────┬──────────────────────────────────────┬───────────────┘ + │ NO │ YES + ▼ ▼ +┌──────────────────┐ ┌──────────────────────────┐ +│ Return empty │ │ Fetch entire epoch from │ +│ transactions │ │ DA (all heights in epoch)│ +└──────┬───────────┘ └──────────┬───────────────┘ + │ │ + │ ▼ + │ ┌──────────────────────────┐ + │ │ Validate blob sizes │ + │ │ Cache in currentBatchTxs │ + │ └──────────┬───────────────┘ + │ │ + └─────────────────┬───────────────────┘ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 5. createBatchFromCheckpoint(maxBytes) │ +│ - Start from checkpoint.TxIndex │ +│ - Add transactions until maxBytes reached │ +│ - Mark all as force-included │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 6. Update checkpoint │ +│ - checkpoint.TxIndex += len(batch.Transactions) │ +│ - If consumed all txs from current DA height: │ +│ * checkpoint.DAHeight++ │ +│ * checkpoint.TxIndex = 0 │ +│ * Clear currentBatchTxs cache │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 7. Persist checkpoint to disk │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ 8. Return batch to executor for processing │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Error Handling + +**DA Height from Future**: +```go +if errors.Is(err, coreda.ErrHeightFromFuture) { + // Stay at current position + // Will retry on next call + s.logger.Debug().Msg("DA height from future, waiting for DA to produce block") + return nil +} +``` + +**Forced Inclusion Not Configured**: +```go +if errors.Is(err, block.ErrForceInclusionNotConfigured) { + return errors.New("forced inclusion not configured") +} +``` + +**Invalid Blob Size**: +```go +if !seqcommon.ValidateBlobSize(tx) { + s.logger.Warn().Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ + continue +} +``` + +## Relationship with Executor + +### DA Height Synchronization + +The executor maintains a separate `DAHeight` field in the blockchain state: + +```go +// In executor.go +newState.DAHeight = e.sequencer.GetDAHeight() + +// State is saved after EVERY block +if err := batch.UpdateState(newState); err != nil { + return fmt.Errorf("failed to update state: %w", err) +} +``` + +**Key Differences**: + +| Aspect | Based Sequencer Checkpoint | Executor State | +|--------|---------------------------|----------------| +| **Frequency** | After every batch | After every block | +| **Granularity** | DAHeight + TxIndex | DAHeight only | +| **Purpose** | Track position in DA transaction stream | Track blockchain state | +| **Storage** | Checkpoint datastore | State datastore | +| **Scope** | Sequencer-specific | Chain-wide | + +### Initialization Flow + +On startup: + +1. **Executor** loads State from disk +2. **Executor** calls `sequencer.SetDAHeight(state.DAHeight)` +3. **Based Sequencer** loads checkpoint from disk +4. If no checkpoint exists, initializes with current DA height +5. 
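The sequencer's checkpoint now reflects the executor's view of DA progress; a condensed sketch of this load-or-init step from `NewBasedSequencer` follows (error handling trimmed):

```go
// Load the persisted checkpoint; fall back to the current DA height
// (set by the executor in step 2) when no checkpoint exists yet.
checkpoint, err := bs.checkpointStore.Load(ctx)
switch {
case errors.Is(err, seqcommon.ErrCheckpointNotFound):
	bs.checkpoint = &seqcommon.Checkpoint{DAHeight: bs.GetDAHeight(), TxIndex: 0}
case err != nil:
	return nil, fmt.Errorf("failed to load checkpoint from DB: %w", err)
default:
	bs.checkpoint = checkpoint
}
```

6. 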
Both systems now synchronized + +## Configuration + +### Genesis Parameters + +```go +type Genesis struct { + // Starting DA height for the chain + DAStartHeight uint64 + + // Number of DA blocks per epoch for forced inclusion + // Set to 0 to disable epochs (all blocks in epoch 1) + DAEpochForcedInclusion uint64 +} +``` + +### Example Configurations + +**Frequent Fetching** (small epochs): +```go +DAStartHeight: 1000 +DAEpochForcedInclusion: 1 // Fetch every DA block +``` + +**Batched Fetching** (larger epochs): +```go +DAStartHeight: 1000 +DAEpochForcedInclusion: 100 // Fetch every 100 DA blocks +``` + +**Single Epoch** (no epoch boundaries): +```go +DAStartHeight: 1000 +DAEpochForcedInclusion: 0 // All blocks in one epoch +``` + +## Performance Considerations + +### Memory Usage + +- `currentBatchTxs` holds all transactions from all DA heights in the current epoch +- With large epochs and many transactions, memory usage can be significant +- Example: Epoch size 100, 1000 txs/block, 1KB/tx = ~100MB + +### DA Query Efficiency + +**Pros**: +- Fewer DA queries (one per epoch instead of per block) +- Reduced DA layer costs + +**Cons**: +- Longer wait times between transaction fetches +- Larger re-fetch overhead on crash recovery + +### Crash Recovery Trade-offs + +**Fine-grained checkpoints** (current approach): +- ✅ No transaction re-execution after crash +- ✅ Fast recovery within cached transactions +- ❌ Entire epoch re-fetched from DA +- ❌ All transactions re-validated + +**Alternative** (epoch-level checkpoints): +- ✅ Simpler implementation +- ❌ All transactions in epoch re-executed after crash +- ❌ Longer recovery time + +The current design prioritizes **no re-execution** over DA re-fetching, as execution is typically more expensive than fetching. + +## Testing + +### Unit Tests + +- `checkpoint_test.go`: Tests checkpoint persistence operations +- `sequencer_test.go`: Tests sequencer batch retrieval logic + +### Integration Testing + +To test the based sequencer with a real DA layer: + +```bash +# Run with based sequencer configuration +make run-n NODES=1 SEQUENCER_TYPE=based + +# Simulate crash recovery +# 1. Stop node mid-epoch +# 2. Check checkpoint value +# 3. Restart node +# 4. Verify correct resumption +``` + +## Debugging + +### Log Messages + +**Checkpoint Loading**: +``` +loaded based sequencer checkpoint from DB da_height=105 tx_index=3 +``` + +**DA Fetching**: +``` +fetching forced inclusion transactions from DA da_height=109 +``` + +**Not at Epoch End**: +``` +not at epoch end - returning empty transactions da_height=105 epoch_end=109 +``` + +**Transactions Retrieved**: +``` +fetched forced inclusion transactions from DA valid_tx_count=150 skipped_tx_count=2 da_height_start=100 da_height_end=109 +``` + +**Checkpoint Resumption**: +``` +resuming from checkpoint within DA block tx_index=3 +``` + +### Common Issues + +**Problem**: No transactions being processed +- **Check**: Are you at an epoch end? Transactions only arrive at epoch boundaries. +- **Check**: Is forced inclusion configured? Look for `ErrForceInclusionNotConfigured`. + +**Problem**: Transactions re-executed after restart +- **Check**: Is checkpoint being persisted? Look for checkpoint save errors. +- **Check**: Is checkpoint being loaded on restart? + +**Problem**: Slow recovery after crash +- **Cause**: Entire epoch is re-fetched from DA. +- **Solution**: Reduce epoch size for faster recovery (at cost of more DA queries). + +## Future Improvements + +### Potential Optimizations + +1. 
**Persistent Transaction Cache**: Store fetched transactions on disk to avoid re-fetching entire epoch after crash +2. **Progressive Fetching**: Fetch DA blocks incrementally within an epoch instead of all at once +3. **Compression**: Compress checkpoint data for faster I/O +4. **Parallel Validation**: Validate transactions from multiple DA heights concurrently + +### Design Alternatives + +1. **Streaming Model**: Instead of epoch boundaries, stream transactions as DA blocks become available +2. **Hybrid Checkpointing**: Save both fine-grained position and transaction cache +3. **Two-Phase Commit**: Separate checkpoint updates from transaction processing for better crash consistency + +## References + +- Core interfaces: `core/sequencer.go` +- Forced inclusion: `block/internal/da/forced_inclusion_retriever.go` +- Epoch calculations: `types/epoch.go` +- Executor integration: `block/internal/executing/executor.go` diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 435cec91b9..705622a36f 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -36,8 +36,8 @@ type BasedSequencer struct { logger zerolog.Logger daHeight atomic.Uint64 - checkpointStore *CheckpointStore - checkpoint *Checkpoint + checkpointStore *seqcommon.CheckpointStore + checkpoint *seqcommon.Checkpoint // Cached transactions from the current DA block being processed currentBatchTxs [][]byte @@ -59,7 +59,7 @@ func NewBasedSequencer( config: config, genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), - checkpointStore: NewCheckpointStore(db), + checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/based/checkpoint")), } bs.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor @@ -69,9 +69,9 @@ func NewBasedSequencer( checkpoint, err := bs.checkpointStore.Load(loadCtx) if err != nil { - if errors.Is(err, ErrCheckpointNotFound) { - // No checkpoint exists, initialize with genesis DA height - bs.checkpoint = &Checkpoint{ + if errors.Is(err, seqcommon.ErrCheckpointNotFound) { + // No checkpoint exists, initialize with current DA height + bs.checkpoint = &seqcommon.Checkpoint{ DAHeight: bs.GetDAHeight(), TxIndex: 0, } diff --git a/sequencers/based/checkpoint.go b/sequencers/common/checkpoint.go similarity index 77% rename from sequencers/based/checkpoint.go rename to sequencers/common/checkpoint.go index 539e26251b..039451d2c9 100644 --- a/sequencers/based/checkpoint.go +++ b/sequencers/common/checkpoint.go @@ -1,4 +1,4 @@ -package based +package common import ( "context" @@ -11,13 +11,8 @@ import ( pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -var ( - // checkpointKey is the datastore key for persisting the checkpoint - checkpointKey = ds.NewKey("/based/checkpoint") - - // ErrCheckpointNotFound is returned when no checkpoint exists in the datastore - ErrCheckpointNotFound = errors.New("checkpoint not found") -) +// ErrCheckpointNotFound is returned when no checkpoint exists in the datastore +var ErrCheckpointNotFound = errors.New("checkpoint not found") // Checkpoint tracks the position in the DA where we last processed transactions type Checkpoint struct { @@ -30,20 +25,22 @@ type Checkpoint struct { // CheckpointStore manages persistence of the checkpoint type CheckpointStore struct { - db ds.Batching + db ds.Batching + checkpointKey ds.Key } // NewCheckpointStore creates a new checkpoint store -func NewCheckpointStore(db ds.Batching) *CheckpointStore { +func NewCheckpointStore(db ds.Batching, checkpointkey 
ds.Key) *CheckpointStore { return &CheckpointStore{ - db: db, + db: db, + checkpointKey: checkpointkey, } } // Load loads the checkpoint from the datastore // Returns ErrCheckpointNotFound if no checkpoint exists func (cs *CheckpointStore) Load(ctx context.Context) (*Checkpoint, error) { - data, err := cs.db.Get(ctx, checkpointKey) + data, err := cs.db.Get(ctx, cs.checkpointKey) if err != nil { if errors.Is(err, ds.ErrNotFound) { return nil, ErrCheckpointNotFound @@ -51,7 +48,7 @@ func (cs *CheckpointStore) Load(ctx context.Context) (*Checkpoint, error) { return nil, fmt.Errorf("failed to load checkpoint: %w", err) } - pbCheckpoint := &pb.BasedCheckpoint{} + pbCheckpoint := &pb.SequencerDACheckpoint{} if err := proto.Unmarshal(data, pbCheckpoint); err != nil { return nil, fmt.Errorf("failed to unmarshal checkpoint: %w", err) } @@ -64,7 +61,7 @@ func (cs *CheckpointStore) Load(ctx context.Context) (*Checkpoint, error) { // Save persists the checkpoint to the datastore func (cs *CheckpointStore) Save(ctx context.Context, checkpoint *Checkpoint) error { - pbCheckpoint := &pb.BasedCheckpoint{ + pbCheckpoint := &pb.SequencerDACheckpoint{ DaHeight: checkpoint.DAHeight, TxIndex: checkpoint.TxIndex, } @@ -74,7 +71,7 @@ func (cs *CheckpointStore) Save(ctx context.Context, checkpoint *Checkpoint) err return fmt.Errorf("failed to marshal checkpoint: %w", err) } - if err := cs.db.Put(ctx, checkpointKey, data); err != nil { + if err := cs.db.Put(ctx, cs.checkpointKey, data); err != nil { return fmt.Errorf("failed to save checkpoint: %w", err) } @@ -83,7 +80,7 @@ func (cs *CheckpointStore) Save(ctx context.Context, checkpoint *Checkpoint) err // Delete removes the checkpoint from the datastore func (cs *CheckpointStore) Delete(ctx context.Context) error { - if err := cs.db.Delete(ctx, checkpointKey); err != nil { + if err := cs.db.Delete(ctx, cs.checkpointKey); err != nil { if errors.Is(err, ds.ErrNotFound) { return nil // Already deleted } diff --git a/sequencers/based/checkpoint_test.go b/sequencers/common/checkpoint_test.go similarity index 91% rename from sequencers/based/checkpoint_test.go rename to sequencers/common/checkpoint_test.go index 489233926b..88b59ad888 100644 --- a/sequencers/based/checkpoint_test.go +++ b/sequencers/common/checkpoint_test.go @@ -1,4 +1,4 @@ -package based +package common import ( "context" @@ -8,10 +8,14 @@ import ( "github.com/stretchr/testify/require" ) +var ( + checkpointKey = ds.NewKey("/checkpoint") +) + func TestCheckpointStore_SaveAndLoad(t *testing.T) { ctx := context.Background() db := ds.NewMapDatastore() - store := NewCheckpointStore(db) + store := NewCheckpointStore(db, checkpointKey) // Test loading when no checkpoint exists _, err := store.Load(ctx) @@ -46,7 +50,7 @@ func TestCheckpointStore_SaveAndLoad(t *testing.T) { func TestCheckpointStore_Delete(t *testing.T) { ctx := context.Background() db := ds.NewMapDatastore() - store := NewCheckpointStore(db) + store := NewCheckpointStore(db, checkpointKey) // Save a checkpoint checkpoint := &Checkpoint{ @@ -72,7 +76,7 @@ func TestCheckpointStore_Delete(t *testing.T) { func TestCheckpoint_EdgeCases(t *testing.T) { ctx := context.Background() db := ds.NewMapDatastore() - store := NewCheckpointStore(db) + store := NewCheckpointStore(db, checkpointKey) // Test with zero values checkpoint := &Checkpoint{ @@ -104,7 +108,7 @@ func TestCheckpoint_EdgeCases(t *testing.T) { func TestCheckpointStore_ConcurrentAccess(t *testing.T) { ctx := context.Background() db := ds.NewMapDatastore() - store := NewCheckpointStore(db) + 
store := NewCheckpointStore(db, checkpointKey) // Save initial checkpoint checkpoint := &Checkpoint{ diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index ea60cf003c..22c0d95828 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -46,8 +46,7 @@ type Sequencer struct { da coreda.DA batchTime time.Duration - - queue *BatchQueue // single queue for immediate availability + queue *BatchQueue // single queue for immediate availability // Forced inclusion support fiRetriever ForcedInclusionRetriever diff --git a/types/pb/evnode/v1/state.pb.go b/types/pb/evnode/v1/state.pb.go index aa0f23155e..5765e57d8b 100644 --- a/types/pb/evnode/v1/state.pb.go +++ b/types/pb/evnode/v1/state.pb.go @@ -123,8 +123,8 @@ func (x *State) GetLastHeaderHash() []byte { return nil } -// BasedCheckpoint tracks the position in the DA where transactions were last processed -type BasedCheckpoint struct { +// SequencerDACheckpoint tracks the position in the DA where transactions were last processed +type SequencerDACheckpoint struct { state protoimpl.MessageState `protogen:"open.v1"` // DA block height being processed DaHeight uint64 `protobuf:"varint,1,opt,name=da_height,json=daHeight,proto3" json:"da_height,omitempty"` @@ -134,20 +134,20 @@ type BasedCheckpoint struct { sizeCache protoimpl.SizeCache } -func (x *BasedCheckpoint) Reset() { - *x = BasedCheckpoint{} +func (x *SequencerDACheckpoint) Reset() { + *x = SequencerDACheckpoint{} mi := &file_evnode_v1_state_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *BasedCheckpoint) String() string { +func (x *SequencerDACheckpoint) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BasedCheckpoint) ProtoMessage() {} +func (*SequencerDACheckpoint) ProtoMessage() {} -func (x *BasedCheckpoint) ProtoReflect() protoreflect.Message { +func (x *SequencerDACheckpoint) ProtoReflect() protoreflect.Message { mi := &file_evnode_v1_state_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -159,19 +159,19 @@ func (x *BasedCheckpoint) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BasedCheckpoint.ProtoReflect.Descriptor instead. -func (*BasedCheckpoint) Descriptor() ([]byte, []int) { +// Deprecated: Use SequencerDACheckpoint.ProtoReflect.Descriptor instead. 
+func (*SequencerDACheckpoint) Descriptor() ([]byte, []int) { return file_evnode_v1_state_proto_rawDescGZIP(), []int{1} } -func (x *BasedCheckpoint) GetDaHeight() uint64 { +func (x *SequencerDACheckpoint) GetDaHeight() uint64 { if x != nil { return x.DaHeight } return 0 } -func (x *BasedCheckpoint) GetTxIndex() uint64 { +func (x *SequencerDACheckpoint) GetTxIndex() uint64 { if x != nil { return x.TxIndex } @@ -236,8 +236,8 @@ const file_evnode_v1_state_proto_rawDesc = "" + "\x0flast_block_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\rlastBlockTime\x12\x1b\n" + "\tda_height\x18\x06 \x01(\x04R\bdaHeight\x12\x19\n" + "\bapp_hash\x18\b \x01(\fR\aappHash\x12(\n" + - "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"I\n" + - "\x0fBasedCheckpoint\x12\x1b\n" + + "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"O\n" + + "\x15SequencerDACheckpoint\x12\x1b\n" + "\tda_height\x18\x01 \x01(\x04R\bdaHeight\x12\x19\n" + "\btx_index\x18\x02 \x01(\x04R\atxIndex\"\x18\n" + "\x02Tx\x12\x12\n" + @@ -258,7 +258,7 @@ func file_evnode_v1_state_proto_rawDescGZIP() []byte { var file_evnode_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_evnode_v1_state_proto_goTypes = []any{ (*State)(nil), // 0: evnode.v1.State - (*BasedCheckpoint)(nil), // 1: evnode.v1.BasedCheckpoint + (*SequencerDACheckpoint)(nil), // 1: evnode.v1.SequencerDACheckpoint (*Tx)(nil), // 2: evnode.v1.Tx (*Version)(nil), // 3: evnode.v1.Version (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp From 6890d326c56969d55704c189e84dc5572ed035b2 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 11:10:56 +0100 Subject: [PATCH 06/19] update docs --- sequencers/based/README.md | 305 ++----------------------------------- 1 file changed, 14 insertions(+), 291 deletions(-) diff --git a/sequencers/based/README.md b/sequencers/based/README.md index 05b1f888c6..9b425b5a96 100644 --- a/sequencers/based/README.md +++ b/sequencers/based/README.md @@ -29,6 +29,7 @@ The Based Sequencer implements the `Sequencer` interface from `core/sequencer.go Transactions are retrieved from DA in **epochs**, not individual DA blocks. An epoch is a range of DA blocks defined by `DAEpochForcedInclusion` in the genesis configuration. **Example**: If `DAStartHeight = 100` and `DAEpochForcedInclusion = 10`: + - Epoch 1: DA heights 100-109 - Epoch 2: DA heights 110-119 - Epoch 3: DA heights 120-129 @@ -77,7 +78,7 @@ The checkpoint system tracks the exact position in the transaction stream to ena type Checkpoint struct { // DAHeight is the DA block height currently being processed DAHeight uint64 - + // TxIndex is the index of the next transaction to process // within the DA block's forced inclusion batch TxIndex uint64 @@ -87,13 +88,16 @@ type Checkpoint struct { ### How Checkpoints Work #### 1. Initial State + ``` Checkpoint: (DAHeight: 100, TxIndex: 0) - Ready to fetch epoch starting at DA height 100 ``` #### 2. Fetching Transactions + When `GetNextBatch()` is called and we're at an epoch end: + ``` Request: GetNextBatch(maxBytes: 1MB) Action: Fetch all transactions from epoch (DA heights 100-109) @@ -101,7 +105,9 @@ Result: currentBatchTxs = [tx1, tx2, tx3, ..., txN] (from entire epoch) ``` #### 3. 
Processing Transactions + Transactions are processed incrementally, respecting `maxBytes`: + ``` Batch 1: [tx1, tx2] (fits in maxBytes) Checkpoint: (DAHeight: 100, TxIndex: 2) @@ -122,7 +128,7 @@ Checkpoint: (DAHeight: 101, TxIndex: 0) ```go if txCount > 0 { s.checkpoint.TxIndex += txCount - + // Move to next DA height when current one is exhausted if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { s.checkpoint.DAHeight++ @@ -130,7 +136,7 @@ if txCount > 0 { s.currentBatchTxs = nil s.SetDAHeight(s.checkpoint.DAHeight) } - + // Persist checkpoint to disk if err := s.checkpointStore.Save(ctx, s.checkpoint); err != nil { return nil, fmt.Errorf("failed to save checkpoint: %w", err) @@ -143,6 +149,7 @@ if txCount > 0 { #### Scenario: Crash Mid-Epoch **Setup**: + - Epoch 1 spans DA heights 100-109 - At DA height 109, fetched all transactions from the epoch - Processed transactions up to DA height 105, TxIndex 3 @@ -164,16 +171,19 @@ if txCount > 0 { **The entire epoch will be re-fetched after a crash**, even with fine-grained checkpoints. **Why?** + - Transactions are only available at epoch boundaries - In-memory cache (`currentBatchTxs`) is lost on restart - Must wait until the next epoch end to fetch transactions again **What the checkpoint prevents**: + - ✅ Re-execution of already processed transactions - ✅ Correct resumption within a DA block's transaction list - ✅ No transaction loss or duplication **What the checkpoint does NOT prevent**: + - ❌ Re-fetching the entire epoch from DA - ❌ Re-validation of previously fetched transactions @@ -191,291 +201,4 @@ err := checkpointStore.Save(ctx, checkpoint) // Save to disk err := checkpointStore.Delete(ctx) // Delete from disk ``` -The checkpoint is serialized using Protocol Buffers (`pb.BasedCheckpoint`) for efficient storage and cross-version compatibility. - -## Transaction Processing Flow - -### Full Flow Diagram - -``` -┌─────────────────────────────────────────────────────────────┐ -│ 1. GetNextBatch() called │ -└──────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 2. Check: Do we have cached transactions? │ -│ - currentBatchTxs empty OR all consumed? │ -└──────────────────────┬──────────────────────────────────────┘ - │ YES - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 3. fetchNextDABatch(checkpoint.DAHeight) │ -│ - Calls RetrieveForcedIncludedTxs() │ -└──────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 4. Is current DAHeight an epoch end? │ -└──────┬──────────────────────────────────────┬───────────────┘ - │ NO │ YES - ▼ ▼ -┌──────────────────┐ ┌──────────────────────────┐ -│ Return empty │ │ Fetch entire epoch from │ -│ transactions │ │ DA (all heights in epoch)│ -└──────┬───────────┘ └──────────┬───────────────┘ - │ │ - │ ▼ - │ ┌──────────────────────────┐ - │ │ Validate blob sizes │ - │ │ Cache in currentBatchTxs │ - │ └──────────┬───────────────┘ - │ │ - └─────────────────┬───────────────────┘ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 5. createBatchFromCheckpoint(maxBytes) │ -│ - Start from checkpoint.TxIndex │ -│ - Add transactions until maxBytes reached │ -│ - Mark all as force-included │ -└──────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 6. 
Update checkpoint │ -│ - checkpoint.TxIndex += len(batch.Transactions) │ -│ - If consumed all txs from current DA height: │ -│ * checkpoint.DAHeight++ │ -│ * checkpoint.TxIndex = 0 │ -│ * Clear currentBatchTxs cache │ -└──────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 7. Persist checkpoint to disk │ -└──────────────────────┬──────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ 8. Return batch to executor for processing │ -└─────────────────────────────────────────────────────────────┘ -``` - -### Error Handling - -**DA Height from Future**: -```go -if errors.Is(err, coreda.ErrHeightFromFuture) { - // Stay at current position - // Will retry on next call - s.logger.Debug().Msg("DA height from future, waiting for DA to produce block") - return nil -} -``` - -**Forced Inclusion Not Configured**: -```go -if errors.Is(err, block.ErrForceInclusionNotConfigured) { - return errors.New("forced inclusion not configured") -} -``` - -**Invalid Blob Size**: -```go -if !seqcommon.ValidateBlobSize(tx) { - s.logger.Warn().Msg("forced inclusion blob exceeds absolute maximum size - skipping") - skippedTxs++ - continue -} -``` - -## Relationship with Executor - -### DA Height Synchronization - -The executor maintains a separate `DAHeight` field in the blockchain state: - -```go -// In executor.go -newState.DAHeight = e.sequencer.GetDAHeight() - -// State is saved after EVERY block -if err := batch.UpdateState(newState); err != nil { - return fmt.Errorf("failed to update state: %w", err) -} -``` - -**Key Differences**: - -| Aspect | Based Sequencer Checkpoint | Executor State | -|--------|---------------------------|----------------| -| **Frequency** | After every batch | After every block | -| **Granularity** | DAHeight + TxIndex | DAHeight only | -| **Purpose** | Track position in DA transaction stream | Track blockchain state | -| **Storage** | Checkpoint datastore | State datastore | -| **Scope** | Sequencer-specific | Chain-wide | - -### Initialization Flow - -On startup: - -1. **Executor** loads State from disk -2. **Executor** calls `sequencer.SetDAHeight(state.DAHeight)` -3. **Based Sequencer** loads checkpoint from disk -4. If no checkpoint exists, initializes with current DA height -5. 
Both systems now synchronized - -## Configuration - -### Genesis Parameters - -```go -type Genesis struct { - // Starting DA height for the chain - DAStartHeight uint64 - - // Number of DA blocks per epoch for forced inclusion - // Set to 0 to disable epochs (all blocks in epoch 1) - DAEpochForcedInclusion uint64 -} -``` - -### Example Configurations - -**Frequent Fetching** (small epochs): -```go -DAStartHeight: 1000 -DAEpochForcedInclusion: 1 // Fetch every DA block -``` - -**Batched Fetching** (larger epochs): -```go -DAStartHeight: 1000 -DAEpochForcedInclusion: 100 // Fetch every 100 DA blocks -``` - -**Single Epoch** (no epoch boundaries): -```go -DAStartHeight: 1000 -DAEpochForcedInclusion: 0 // All blocks in one epoch -``` - -## Performance Considerations - -### Memory Usage - -- `currentBatchTxs` holds all transactions from all DA heights in the current epoch -- With large epochs and many transactions, memory usage can be significant -- Example: Epoch size 100, 1000 txs/block, 1KB/tx = ~100MB - -### DA Query Efficiency - -**Pros**: -- Fewer DA queries (one per epoch instead of per block) -- Reduced DA layer costs - -**Cons**: -- Longer wait times between transaction fetches -- Larger re-fetch overhead on crash recovery - -### Crash Recovery Trade-offs - -**Fine-grained checkpoints** (current approach): -- ✅ No transaction re-execution after crash -- ✅ Fast recovery within cached transactions -- ❌ Entire epoch re-fetched from DA -- ❌ All transactions re-validated - -**Alternative** (epoch-level checkpoints): -- ✅ Simpler implementation -- ❌ All transactions in epoch re-executed after crash -- ❌ Longer recovery time - -The current design prioritizes **no re-execution** over DA re-fetching, as execution is typically more expensive than fetching. - -## Testing - -### Unit Tests - -- `checkpoint_test.go`: Tests checkpoint persistence operations -- `sequencer_test.go`: Tests sequencer batch retrieval logic - -### Integration Testing - -To test the based sequencer with a real DA layer: - -```bash -# Run with based sequencer configuration -make run-n NODES=1 SEQUENCER_TYPE=based - -# Simulate crash recovery -# 1. Stop node mid-epoch -# 2. Check checkpoint value -# 3. Restart node -# 4. Verify correct resumption -``` - -## Debugging - -### Log Messages - -**Checkpoint Loading**: -``` -loaded based sequencer checkpoint from DB da_height=105 tx_index=3 -``` - -**DA Fetching**: -``` -fetching forced inclusion transactions from DA da_height=109 -``` - -**Not at Epoch End**: -``` -not at epoch end - returning empty transactions da_height=105 epoch_end=109 -``` - -**Transactions Retrieved**: -``` -fetched forced inclusion transactions from DA valid_tx_count=150 skipped_tx_count=2 da_height_start=100 da_height_end=109 -``` - -**Checkpoint Resumption**: -``` -resuming from checkpoint within DA block tx_index=3 -``` - -### Common Issues - -**Problem**: No transactions being processed -- **Check**: Are you at an epoch end? Transactions only arrive at epoch boundaries. -- **Check**: Is forced inclusion configured? Look for `ErrForceInclusionNotConfigured`. - -**Problem**: Transactions re-executed after restart -- **Check**: Is checkpoint being persisted? Look for checkpoint save errors. -- **Check**: Is checkpoint being loaded on restart? - -**Problem**: Slow recovery after crash -- **Cause**: Entire epoch is re-fetched from DA. -- **Solution**: Reduce epoch size for faster recovery (at cost of more DA queries). - -## Future Improvements - -### Potential Optimizations - -1. 
**Persistent Transaction Cache**: Store fetched transactions on disk to avoid re-fetching entire epoch after crash -2. **Progressive Fetching**: Fetch DA blocks incrementally within an epoch instead of all at once -3. **Compression**: Compress checkpoint data for faster I/O -4. **Parallel Validation**: Validate transactions from multiple DA heights concurrently - -### Design Alternatives - -1. **Streaming Model**: Instead of epoch boundaries, stream transactions as DA blocks become available -2. **Hybrid Checkpointing**: Save both fine-grained position and transaction cache -3. **Two-Phase Commit**: Separate checkpoint updates from transaction processing for better crash consistency - -## References - -- Core interfaces: `core/sequencer.go` -- Forced inclusion: `block/internal/da/forced_inclusion_retriever.go` -- Epoch calculations: `types/epoch.go` -- Executor integration: `block/internal/executing/executor.go` +The checkpoint is serialized using Protocol Buffers (`pb.SequencerDACheckpoint`) for efficient storage and cross-version compatibility. From 408d76da68b421de52f3773a5cd87f3d11660df3 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 11:13:25 +0100 Subject: [PATCH 07/19] simplify constructor --- apps/evm/cmd/run.go | 2 +- apps/grpc/cmd/run.go | 2 +- apps/testapp/cmd/run.go | 2 +- sequencers/based/sequencer.go | 4 ---- sequencers/based/sequencer_test.go | 8 ++++---- sequencers/single/doc.go | 4 ---- sequencers/single/sequencer.go | 1 + 7 files changed, 8 insertions(+), 15 deletions(-) delete mode 100644 sequencers/single/doc.go diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index 7a6f027b7f..b504d73c92 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -169,7 +169,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 7d884ae91c..311de5881a 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index f6d4c92d9a..9b8eba7aac 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, nodeConfig, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 705622a36f..b42a6903e8 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -13,7 +13,6 @@ import ( "github.com/evstack/ev-node/block" coreda 
"github.com/evstack/ev-node/core/da" coresequencer "github.com/evstack/ev-node/core/sequencer" - "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" seqcommon "github.com/evstack/ev-node/sequencers/common" ) @@ -31,7 +30,6 @@ var _ coresequencer.Sequencer = (*BasedSequencer)(nil) type BasedSequencer struct { fiRetriever ForcedInclusionRetriever da coreda.DA - config config.Config genesis genesis.Genesis logger zerolog.Logger @@ -49,14 +47,12 @@ func NewBasedSequencer( fiRetriever ForcedInclusionRetriever, da coreda.DA, db ds.Batching, - config config.Config, genesis genesis.Genesis, logger zerolog.Logger, ) (*BasedSequencer, error) { bs := &BasedSequencer{ fiRetriever: fiRetriever, da: da, - config: config, genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/based/checkpoint")), diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index eae1f93cc3..f85a069ee8 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -90,7 +90,7 @@ func createTestSequencer(t *testing.T, mockDA *MockDA, cfg config.Config, gen ge // Create in-memory datastore db := syncds.MutexWrap(ds.NewMapDatastore()) - seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop()) + seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) require.NoError(t, err) return seq } @@ -223,7 +223,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { // Create in-memory datastore db := syncds.MutexWrap(ds.NewMapDatastore()) - seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop()) + seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ @@ -580,7 +580,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - seq1, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop()) + seq1, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ @@ -595,7 +595,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { assert.Equal(t, 2, len(resp.Batch.Transactions)) // Create a new sequencer with the same datastore (simulating restart) - seq2, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, cfg, gen, zerolog.Nop()) + seq2, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) require.NoError(t, err) // Checkpoint should be loaded from DB diff --git a/sequencers/single/doc.go b/sequencers/single/doc.go deleted file mode 100644 index b4f6abe716..0000000000 --- a/sequencers/single/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -This package implements a single sequencer. -*/ -package single diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 22c0d95828..6007b8d9f5 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -1,3 +1,4 @@ +// Package single implements a single sequencer. 
package single import ( From 21e3eadcbfbe185403089a789a4108e83321d56f Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 11:35:03 +0100 Subject: [PATCH 08/19] remove unused type --- proto/evnode/v1/state.proto | 5 --- types/pb/evnode/v1/state.pb.go | 62 ++++------------------------------ 2 files changed, 7 insertions(+), 60 deletions(-) diff --git a/proto/evnode/v1/state.proto b/proto/evnode/v1/state.proto index c545408dbc..263d3de993 100644 --- a/proto/evnode/v1/state.proto +++ b/proto/evnode/v1/state.proto @@ -27,8 +27,3 @@ message SequencerDACheckpoint { // Index of the next transaction to process within the DA block's forced inclusion batch uint64 tx_index = 2; } - -// Tx represents a transaction with its raw data -message Tx { - bytes data = 1; -} diff --git a/types/pb/evnode/v1/state.pb.go b/types/pb/evnode/v1/state.pb.go index 5765e57d8b..87030d2fd9 100644 --- a/types/pb/evnode/v1/state.pb.go +++ b/types/pb/evnode/v1/state.pb.go @@ -178,51 +178,6 @@ func (x *SequencerDACheckpoint) GetTxIndex() uint64 { return 0 } -// Tx represents a transaction with its raw data -type Tx struct { - state protoimpl.MessageState `protogen:"open.v1"` - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Tx) Reset() { - *x = Tx{} - mi := &file_evnode_v1_state_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Tx) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tx) ProtoMessage() {} - -func (x *Tx) ProtoReflect() protoreflect.Message { - mi := &file_evnode_v1_state_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tx.ProtoReflect.Descriptor instead. 
-func (*Tx) Descriptor() ([]byte, []int) { - return file_evnode_v1_state_proto_rawDescGZIP(), []int{2} -} - -func (x *Tx) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - var File_evnode_v1_state_proto protoreflect.FileDescriptor const file_evnode_v1_state_proto_rawDesc = "" + @@ -239,9 +194,7 @@ const file_evnode_v1_state_proto_rawDesc = "" + "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"O\n" + "\x15SequencerDACheckpoint\x12\x1b\n" + "\tda_height\x18\x01 \x01(\x04R\bdaHeight\x12\x19\n" + - "\btx_index\x18\x02 \x01(\x04R\atxIndex\"\x18\n" + - "\x02Tx\x12\x12\n" + - "\x04data\x18\x01 \x01(\fR\x04dataB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\btx_index\x18\x02 \x01(\x04R\atxIndexB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_state_proto_rawDescOnce sync.Once @@ -255,17 +208,16 @@ func file_evnode_v1_state_proto_rawDescGZIP() []byte { return file_evnode_v1_state_proto_rawDescData } -var file_evnode_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_evnode_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_evnode_v1_state_proto_goTypes = []any{ (*State)(nil), // 0: evnode.v1.State (*SequencerDACheckpoint)(nil), // 1: evnode.v1.SequencerDACheckpoint - (*Tx)(nil), // 2: evnode.v1.Tx - (*Version)(nil), // 3: evnode.v1.Version - (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*Version)(nil), // 2: evnode.v1.Version + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp } var file_evnode_v1_state_proto_depIdxs = []int32{ - 3, // 0: evnode.v1.State.version:type_name -> evnode.v1.Version - 4, // 1: evnode.v1.State.last_block_time:type_name -> google.protobuf.Timestamp + 2, // 0: evnode.v1.State.version:type_name -> evnode.v1.Version + 3, // 1: evnode.v1.State.last_block_time:type_name -> google.protobuf.Timestamp 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name @@ -285,7 +237,7 @@ func file_evnode_v1_state_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_state_proto_rawDesc), len(file_evnode_v1_state_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, From ab669ead0c7d987698933c217618066464d605de Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 12:30:47 +0100 Subject: [PATCH 09/19] implement checkpoint for single sequencer --- apps/evm/cmd/run.go | 2 +- apps/grpc/cmd/run.go | 2 +- apps/testapp/cmd/run.go | 2 +- sequencers/based/sequencer.go | 31 ++- sequencers/based/sequencer_test.go | 8 +- sequencers/common/size_validation.go | 13 +- sequencers/common/size_validation_test.go | 92 ------- sequencers/single/README.md | 201 +++++++++++---- sequencers/single/sequencer.go | 279 ++++++++++---------- sequencers/single/sequencer_test.go | 300 +++++++++++++++------- 10 files changed, 525 insertions(+), 405 deletions(-) diff --git a/apps/evm/cmd/run.go b/apps/evm/cmd/run.go index b504d73c92..06af78e61f 100644 --- a/apps/evm/cmd/run.go +++ b/apps/evm/cmd/run.go @@ -169,7 +169,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, datastore, genesis, logger) if err != nil { return nil, 
fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/grpc/cmd/run.go b/apps/grpc/cmd/run.go index 311de5881a..305b2e2a44 100644 --- a/apps/grpc/cmd/run.go +++ b/apps/grpc/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, datastore, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 9b8eba7aac..690a2764db 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -131,7 +131,7 @@ func createSequencer( return nil, fmt.Errorf("based sequencer mode requires aggregator mode to be enabled") } - basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, da, datastore, genesis, logger) + basedSeq, err := based.NewBasedSequencer(ctx, fiRetriever, datastore, genesis, logger) if err != nil { return nil, fmt.Errorf("failed to create based sequencer: %w", err) } diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index b42a6903e8..bbade78082 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -29,8 +29,6 @@ var _ coresequencer.Sequencer = (*BasedSequencer)(nil) // It uses DA as a queue and only persists a checkpoint of where it is in processing. type BasedSequencer struct { fiRetriever ForcedInclusionRetriever - da coreda.DA - genesis genesis.Genesis logger zerolog.Logger daHeight atomic.Uint64 @@ -45,15 +43,12 @@ type BasedSequencer struct { func NewBasedSequencer( ctx context.Context, fiRetriever ForcedInclusionRetriever, - da coreda.DA, db ds.Batching, genesis genesis.Genesis, logger zerolog.Logger, ) (*BasedSequencer, error) { bs := &BasedSequencer{ fiRetriever: fiRetriever, - da: da, - genesis: genesis, logger: logger.With().Str("component", "based_sequencer").Logger(), checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/based/checkpoint")), } @@ -96,11 +91,15 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.S // It treats DA as a queue and only persists where it is in processing func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) { // If we have no cached transactions or we've consumed all from the current DA block, - // fetch the next DA block + // fetch the next DA epoch + daHeight := s.GetDAHeight() if len(s.currentBatchTxs) == 0 || s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { - if err := s.fetchNextDABatch(ctx); err != nil { + daEndHeight, err := s.fetchNextDAEpoch(ctx) + if err != nil { return nil, err } + + daHeight = daEndHeight } // Create batch from current position up to MaxBytes @@ -113,7 +112,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get // If we've consumed all transactions from this DA block, move to next if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { - s.checkpoint.DAHeight++ + s.checkpoint.DAHeight = daHeight + 1 s.checkpoint.TxIndex = 0 s.currentBatchTxs = nil @@ -135,8 +134,8 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get }, nil } -// fetchNextDABatch fetches transactions from the next DA block -func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error { +// fetchNextDAEpoch fetches 
transactions from the next DA epoch +func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { currentDAHeight := s.checkpoint.DAHeight s.logger.Debug(). @@ -148,17 +147,17 @@ func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error { if err != nil { // Check if forced inclusion is not configured if errors.Is(err, block.ErrForceInclusionNotConfigured) { - return errors.New("forced inclusion not configured") + return currentDAHeight, block.ErrForceInclusionNotConfigured } else if errors.Is(err, coreda.ErrHeightFromFuture) { // If we get a height from future error, stay at current position // We'll retry the same height on the next call until DA produces that block s.logger.Debug(). Uint64("da_height", currentDAHeight). Msg("DA height from future, waiting for DA to produce block") - return nil + return currentDAHeight, nil } s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") - return err + return currentDAHeight, err } // Validate and filter transactions @@ -184,7 +183,7 @@ func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error { Uint64("da_height_end", forcedTxsEvent.EndDaHeight). Msg("fetched forced inclusion transactions from DA") - // Cache the transactions for this DA block + // Cache the transactions for this DA epoch s.currentBatchTxs = validTxs // If we had a non-zero tx index, we're resuming from a crash mid-block @@ -192,10 +191,10 @@ func (s *BasedSequencer) fetchNextDABatch(ctx context.Context) error { if s.checkpoint.TxIndex > 0 { s.logger.Info(). Uint64("tx_index", s.checkpoint.TxIndex). - Msg("resuming from checkpoint within DA block") + Msg("resuming from checkpoint within DA epoch") } - return nil + return forcedTxsEvent.EndDaHeight, nil } // createBatchFromCheckpoint creates a batch from the current checkpoint position respecting MaxBytes diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index f85a069ee8..af65d45fbb 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -90,7 +90,7 @@ func createTestSequencer(t *testing.T, mockDA *MockDA, cfg config.Config, gen ge // Create in-memory datastore db := syncds.MutexWrap(ds.NewMapDatastore()) - seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) + seq, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) return seq } @@ -223,7 +223,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { // Create in-memory datastore db := syncds.MutexWrap(ds.NewMapDatastore()) - seq, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) + seq, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ @@ -580,7 +580,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - seq1, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) + seq1, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ @@ -595,7 +595,7 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { assert.Equal(t, 2, len(resp.Batch.Transactions)) // Create a new 
sequencer with the same datastore (simulating restart) - seq2, err := NewBasedSequencer(context.Background(), fiRetriever, mockDA, db, gen, zerolog.Nop()) + seq2, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) // Checkpoint should be loaded from DB diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go index ee781ce205..11dbc5ae36 100644 --- a/sequencers/common/size_validation.go +++ b/sequencers/common/size_validation.go @@ -12,16 +12,5 @@ const ( // This checks against the DA layer limit, not the per-batch limit. // Returns true if the blob is within the absolute size limit, false otherwise. func ValidateBlobSize(blob []byte) bool { - return uint64(GetBlobSize(blob)) <= AbsoluteMaxBlobSize -} - -// WouldExceedCumulativeSize checks if adding a blob would exceed the cumulative size limit for a batch. -// Returns true if adding the blob would exceed the limit, false otherwise. -func WouldExceedCumulativeSize(currentSize int, blobSize int, maxBytes uint64) bool { - return uint64(currentSize)+uint64(blobSize) > maxBytes -} - -// GetBlobSize returns the size of a blob in bytes. -func GetBlobSize(blob []byte) int { - return len(blob) + return uint64(len(blob)) <= AbsoluteMaxBlobSize } diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go index 103c66d8be..67ebed4c0c 100644 --- a/sequencers/common/size_validation_test.go +++ b/sequencers/common/size_validation_test.go @@ -47,95 +47,3 @@ func TestValidateBlobSize(t *testing.T) { }) } } - -func TestWouldExceedCumulativeSize(t *testing.T) { - tests := []struct { - name string - currentSize int - blobSize int - maxBytes uint64 - want bool - }{ - { - name: "empty batch, small blob", - currentSize: 0, - blobSize: 50, - maxBytes: 100, - want: false, - }, - { - name: "would fit exactly", - currentSize: 50, - blobSize: 50, - maxBytes: 100, - want: false, - }, - { - name: "would exceed by one byte", - currentSize: 50, - blobSize: 51, - maxBytes: 100, - want: true, - }, - { - name: "far exceeds", - currentSize: 80, - blobSize: 100, - maxBytes: 100, - want: true, - }, - { - name: "zero max bytes", - currentSize: 0, - blobSize: 1, - maxBytes: 0, - want: true, - }, - { - name: "current already at limit", - currentSize: 100, - blobSize: 1, - maxBytes: 100, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := WouldExceedCumulativeSize(tt.currentSize, tt.blobSize, tt.maxBytes) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestGetBlobSize(t *testing.T) { - tests := []struct { - name string - blobSize int - want int - }{ - { - name: "empty blob", - blobSize: 0, - want: 0, - }, - { - name: "small blob", - blobSize: 42, - want: 42, - }, - { - name: "large blob", - blobSize: 1024 * 1024, - want: 1024 * 1024, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - blob := make([]byte, tt.blobSize) - got := GetBlobSize(blob) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/sequencers/single/README.md b/sequencers/single/README.md index 2b6184c3c4..323e76f41e 100644 --- a/sequencers/single/README.md +++ b/sequencers/single/README.md @@ -6,6 +6,8 @@ The single sequencer is a component of the Evolve framework that handles transac The sequencer receives transactions from clients, batches them together, and submits these batches to a Data Availability layer. 
It maintains transaction and batch queues, handles recovery from crashes, and provides verification mechanisms for batches. +**Key Feature**: The single sequencer implements a checkpoint system for forced inclusion transactions from DA, ensuring that **DA transactions are never re-executed after a crash**. + ```mermaid flowchart LR Client["Client"] --> Sequencer @@ -19,34 +21,40 @@ flowchart LR The main component that orchestrates the entire sequencing process. It: -- Receives transactions from clients +- Receives transactions from clients (mempool transactions) +- Retrieves forced inclusion transactions from the DA layer - Maintains transaction and batch queues - Periodically creates and submits batches to the DA layer -- Handles recovery from crashes +- Handles recovery from crashes via checkpoint system - Provides verification mechanisms for batches -### TransactionQueue - -Manages the queue of pending transactions: - -- Stores transactions in memory and in the database -- Provides methods to add transactions and extract batches -- Handles recovery of transactions from the database after a crash - ### BatchQueue -Manages the queue of batches: +Manages the queue of pending **mempool transactions**: - Stores batches in memory and in the database - Provides methods to add and retrieve batches - Handles recovery of batches from the database after a crash +- Supports queue size limits for backpressure + +### CheckpointStore -### DAClient +Manages persistence of the forced inclusion checkpoint: -Handles communication with the Data Availability layer: +- Tracks the exact position in the DA transaction stream +- Stores `DAHeight` (current DA block height being processed) +- Stores `TxIndex` (index within the forced inclusion batch) +- Enables crash recovery without re-executing DA transactions +- Persisted after every batch of forced inclusion transactions -- Submits batches to the DA layer -- Retrieves batch status from the DA layer +### ForcedInclusionRetriever + +Retrieves forced inclusion transactions from the DA layer: + +- Fetches transactions at epoch boundaries +- Returns all transactions from an entire epoch +- Validates transaction sizes +- Supports epoch-based transaction retrieval ## Flow of Calls @@ -54,11 +62,13 @@ Handles communication with the Data Availability layer: ```mermaid flowchart TD - A["NewSequencer()"] --> B["LoadLastBatchHashFromDB()"] - B --> C["LoadSeenBatchesFromDB()"] - C --> D["Load Transaction Queue from DB"] - D --> E["Load BatchQueue from DB"] - E --> F["Start batch submission loop"] + A["NewSequencer()"] --> B["Load BatchQueue from DB"] + B --> C["Load Checkpoint from DB"] + C --> D{"Checkpoint exists?"} + D -->|Yes| E["Resume from checkpoint position"] + D -->|No| F["Initialize new checkpoint"] + E --> G["Ready to process"] + F --> G ``` ### Transaction Submission Flow @@ -81,15 +91,21 @@ flowchart TD E --> F["Add to BatchQueue"] ``` -### Batch Retrieval Flow +### Batch Retrieval Flow (with Forced Inclusion) ```mermaid flowchart TD A["GetNextBatch()"] --> B["Validate ID"] - B --> C["Check batch hash match"] - C --> D["If match or both nil"] - D --> E["Get batch from BatchQueue"] - E --> F["Update last batch hash"] + B --> C{"Have cached forced inclusion txs?"} + C -->|No| D["fetchNextDABatch()"] + C -->|Yes| E["Process from checkpoint"] + D --> E + E --> F["Create forced inclusion batch from checkpoint position"] + F --> G["Update checkpoint with consumed txs"] + G --> H["Persist checkpoint to disk"] + H --> I["Get mempool batch from BatchQueue"] + I --> 
J["Combine forced inclusion + mempool txs"] + J --> K["Return combined batch"] ``` ### Batch Verification Flow @@ -101,63 +117,134 @@ flowchart TD C --> D["Return status"] ``` +## Checkpoint System for Forced Inclusion + +### Purpose + +The checkpoint system ensures that **forced inclusion transactions from DA are never re-executed after a crash**. This is critical for correctness and determinism. + +### How It Works + +1. **Checkpoint Structure**: + + ```go + type Checkpoint struct { + DAHeight uint64 // Current DA block height being processed + TxIndex uint64 // Index within the forced inclusion batch + } + ``` + +2. **Processing Flow**: + - Fetch forced inclusion transactions from DA at epoch boundaries + - Cache transactions in memory (`cachedForcedInclusionTxs`) + - Process transactions incrementally from checkpoint position + - Update `TxIndex` after each batch + - When all transactions consumed, increment `DAHeight` and reset `TxIndex` + - **Persist checkpoint after every batch** + +3. **Crash Recovery**: + - On restart, load checkpoint from disk + - Re-fetch forced inclusion transactions from DA (entire epoch) + - Resume processing from `checkpoint.TxIndex` + - Skip already-processed transactions + +### Example + +``` +Initial state: Checkpoint(DAHeight: 100, TxIndex: 0) +DA returns 3 transactions at height 100 + +Batch 1: Process tx[0] + → Checkpoint(DAHeight: 100, TxIndex: 1) ✅ Persisted + +Batch 2: Process tx[1] + → Checkpoint(DAHeight: 100, TxIndex: 2) ✅ Persisted + +**CRASH OCCURS** + +Restart: Load Checkpoint(DAHeight: 100, TxIndex: 2) from disk + → Re-fetch transactions from DA height 100 + → Resume from tx[2] (skip tx[0] and tx[1]) + → ✅ No re-execution! + +Batch 3: Process tx[2] + → Checkpoint(DAHeight: 101, TxIndex: 0) ✅ Persisted +``` + +### Comparison with Mempool Transactions + +| Aspect | Forced Inclusion (DA) | Mempool Transactions | +| ----------------- | ------------------------------- | ---------------------- | +| Source | DA layer via forced inclusion | Client submissions | +| Persistence | Checkpoint (DAHeight + TxIndex) | BatchQueue | +| Crash Recovery | Resume from checkpoint position | Resume from queue | +| Re-execution Risk | ❌ Prevented by checkpoint | ❌ Prevented by queue | +| Priority | Always first in batch | After forced inclusion | + ## Database Layout -The single sequencer uses a key-value database to store transactions, batches, and metadata. Here's the layout of the database: +The single sequencer uses a key-value database to store batches, checkpoints, and metadata. 
Here's the layout of the database: ### Keys -| Key Pattern | Description | -|---------------------------|---------------------------------------------------------| -| `l` | Last batch hash | -| `seen:` | Marker for seen batch hashes | -| `` | Batch data (hash is the batch hash) | -| `tx:` | Transaction data (hash is SHA-256 of transaction bytes) | +| Key Pattern | Description | +| -------------------- | ------------------------------------------ | +| `/single/checkpoint` | Checkpoint for forced inclusion processing | +| `batches/` | Batch data (mempool transactions) | ### Key Details -#### Last Batch Hash Key (`l`) - -- Stores the hash of the last processed batch -- Used for recovery after a crash -- Value: Raw bytes of the hash - -#### Seen Batch Hash Keys (`seen:`) +#### Checkpoint Key (`/single/checkpoint`) -- Marks batches that have been seen and processed -- Used for batch verification -- Value: `1` (presence indicates the batch has been seen) +- Stores the forced inclusion checkpoint +- Used for crash recovery +- Value: Protobuf-encoded checkpoint data (`SequencerDACheckpoint`) +- Updated after every batch of forced inclusion transactions -#### Batch Keys (``) +#### Batch Keys (`batches/`) -- Stores the actual batch data +- Stores mempool transaction batches - Key is the hex-encoded hash of the batch - Value: Protobuf-encoded batch data - -#### Transaction Keys (`tx:`) - -- Stores individual transactions -- Key is prefixed with `tx:` followed by the SHA-256 hash of the transaction bytes -- Value: Raw transaction bytes +- Managed by `BatchQueue` ## Recovery Mechanism The single sequencer implements a robust recovery mechanism to handle crashes: -1. On startup, it loads the last batch hash from the database -2. It loads all seen batch hashes into memory -3. It loads all pending transactions from the database into the transaction queue -4. It loads all pending batches from the database into the batch queue -5. It resumes normal operation, continuing from where it left off +### For Forced Inclusion Transactions (DA) + +1. On startup, load checkpoint from database +2. If no checkpoint exists, initialize with genesis DA height +3. Resume from checkpoint position (`DAHeight` + `TxIndex`) +4. Re-fetch forced inclusion transactions from DA +5. Skip already-processed transactions using `TxIndex` +6. Continue processing from where it left off + +**Result**: ✅ No forced inclusion transactions are re-executed + +### For Mempool Transactions (Queue) + +1. On startup, load all pending batches from database into `BatchQueue` +2. Resume processing batches in order +3. Continue normal operation + +**Result**: ✅ No mempool transactions are lost + +### Combined Recovery + +Both systems work together to ensure: -This ensures that no transactions are lost in case of a crash, and the sequencer can continue operating seamlessly. 
+- **Correctness**: No transaction is lost or re-executed +- **Determinism**: Same state after crash recovery +- **Atomicity**: Checkpoint and queue are consistent ## Metrics The sequencer exposes the following metrics: | Metric | Description | -|-------------------------|--------------------------------------------------| +| ----------------------- | ------------------------------------------------ | | `gas_price` | The gas price of DA | | `last_blob_size` | The size in bytes of the last DA blob | | `transaction_status` | Count of transaction statuses for DA submissions | diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 6007b8d9f5..5b1e1e0498 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -19,29 +19,21 @@ import ( seqcommon "github.com/evstack/ev-node/sequencers/common" ) -var ( - // ErrInvalidId is returned when the chain id is invalid - ErrInvalidId = errors.New("invalid chain id") -) +// ErrInvalidId is returned when the chain id is invalid +var ErrInvalidId = errors.New("invalid chain id") // ForcedInclusionRetriever defines the interface for retrieving forced inclusion transactions from DA type ForcedInclusionRetriever interface { RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) } -// pendingForcedInclusionTx represents a forced inclusion transaction that couldn't fit in the current epoch -type pendingForcedInclusionTx struct { - Data []byte - OriginalHeight uint64 -} - var _ coresequencer.Sequencer = (*Sequencer)(nil) // Sequencer implements core sequencing interface type Sequencer struct { - logger zerolog.Logger - - proposer bool + fiRetriever ForcedInclusionRetriever + logger zerolog.Logger + proposer bool Id []byte da coreda.DA @@ -50,10 +42,12 @@ type Sequencer struct { queue *BatchQueue // single queue for immediate availability // Forced inclusion support - fiRetriever ForcedInclusionRetriever - genesis genesis.Genesis - daHeight atomic.Uint64 - pendingForcedInclusionTxs []pendingForcedInclusionTx + daHeight atomic.Uint64 + checkpointStore *seqcommon.CheckpointStore + checkpoint *seqcommon.Checkpoint + + // Cached forced inclusion transactions from the current epoch + cachedForcedInclusionTxs [][]byte } // NewSequencer creates a new Single Sequencer @@ -67,32 +61,53 @@ func NewSequencer( proposer bool, maxQueueSize int, fiRetriever ForcedInclusionRetriever, - gen genesis.Genesis, + genesis genesis.Genesis, ) (*Sequencer, error) { s := &Sequencer{ - logger: logger, - da: da, - batchTime: batchTime, - Id: id, - queue: NewBatchQueue(db, "batches", maxQueueSize), - proposer: proposer, - fiRetriever: fiRetriever, - genesis: gen, - pendingForcedInclusionTxs: make([]pendingForcedInclusionTx, 0), + logger: logger, + da: da, + batchTime: batchTime, + Id: id, + queue: NewBatchQueue(db, "batches", maxQueueSize), + proposer: proposer, + fiRetriever: fiRetriever, + checkpointStore: seqcommon.NewCheckpointStore(db, ds.NewKey("/single/checkpoint")), } - s.SetDAHeight(gen.DAStartHeight) // will be overridden by the executor + s.SetDAHeight(genesis.DAStartHeight) // will be overridden by the executor loadCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + + // Load batch queue from DB if err := s.queue.Load(loadCtx); err != nil { return nil, fmt.Errorf("failed to load batch queue from DB: %w", err) } - // No DA submission loop here; handled by central manager + // Load checkpoint from DB, or initialize if none exists + checkpoint, err 
:= s.checkpointStore.Load(loadCtx)
+	if err != nil {
+		if errors.Is(err, seqcommon.ErrCheckpointNotFound) {
+			// No checkpoint exists, initialize with current DA height
+			s.checkpoint = &seqcommon.Checkpoint{
+				DAHeight: s.GetDAHeight(),
+				TxIndex:  0,
+			}
+		} else {
+			return nil, fmt.Errorf("failed to load checkpoint from DB: %w", err)
+		}
+	} else {
+		s.checkpoint = checkpoint
+		logger.Info().
+			Uint64("da_height", checkpoint.DAHeight).
+			Uint64("tx_index", checkpoint.TxIndex).
+			Msg("loaded single sequencer checkpoint from DB")
+	}
+
 	return s, nil
 }
 
 // SubmitBatchTxs implements sequencing.Sequencer.
+// It adds mempool transactions to the batch queue.
 func (c *Sequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.SubmitBatchTxsRequest) (*coresequencer.SubmitBatchTxsResponse, error) {
 	if !c.isValid(req.Id) {
 		return nil, ErrInvalidId
@@ -121,46 +136,26 @@ func (c *Sequencer) SubmitBatchTxs(ctx context.Context, req coresequencer.Submit
 }
 
 // GetNextBatch implements sequencing.Sequencer.
+// It gets the next batch of transactions and fetches forced inclusion transactions.
 func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextBatchRequest) (*coresequencer.GetNextBatchResponse, error) {
 	if !c.isValid(req.Id) {
 		return nil, ErrInvalidId
 	}
 
-	currentDAHeight := c.GetDAHeight()
-
-	forcedTxsEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight)
-	if err != nil {
-		if errors.Is(err, coreda.ErrHeightFromFuture) {
-			c.logger.Debug().
-				Uint64("da_height", currentDAHeight).
-				Msg("DA height from future, waiting for DA to produce block")
-		} else if !errors.Is(err, block.ErrForceInclusionNotConfigured) {
-			c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions")
+	// If we have no cached transactions or we've consumed all from the current cache,
+	// fetch the next DA epoch
+	daHeight := c.GetDAHeight()
+	if len(c.cachedForcedInclusionTxs) == 0 || c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) {
+		daEndHeight, err := c.fetchNextDAEpoch(ctx)
+		if err != nil {
+			return nil, err
 		}
-		// Still create an empty forced inclusion event
-		forcedTxsEvent = &block.ForcedInclusionEvent{
-			Txs:           [][]byte{},
-			StartDaHeight: currentDAHeight,
-			EndDaHeight:   currentDAHeight,
-		}
-	} else {
-		// Update DA height.
-		// If we are in between epochs, we still need to bump the da height.
-		// At the end of an epoch, we need to bump to go to the next epoch.
-		if forcedTxsEvent.EndDaHeight >= currentDAHeight {
-			c.SetDAHeight(forcedTxsEvent.EndDaHeight + 1)
-		}
+		daHeight = daEndHeight
 	}
 
-	// Always try to process forced inclusion transactions (including pending from previous epochs)
-	forcedTxs := c.processForcedInclusionTxs(forcedTxsEvent, req.MaxBytes)
-
-	c.logger.Debug().
-		Int("tx_count", len(forcedTxs)).
-		Uint64("da_height_start", forcedTxsEvent.StartDaHeight).
-		Uint64("da_height_end", forcedTxsEvent.EndDaHeight).
- Msg("retrieved forced inclusion transactions from DA") + // Process forced inclusion transactions from checkpoint position + forcedTxs := c.processForcedInclusionTxsFromCheckpoint(req.MaxBytes) // Calculate size used by forced inclusion transactions forcedTxsSize := 0 @@ -168,6 +163,33 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB forcedTxsSize += len(tx) } + // Update checkpoint after consuming forced inclusion transactions + if len(forcedTxs) > 0 { + c.checkpoint.TxIndex += uint64(len(forcedTxs)) + + // If we've consumed all transactions from cache, move to next DA height + if c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) { + c.checkpoint.DAHeight = daHeight + 1 + c.checkpoint.TxIndex = 0 + c.cachedForcedInclusionTxs = nil + + // Update the global DA height + c.SetDAHeight(c.checkpoint.DAHeight) + } + + // Persist checkpoint + if err := c.checkpointStore.Save(ctx, c.checkpoint); err != nil { + c.logger.Error().Err(err).Msg("failed to save checkpoint") + return nil, fmt.Errorf("failed to save checkpoint: %w", err) + } + + c.logger.Debug(). + Int("forced_tx_count", len(forcedTxs)). + Uint64("checkpoint_da_height", c.checkpoint.DAHeight). + Uint64("checkpoint_tx_index", c.checkpoint.TxIndex). + Msg("processed forced inclusion transactions and updated checkpoint") + } + batch, err := c.queue.Next(ctx) if err != nil { return nil, err @@ -237,7 +259,6 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat } if !c.proposer { - proofs, err := c.da.GetProofs(ctx, req.BatchData, c.Id) if err != nil { return nil, fmt.Errorf("failed to get proofs: %w", err) @@ -255,6 +276,7 @@ func (c *Sequencer) VerifyBatch(ctx context.Context, req coresequencer.VerifyBat } return &coresequencer.VerifyBatchResponse{Status: true}, nil } + return &coresequencer.VerifyBatchResponse{Status: true}, nil } @@ -266,7 +288,6 @@ func (c *Sequencer) isValid(Id []byte) bool { // This should be called when the sequencer needs to sync to a specific DA height func (c *Sequencer) SetDAHeight(height uint64) { c.daHeight.Store(height) - c.logger.Debug().Uint64("da_height", height).Msg("DA height updated") } // GetDAHeight returns the current DA height @@ -274,95 +295,87 @@ func (c *Sequencer) GetDAHeight() uint64 { return c.daHeight.Load() } -// processForcedInclusionTxs processes forced inclusion transactions with size validation and pending queue management -func (c *Sequencer) processForcedInclusionTxs(event *block.ForcedInclusionEvent, maxBytes uint64) [][]byte { - currentSize := 0 - var newPendingTxs []pendingForcedInclusionTx - var validatedTxs [][]byte - - // First, process any pending transactions from previous epochs - for _, pendingTx := range c.pendingForcedInclusionTxs { - txSize := seqcommon.GetBlobSize(pendingTx.Data) +// fetchNextDAEpoch fetches transactions from the next DA epoch using checkpoint +func (c *Sequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { + currentDAHeight := c.checkpoint.DAHeight - if !seqcommon.ValidateBlobSize(pendingTx.Data) { - c.logger.Warn(). - Uint64("original_height", pendingTx.OriginalHeight). - Int("blob_size", txSize). - Msg("pending forced inclusion blob exceeds absolute maximum size - skipping") - continue - } + c.logger.Debug(). + Uint64("da_height", currentDAHeight). + Uint64("tx_index", c.checkpoint.TxIndex). 
+ Msg("fetching forced inclusion transactions from DA") - if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { + forcedTxsEvent, err := c.fiRetriever.RetrieveForcedIncludedTxs(ctx, currentDAHeight) + if err != nil { + if errors.Is(err, coreda.ErrHeightFromFuture) { c.logger.Debug(). - Uint64("original_height", pendingTx.OriginalHeight). - Int("current_size", currentSize). - Int("blob_size", txSize). - Msg("pending blob would exceed max size for this epoch - deferring again") - newPendingTxs = append(newPendingTxs, pendingTx) - continue + Uint64("da_height", currentDAHeight). + Msg("DA height from future, waiting for DA to produce block") + return currentDAHeight, nil + } else if errors.Is(err, block.ErrForceInclusionNotConfigured) { + // Forced inclusion not configured, continue without forced txs + c.cachedForcedInclusionTxs = [][]byte{} + return currentDAHeight, nil } - - validatedTxs = append(validatedTxs, pendingTx.Data) - currentSize += txSize - - c.logger.Debug(). - Uint64("original_height", pendingTx.OriginalHeight). - Int("blob_size", txSize). - Int("current_size", currentSize). - Msg("processed pending forced inclusion transaction") + c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") + return currentDAHeight, err } - // Now process new transactions from this epoch - for _, tx := range event.Txs { - txSize := seqcommon.GetBlobSize(tx) - + // Validate and filter transactions + validTxs := make([][]byte, 0, len(forcedTxsEvent.Txs)) + skippedTxs := 0 + for _, tx := range forcedTxsEvent.Txs { if !seqcommon.ValidateBlobSize(tx) { c.logger.Warn(). - Uint64("da_height", event.StartDaHeight). - Int("blob_size", txSize). + Uint64("da_height", forcedTxsEvent.StartDaHeight). + Int("blob_size", len(tx)). Msg("forced inclusion blob exceeds absolute maximum size - skipping") + skippedTxs++ continue } + validTxs = append(validTxs, tx) + } - if seqcommon.WouldExceedCumulativeSize(currentSize, txSize, maxBytes) { - c.logger.Debug(). - Uint64("da_height", event.StartDaHeight). - Int("current_size", currentSize). - Int("blob_size", txSize). - Msg("blob would exceed max size for this epoch - deferring to pending queue") - - // Store for next call - newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ - Data: tx, - OriginalHeight: event.StartDaHeight, - }) - continue - } + c.logger.Info(). + Int("valid_tx_count", len(validTxs)). + Int("skipped_tx_count", skippedTxs). + Uint64("da_height_start", forcedTxsEvent.StartDaHeight). + Uint64("da_height_end", forcedTxsEvent.EndDaHeight). + Msg("fetched forced inclusion transactions from DA") - validatedTxs = append(validatedTxs, tx) - currentSize += txSize + // Cache the transactions + c.cachedForcedInclusionTxs = validTxs - c.logger.Debug(). - Int("blob_size", txSize). - Int("current_size", currentSize). - Msg("processed forced inclusion transaction") + // If we had a non-zero tx index, we're resuming from a crash mid-processing + if c.checkpoint.TxIndex > 0 { + c.logger.Info(). + Uint64("tx_index", c.checkpoint.TxIndex). + Msg("resuming from checkpoint within forced inclusion batch") } - // Update pending queue - c.pendingForcedInclusionTxs = newPendingTxs - if len(newPendingTxs) > 0 { - c.logger.Info(). - Int("new_pending_count", len(newPendingTxs)). 
- Msg("stored pending forced inclusion transactions for next epoch") + return forcedTxsEvent.EndDaHeight, nil +} + +// processForcedInclusionTxsFromCheckpoint processes forced inclusion transactions from checkpoint position +func (c *Sequencer) processForcedInclusionTxsFromCheckpoint(maxBytes uint64) [][]byte { + if len(c.cachedForcedInclusionTxs) == 0 || c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) { + return [][]byte{} } - if len(validatedTxs) > 0 { - c.logger.Info(). - Int("processed_tx_count", len(validatedTxs)). - Int("pending_tx_count", len(newPendingTxs)). - Int("current_size", currentSize). - Msg("completed processing forced inclusion transactions") + var result [][]byte + var totalBytes uint64 + + // Start from the checkpoint index + for i := c.checkpoint.TxIndex; i < uint64(len(c.cachedForcedInclusionTxs)); i++ { + tx := c.cachedForcedInclusionTxs[i] + txSize := uint64(len(tx)) + + if totalBytes+txSize > maxBytes { + break + } + + result = append(result, tx) + totalBytes += txSize } - return validatedTxs + return result } diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index e7e8096789..63e3ff3bf8 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -33,6 +33,32 @@ func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Con return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) } +// newTestSequencer creates a sequencer for tests that don't need full initialization +func newTestSequencer(t *testing.T, db ds.Batching, fiRetriever ForcedInclusionRetriever, proposer bool) *Sequencer { + ctx := context.Background() + logger := zerolog.Nop() + + gen := genesis.Genesis{ + ChainID: "test", + DAStartHeight: 100, + } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test"), + 1*time.Second, + proposer, + 0, // unlimited queue + fiRetriever, + gen, + ) + require.NoError(t, err) + return seq +} + func TestSequencer_SubmitBatchTxs(t *testing.T) { dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) db := ds.NewMapDatastore() @@ -135,17 +161,32 @@ func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { db := ds.NewMapDatastore() + ctx := context.Background() logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), - fiRetriever: mockRetriever, + + gen := genesis.Genesis{ + ChainID: "test", + DAStartHeight: 100, } + + seq, err := NewSequencer( + ctx, + logger, + db, + nil, + []byte("test"), + 1*time.Second, + true, + 0, // unlimited queue + mockRetriever, + gen, + ) + require.NoError(t, err) + defer func() { err := db.Close() if err != nil { @@ -154,7 +195,7 @@ func TestSequencer_GetNextBatch_NoLastBatch(t *testing.T) { }() // Test case where lastBatchHash and seq.lastBatchHash are both nil - res, err := seq.GetNextBatch(context.Background(), coresequencer.GetNextBatchRequest{Id: seq.Id}) + res, err := seq.GetNextBatch(ctx, coresequencer.GetNextBatchRequest{Id: seq.Id}) if err != nil { t.Fatalf("Failed to get next batch: %v", err) } @@ -175,17 +216,12 @@ func TestSequencer_GetNextBatch_Success(t *testing.T) { mockBatch := &coresequencer.Batch{Transactions: [][]byte{[]byte("tx1"), []byte("tx2")}} db := ds.NewMapDatastore() - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - queue: NewBatchQueue(db, "batches", 0), // 0 = unlimited for test - Id: []byte("test"), - fiRetriever: mockRetriever, - } + + seq := newTestSequencer(t, db, mockRetriever, true) defer func() { err := db.Close() if err != nil { @@ -239,19 +275,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Proposer Mode", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: true, - da: mockDA, - queue: NewBatchQueue(db, "proposer_queue", 0), // 0 = unlimited for test - fiRetriever: mockRetriever, - } + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, true) + seq.da = mockDA + defer db.Close() res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: seq.Id, BatchData: batchData}) assert.NoError(err) @@ -265,18 +296,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Non-Proposer Mode", func(t *testing.T) { t.Run("Valid Proofs", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "valid_proofs_queue", 0), - fiRetriever: mockRetriever, - } + + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, false) + seq.da = mockDA + defer db.Close() mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() mockDA.On("Validate", mock.Anything, batchData, proofs, Id).Return([]bool{true, true}, nil).Once() @@ -290,18 +317,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid Proof", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_proof_queue", 0), - fiRetriever: mockRetriever, - } + + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, false) + seq.da = mockDA + defer db.Close() mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() mockDA.On("Validate", mock.Anything, batchData, proofs, Id).Return([]bool{true, false}, nil).Once() @@ -315,18 +338,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("GetProofs Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "getproofs_err_queue", 0), - fiRetriever: mockRetriever, - } + + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, false) + seq.da = mockDA + defer db.Close() expectedErr := errors.New("get proofs failed") mockDA.On("GetProofs", context.Background(), batchData, Id).Return(nil, expectedErr).Once() @@ -341,18 +360,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Validate Error", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "validate_err_queue", 0), - fiRetriever: mockRetriever, - } + + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, false) + seq.da = mockDA + defer db.Close() expectedErr := errors.New("validate failed") mockDA.On("GetProofs", context.Background(), batchData, Id).Return(proofs, nil).Once() @@ -367,19 +382,14 @@ func TestSequencer_VerifyBatch(t *testing.T) { t.Run("Invalid ID", func(t *testing.T) { mockDA := damocks.NewMockDA(t) - logger := zerolog.Nop() mockRetriever := new(MockForcedInclusionRetriever) mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, mock.Anything). 
Return(nil, block.ErrForceInclusionNotConfigured).Maybe() - seq := &Sequencer{ - logger: logger, - Id: Id, - proposer: false, - da: mockDA, - queue: NewBatchQueue(db, "invalid_queue", 0), - fiRetriever: mockRetriever, - } + db := ds.NewMapDatastore() + seq := newTestSequencer(t, db, mockRetriever, false) + seq.da = mockDA + defer db.Close() invalidId := []byte("invalid") res, err := seq.VerifyBatch(context.Background(), coresequencer.VerifyBatchRequest{Id: invalidId, BatchData: batchData}) @@ -548,12 +558,13 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { EndDaHeight: 100, }, nil).Once() - // Second call should process pending tx at DA height 101 (after first call bumped it to epochEnd + 1) + // Second call won't fetch from DA - tx2 is still in cache + // Only after both txs are consumed will we fetch from DA height 101 mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: 101, EndDaHeight: 101, - }, nil).Once() + }, nil).Maybe() gen := genesis.Genesis{ ChainID: "test-chain", @@ -588,8 +599,8 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { assert.Equal(t, 1, len(resp.Batch.Transactions), "Should only include first forced tx") assert.Equal(t, 100, len(resp.Batch.Transactions[0])) - // Verify pending tx is stored - assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Second tx should be pending") + // Verify checkpoint reflects that we've consumed one tx + assert.Equal(t, uint64(1), seq.checkpoint.TxIndex, "Should have consumed one tx from cache") // Second call - should get the pending forced tx resp2, err := seq.GetNextBatch(ctx, getReq) @@ -598,8 +609,9 @@ func TestSequencer_GetNextBatch_ForcedInclusion_ExceedsMaxBytes(t *testing.T) { assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") assert.Equal(t, 80, len(resp2.Batch.Transactions[0])) - // Pending queue should now be empty - assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + // Checkpoint should have moved to next DA height after consuming all cached txs + assert.Equal(t, uint64(101), seq.checkpoint.DAHeight, "Should have moved to next DA height") + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex, "TxIndex should be reset") mockFI.AssertExpectations(t) } @@ -620,12 +632,13 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) EndDaHeight: 100, }, nil).Once() - // Second call returns no new forced txs at height 101 (after first call bumped DA height to epochEnd + 1), but pending should still be processed + // Second call won't fetch from DA - forced tx is still in cache + // Only after the forced tx is consumed will we fetch from DA height 101 mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: 101, EndDaHeight: 101, - }, nil).Once() + }, nil).Maybe() gen := genesis.Genesis{ ChainID: "test-chain", @@ -672,8 +685,9 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only") assert.Equal(t, 50, len(resp.Batch.Transactions[0])) - // Verify pending forced tx is stored - assert.Equal(t, 1, len(seq.pendingForcedInclusionTxs), "Large forced tx should be pending") + // Verify checkpoint shows no forced tx was consumed (tx too large) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex, "No forced tx should be 
consumed yet") + assert.Equal(t, 1, len(seq.cachedForcedInclusionTxs), "Forced tx should still be cached") // Second call with larger maxBytes = 200 // Should process pending forced tx first @@ -689,8 +703,9 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx") assert.Equal(t, 150, len(resp2.Batch.Transactions[0])) - // Pending queue should now be empty - assert.Equal(t, 0, len(seq.pendingForcedInclusionTxs), "Pending queue should be empty") + // Checkpoint should reflect that forced tx was consumed + assert.Equal(t, uint64(101), seq.checkpoint.DAHeight, "Should have moved to next DA height") + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex, "TxIndex should be reset after consuming all") mockFI.AssertExpectations(t) } @@ -706,18 +721,27 @@ func TestSequencer_QueueLimit_Integration(t *testing.T) { Return(nil, block.ErrForceInclusionNotConfigured).Maybe() // Create a sequencer with a small queue limit for testing + ctx := context.Background() logger := zerolog.Nop() - seq := &Sequencer{ - logger: logger, - da: mockDA, - batchTime: time.Second, - Id: []byte("test"), - queue: NewBatchQueue(db, "test_queue", 2), // Very small limit for testing - proposer: true, - fiRetriever: mockRetriever, + + gen := genesis.Genesis{ + ChainID: "test", + DAStartHeight: 100, } - ctx := context.Background() + seq, err := NewSequencer( + ctx, + logger, + db, + mockDA, + []byte("test"), + time.Second, + true, + 2, // Very small limit for testing + mockRetriever, + gen, + ) + require.NoError(t, err) // Test successful batch submission within limit batch1 := createTestBatch(t, 3) @@ -939,3 +963,103 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { // 4. ✅ Continues to throttle when queue fills up again // 5. 
✅ Provides backpressure to prevent resource exhaustion
}
+
+func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) {
+	ctx := context.Background()
+	logger := zerolog.New(zerolog.NewConsoleWriter())
+
+	db := ds.NewMapDatastore()
+	defer db.Close()
+
+	// Create forced inclusion txs at DA height 100
+	mockFI := &MockForcedInclusionRetriever{}
+	forcedTx1 := make([]byte, 100)
+	forcedTx2 := make([]byte, 80)
+	forcedTx3 := make([]byte, 90)
+	mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{
+		Txs:           [][]byte{forcedTx1, forcedTx2, forcedTx3},
+		StartDaHeight: 100,
+		EndDaHeight:   100,
+	}, nil)
+
+	gen := genesis.Genesis{
+		ChainID:       "test-chain",
+		DAStartHeight: 100,
+	}
+
+	// Create first sequencer instance
+	seq1, err := NewSequencer(
+		ctx,
+		logger,
+		db,
+		nil,
+		[]byte("test-chain"),
+		1*time.Second,
+		true,
+		100,
+		mockFI,
+		gen,
+	)
+	require.NoError(t, err)
+
+	// First call - get first forced tx
+	getReq := coresequencer.GetNextBatchRequest{
+		Id:            []byte("test-chain"),
+		MaxBytes:      120,
+		LastBatchData: nil,
+	}
+
+	resp1, err := seq1.GetNextBatch(ctx, getReq)
+	require.NoError(t, err)
+	require.NotNil(t, resp1.Batch)
+	assert.Equal(t, 1, len(resp1.Batch.Transactions), "Should get first forced tx")
+	assert.Equal(t, 100, len(resp1.Batch.Transactions[0]))
+
+	// Verify checkpoint is persisted
+	assert.Equal(t, uint64(1), seq1.checkpoint.TxIndex, "Checkpoint should show 1 tx consumed")
+	assert.Equal(t, uint64(100), seq1.checkpoint.DAHeight, "Checkpoint should be at DA height 100")
+
+	// Second call - get second forced tx
+	resp2, err := seq1.GetNextBatch(ctx, getReq)
+	require.NoError(t, err)
+	require.NotNil(t, resp2.Batch)
+	assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should get second forced tx")
+	assert.Equal(t, 80, len(resp2.Batch.Transactions[0]))
+
+	// Verify checkpoint updated
+	assert.Equal(t, uint64(2), seq1.checkpoint.TxIndex, "Checkpoint should show 2 txs consumed")
+
+	// SIMULATE CRASH: Create new sequencer instance with same DB
+	// This simulates a node restart/crash
+	seq2, err := NewSequencer(
+		ctx,
+		logger,
+		db,
+		nil,
+		[]byte("test-chain"),
+		1*time.Second,
+		true,
+		100,
+		mockFI,
+		gen,
+	)
+	require.NoError(t, err)
+
+	// Verify checkpoint was loaded from disk
+	assert.Equal(t, uint64(2), seq2.checkpoint.TxIndex, "Checkpoint should be loaded from disk")
+	assert.Equal(t, uint64(100), seq2.checkpoint.DAHeight, "DA height should be loaded from disk")
+
+	// Third call on new sequencer instance - should get third forced tx (NOT re-execute first two)
+	resp3, err := seq2.GetNextBatch(ctx, getReq)
+	require.NoError(t, err)
+	require.NotNil(t, resp3.Batch)
+	assert.Equal(t, 1, len(resp3.Batch.Transactions), "Should get third forced tx (resume from checkpoint)")
+	assert.Equal(t, 90, len(resp3.Batch.Transactions[0]), "Should be third tx, not first")
+
+	// Verify checkpoint moved to next DA height after consuming all
+	assert.Equal(t, uint64(101), seq2.checkpoint.DAHeight, "Should have moved to next DA height")
+	assert.Equal(t, uint64(0), seq2.checkpoint.TxIndex, "TxIndex should be reset")
+
+	t.Log("✅ Checkpoint system successfully prevented re-execution of DA transactions after crash")
+	mockFI.AssertExpectations(t)
+}

From ea3fc6c0a6a6c7ff5eb77f834b2a6f5c0dc2bab8 Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Fri, 5 Dec 2025 12:40:11 +0100
Subject: [PATCH 10/19] check against max bytes directly

---
 sequencers/based/sequencer.go | 9 +++--
 sequencers/common/size_validation.go | 7 
---- sequencers/common/size_validation_test.go | 49 ----------------------- sequencers/single/sequencer.go | 9 +++-- 4 files changed, 10 insertions(+), 64 deletions(-) delete mode 100644 sequencers/common/size_validation_test.go diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index bbade78082..e4ade6f2e5 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -94,7 +94,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get // fetch the next DA epoch daHeight := s.GetDAHeight() if len(s.currentBatchTxs) == 0 || s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { - daEndHeight, err := s.fetchNextDAEpoch(ctx) + daEndHeight, err := s.fetchNextDAEpoch(ctx, req.MaxBytes) if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get } // fetchNextDAEpoch fetches transactions from the next DA epoch -func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { +func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint64, error) { currentDAHeight := s.checkpoint.DAHeight s.logger.Debug(). @@ -165,11 +165,12 @@ func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { skippedTxs := 0 for _, tx := range forcedTxsEvent.Txs { // Validate blob size against absolute maximum - if !seqcommon.ValidateBlobSize(tx) { + if uint64(len(tx)) > maxBytes { s.logger.Warn(). Uint64("da_height", forcedTxsEvent.StartDaHeight). Int("blob_size", len(tx)). - Msg("forced inclusion blob exceeds absolute maximum size - skipping") + Uint64("max_bytes", maxBytes). + Msg("forced inclusion blob exceeds maximum size - skipping") skippedTxs++ continue } diff --git a/sequencers/common/size_validation.go b/sequencers/common/size_validation.go index 11dbc5ae36..a88206e280 100644 --- a/sequencers/common/size_validation.go +++ b/sequencers/common/size_validation.go @@ -7,10 +7,3 @@ const ( // Blobs exceeding this size are invalid and should be rejected permanently. AbsoluteMaxBlobSize = 2 * 1024 * 1024 // 2MB ) - -// ValidateBlobSize checks if a single blob exceeds the absolute maximum allowed size. -// This checks against the DA layer limit, not the per-batch limit. -// Returns true if the blob is within the absolute size limit, false otherwise. 
-func ValidateBlobSize(blob []byte) bool { - return uint64(len(blob)) <= AbsoluteMaxBlobSize -} diff --git a/sequencers/common/size_validation_test.go b/sequencers/common/size_validation_test.go deleted file mode 100644 index 67ebed4c0c..0000000000 --- a/sequencers/common/size_validation_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestValidateBlobSize(t *testing.T) { - tests := []struct { - name string - blobSize int - want bool - }{ - { - name: "empty blob", - blobSize: 0, - want: true, - }, - { - name: "small blob", - blobSize: 100, - want: true, - }, - { - name: "exactly at limit", - blobSize: int(AbsoluteMaxBlobSize), - want: true, - }, - { - name: "one byte over limit", - blobSize: int(AbsoluteMaxBlobSize) + 1, - want: false, - }, - { - name: "far exceeds limit", - blobSize: int(AbsoluteMaxBlobSize) * 2, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - blob := make([]byte, tt.blobSize) - got := ValidateBlobSize(blob) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 5b1e1e0498..f2c8c11ab2 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -146,7 +146,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB // fetch the next DA epoch daHeight := c.GetDAHeight() if len(c.cachedForcedInclusionTxs) == 0 || c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) { - daEndHeight, err := c.fetchNextDAEpoch(ctx) + daEndHeight, err := c.fetchNextDAEpoch(ctx, req.MaxBytes) if err != nil { return nil, err } @@ -296,7 +296,7 @@ func (c *Sequencer) GetDAHeight() uint64 { } // fetchNextDAEpoch fetches transactions from the next DA epoch using checkpoint -func (c *Sequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { +func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint64, error) { currentDAHeight := c.checkpoint.DAHeight c.logger.Debug(). @@ -324,11 +324,12 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context) (uint64, error) { validTxs := make([][]byte, 0, len(forcedTxsEvent.Txs)) skippedTxs := 0 for _, tx := range forcedTxsEvent.Txs { - if !seqcommon.ValidateBlobSize(tx) { + if uint64(len(tx)) > maxBytes { c.logger.Warn(). Uint64("da_height", forcedTxsEvent.StartDaHeight). Int("blob_size", len(tx)). - Msg("forced inclusion blob exceeds absolute maximum size - skipping") + Uint64("max_bytes", maxBytes). 
+ Msg("forced inclusion blob exceeds maximum size - skipping") skippedTxs++ continue } From 840568da2bad916a9e5762b9606534d14c6efa83 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 12:51:28 +0100 Subject: [PATCH 11/19] chore: update comments --- sequencers/based/sequencer.go | 2 +- sequencers/single/sequencer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index e4ade6f2e5..62be5265e5 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -110,7 +110,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get if txCount > 0 { s.checkpoint.TxIndex += txCount - // If we've consumed all transactions from this DA block, move to next + // If we've consumed all transactions from this DA epoch, move to next if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { s.checkpoint.DAHeight = daHeight + 1 s.checkpoint.TxIndex = 0 diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index f2c8c11ab2..e568c726a9 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -167,7 +167,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB if len(forcedTxs) > 0 { c.checkpoint.TxIndex += uint64(len(forcedTxs)) - // If we've consumed all transactions from cache, move to next DA height + // If we've consumed all transactions from this DA epoch, move to next if c.checkpoint.TxIndex >= uint64(len(c.cachedForcedInclusionTxs)) { c.checkpoint.DAHeight = daHeight + 1 c.checkpoint.TxIndex = 0 From cdadf8080122551982320c96cd8952c48e99a67c Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 13:22:17 +0100 Subject: [PATCH 12/19] test: fix error check --- sequencers/based/sequencer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go index af65d45fbb..bb214b363d 100644 --- a/sequencers/based/sequencer_test.go +++ b/sequencers/based/sequencer_test.go @@ -233,7 +233,7 @@ func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { _, err = seq.GetNextBatch(context.Background(), req) require.Error(t, err) - assert.Contains(t, err.Error(), "forced inclusion not configured") + assert.Contains(t, err.Error(), "forced inclusion namespace not configured") } func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { From c761301533af8fac8fd79ac0f2ef17a2a34729bc Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 5 Dec 2025 13:33:03 +0100 Subject: [PATCH 13/19] test: properly test tx precedence --- sequencers/single/sequencer_test.go | 21 ++++++++++----------- types/epoch_test.go | 7 +++++++ 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 63e3ff3bf8..a4d8ca40fd 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -624,10 +624,10 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T) mockFI := &MockForcedInclusionRetriever{} - // First call returns a large forced tx that gets deferred - largeForcedTx := make([]byte, 150) + // First call returns a large forced tx that will get evicted + largeForcedTx1, largeForcedTx2 := make([]byte, 75), make([]byte, 75) mockFI.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ - Txs: [][]byte{largeForcedTx}, + Txs: 
[][]byte{largeForcedTx1, largeForcedTx2},
 		StartDaHeight: 100,
 		EndDaHeight:   100,
 	}, nil).Once()
@@ -671,23 +671,22 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T)
 	require.NoError(t, err)
 
-	// First call with maxBytes = 100
-	// Large forced tx (150 bytes) won't fit, gets deferred
-	// Batch tx (50 bytes) should be returned
+	// First call with maxBytes = 125
 	getReq := coresequencer.GetNextBatchRequest{
 		Id:            []byte("test-chain"),
-		MaxBytes:      100,
+		MaxBytes:      125,
 		LastBatchData: nil,
 	}
 
 	resp, err := seq.GetNextBatch(ctx, getReq)
 	require.NoError(t, err)
 	require.NotNil(t, resp.Batch)
-	assert.Equal(t, 1, len(resp.Batch.Transactions), "Should have batch tx only")
-	assert.Equal(t, 50, len(resp.Batch.Transactions[0]))
+	assert.Equal(t, 2, len(resp.Batch.Transactions), "Should have 1 batch tx + 1 forced tx")
+	assert.Equal(t, 75, len(resp.Batch.Transactions[0])) // forced tx is 75 bytes
+	assert.Equal(t, 50, len(resp.Batch.Transactions[1])) // batch tx is 50 bytes
 
-	// Verify checkpoint shows no forced tx was consumed (tx too large)
-	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex, "No forced tx should be consumed yet")
-	assert.Equal(t, 1, len(seq.cachedForcedInclusionTxs), "Forced tx should still be cached")
+	// Verify checkpoint shows the first forced tx was consumed and one remains cached
+	assert.Equal(t, uint64(1), seq.checkpoint.TxIndex, "Only one forced tx should be consumed")
+	assert.Greater(t, len(seq.cachedForcedInclusionTxs), 1, "Remaining forced tx should still be cached")
 
 	// Second call with larger maxBytes = 200
 	// Should process pending forced tx first
@@ -701,7 +700,7 @@ func TestSequencer_GetNextBatch_AlwaysCheckPendingForcedInclusion(t *testing.T)
 	require.NoError(t, err)
 	require.NotNil(t, resp2.Batch)
 	assert.Equal(t, 1, len(resp2.Batch.Transactions), "Should include pending forced tx")
-	assert.Equal(t, 150, len(resp2.Batch.Transactions[0]))
+	assert.Equal(t, 75, len(resp2.Batch.Transactions[0]))
 
 	// Checkpoint should reflect that forced tx was consumed
 	assert.Equal(t, uint64(101), seq.checkpoint.DAHeight, "Should have moved to next DA height")
diff --git a/types/epoch_test.go b/types/epoch_test.go
index 5787126186..c293bcd350 100644
--- a/types/epoch_test.go
+++ b/types/epoch_test.go
@@ -91,6 +91,13 @@ func TestCalculateEpochNumber(t *testing.T) {
 			daHeight:      105,
 			expectedEpoch: 6,
 		},
+		{
+			name:          "epoch size 0",
+			daStartHeight: 100,
+			daEpochSize:   0,
+			daHeight:      105,
+			expectedEpoch: 1,
+		},
 	}
 
 	for _, tt := range tests {

From ee3a10bbeaac15f13184537bfb9954462c9dff52 Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Fri, 5 Dec 2025 21:51:09 +0100
Subject: [PATCH 14/19] cleanups

---
 sequencers/based/sequencer.go  | 24 +++++++++---------------
 sequencers/single/sequencer.go | 28 ++++++++++++++--------------
 2 files changed, 23 insertions(+), 29 deletions(-)

diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go
index 62be5265e5..2e10d15b3a 100644
--- a/sequencers/based/sequencer.go
+++ b/sequencers/based/sequencer.go
@@ -71,10 +71,14 @@ func NewBasedSequencer(
 		}
 	} else {
 		bs.checkpoint = checkpoint
-		bs.logger.Info().
-			Uint64("da_height", checkpoint.DAHeight).
-			Uint64("tx_index", checkpoint.TxIndex).
-			Msg("loaded based sequencer checkpoint from DB")
+		// If we had a non-zero tx index, we're resuming from a crash mid-epoch
+		// The transactions starting from that index are what we need
+		if checkpoint.TxIndex > 0 {
+			bs.logger.Debug().
+				Uint64("tx_index", checkpoint.TxIndex).
+				Uint64("da_height", checkpoint.DAHeight).
+				Msg("resuming from checkpoint within DA epoch")
+		}
 	}
 
 	return bs, nil
@@ -122,7 +126,6 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get
 
 		// Persist checkpoint
 		if err := s.checkpointStore.Save(ctx, s.checkpoint); err != nil {
-			s.logger.Error().Err(err).Msg("failed to save checkpoint")
 			return nil, fmt.Errorf("failed to save checkpoint: %w", err)
 		}
 	}
@@ -156,8 +159,7 @@ func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64)
 			Msg("DA height from future, waiting for DA to produce block")
 		return currentDAHeight, nil
 	}
-	s.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions")
-	return currentDAHeight, err
+	return currentDAHeight, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err)
 }
 
 	// Validate and filter transactions
@@ -187,14 +189,6 @@ func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64)
 	// Cache the transactions for this DA epoch
 	s.currentBatchTxs = validTxs
 
-	// If we had a non-zero tx index, we're resuming from a crash mid-block
-	// The transactions starting from that index are what we need
-	if s.checkpoint.TxIndex > 0 {
-		s.logger.Info().
-			Uint64("tx_index", s.checkpoint.TxIndex).
-			Msg("resuming from checkpoint within DA epoch")
-	}
-
 	return forcedTxsEvent.EndDaHeight, nil
 }
 
diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go
index e568c726a9..4e9c777386 100644
--- a/sequencers/single/sequencer.go
+++ b/sequencers/single/sequencer.go
@@ -4,6 +4,7 @@ package single
 import (
 	"bytes"
 	"context"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"sync/atomic"
@@ -97,10 +98,14 @@ func NewSequencer(
 		}
 	} else {
 		s.checkpoint = checkpoint
-		logger.Info().
-			Uint64("da_height", checkpoint.DAHeight).
-			Uint64("tx_index", checkpoint.TxIndex).
-			Msg("loaded single sequencer checkpoint from DB")
+		// If we had a non-zero tx index, we're resuming from a crash mid-epoch
+		// The transactions starting from that index are what we need
+		if checkpoint.TxIndex > 0 {
+			s.logger.Debug().
+				Uint64("tx_index", checkpoint.TxIndex).
+				Uint64("da_height", checkpoint.DAHeight).
+				Msg("resuming from checkpoint within DA epoch")
+		}
 	}
 
 	return s, nil
@@ -179,7 +184,6 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB
 
 	// Persist checkpoint
 	if err := c.checkpointStore.Save(ctx, c.checkpoint); err != nil {
-		c.logger.Error().Err(err).Msg("failed to save checkpoint")
 		return nil, fmt.Errorf("failed to save checkpoint: %w", err)
 	}
 
@@ -209,7 +213,10 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB
 			// Would exceed limit, return remaining txs to the front of the queue
 			excludedBatch := coresequencer.Batch{Transactions: batch.Transactions[i:]}
 			if err := c.queue.Prepend(ctx, excludedBatch); err != nil {
+				// The txs will be lost forever, but we shouldn't halt:
+				// halting doesn't add any value here.
 				c.logger.Error().Err(err).
+					Str("tx", hex.EncodeToString(tx)).
 					Int("excluded_count", len(batch.Transactions)-i).
Msg("failed to prepend excluded transactions back to queue") } else { @@ -316,8 +323,8 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint c.cachedForcedInclusionTxs = [][]byte{} return currentDAHeight, nil } - c.logger.Error().Err(err).Uint64("da_height", currentDAHeight).Msg("failed to retrieve forced inclusion transactions") - return currentDAHeight, err + + return currentDAHeight, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) } // Validate and filter transactions @@ -346,13 +353,6 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint // Cache the transactions c.cachedForcedInclusionTxs = validTxs - // If we had a non-zero tx index, we're resuming from a crash mid-processing - if c.checkpoint.TxIndex > 0 { - c.logger.Info(). - Uint64("tx_index", c.checkpoint.TxIndex). - Msg("resuming from checkpoint within forced inclusion batch") - } - return forcedTxsEvent.EndDaHeight, nil } From 3096c5e2655b85099bdad3be420d9ff9ab0600a0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 8 Dec 2025 12:51:25 +0100 Subject: [PATCH 15/19] test: add empty da epoch test --- sequencers/single/sequencer_test.go | 86 +++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index a4d8ca40fd..22d02ef80a 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -1062,3 +1062,89 @@ func TestSequencer_CheckpointPersistence_CrashRecovery(t *testing.T) { t.Log("✅ Checkpoint system successfully prevented re-execution of DA transactions after crash") mockFI.AssertExpectations(t) } + +func TestSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { + db := ds.NewMapDatastore() + ctx := context.Background() + + mockRetriever := new(MockForcedInclusionRetriever) + + // First DA epoch returns empty transactions + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)). + Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 105, + }, nil).Once() + + // Second DA epoch also returns empty transactions + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)). 
+		Return(&block.ForcedInclusionEvent{
+			Txs:           [][]byte{},
+			StartDaHeight: 106,
+			EndDaHeight:   111,
+		}, nil).Once()
+
+	gen := genesis.Genesis{
+		ChainID:                "test",
+		DAStartHeight:          100,
+		DAEpochForcedInclusion: 5,
+	}
+
+	seq, err := NewSequencer(
+		ctx,
+		zerolog.Nop(),
+		db,
+		nil,
+		[]byte("test"),
+		1*time.Second,
+		true,
+		1000,
+		mockRetriever,
+		gen,
+	)
+	require.NoError(t, err)
+
+	defer func() {
+		err := db.Close()
+		if err != nil {
+			t.Fatalf("Failed to close datastore: %v", err)
+		}
+	}()
+
+	req := coresequencer.GetNextBatchRequest{
+		Id:            seq.Id,
+		MaxBytes:      1000000,
+		LastBatchData: nil,
+	}
+
+	// Initial DA height should be 100
+	assert.Equal(t, uint64(100), seq.GetDAHeight())
+	assert.Equal(t, uint64(100), seq.checkpoint.DAHeight)
+
+	// First batch - empty DA epoch at heights 100-105
+	resp, err := seq.GetNextBatch(ctx, req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Batch)
+	assert.Equal(t, 0, len(resp.Batch.Transactions))
+
+	// DA height should have increased to 106 even though no transactions were processed
+	assert.Equal(t, uint64(106), seq.GetDAHeight())
+	assert.Equal(t, uint64(106), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
+
+	// Second batch - empty DA epoch at heights 106-111
+	resp, err = seq.GetNextBatch(ctx, req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Batch)
+	assert.Equal(t, 0, len(resp.Batch.Transactions))
+
+	// DA height should have increased to 112
+	assert.Equal(t, uint64(112), seq.GetDAHeight())
+	assert.Equal(t, uint64(112), seq.checkpoint.DAHeight)
+	assert.Equal(t, uint64(0), seq.checkpoint.TxIndex)
+
+	mockRetriever.AssertExpectations(t)
+}

From 48ca2daf14112df7481e40140ff401c55dbe6091 Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Mon, 8 Dec 2025 13:17:29 +0100
Subject: [PATCH 16/19] align tests

---
 sequencers/based/sequencer_test.go | 355 ++++++++++++-----------------
 1 file changed, 145 insertions(+), 210 deletions(-)

diff --git a/sequencers/based/sequencer_test.go b/sequencers/based/sequencer_test.go
index bb214b363d..30a4bd6118 100644
--- a/sequencers/based/sequencer_test.go
+++ b/sequencers/based/sequencer_test.go
@@ -2,9 +2,7 @@ package based
 
 import (
 	"context"
-	"errors"
 	"testing"
-	"time"
 
 	ds "github.com/ipfs/go-datastore"
 	syncds "github.com/ipfs/go-datastore/sync"
@@ -16,98 +14,42 @@ import (
 	"github.com/evstack/ev-node/block"
 	coreda "github.com/evstack/ev-node/core/da"
 	coresequencer "github.com/evstack/ev-node/core/sequencer"
-	"github.com/evstack/ev-node/pkg/config"
 	"github.com/evstack/ev-node/pkg/genesis"
 )
 
-// MockDA is a mock implementation of DA for testing
-type MockDA struct {
+// MockForcedInclusionRetriever is a mock implementation of ForcedInclusionRetriever for testing
+type MockForcedInclusionRetriever struct {
 	mock.Mock
 }
 
-func (m *MockDA) Submit(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte) ([][]byte, error) {
-	args := m.Called(ctx, blobs, gasPrice, namespace)
+func (m *MockForcedInclusionRetriever) RetrieveForcedIncludedTxs(ctx context.Context, daHeight uint64) (*block.ForcedInclusionEvent, error) {
+	args := m.Called(ctx, daHeight)
 	if args.Get(0) == nil {
 		return nil, args.Error(1)
 	}
-	return args.Get(0).([][]byte), args.Error(1)
-}
-
-func (m *MockDA) SubmitWithOptions(ctx context.Context, blobs [][]byte, gasPrice float64, namespace []byte, options []byte) ([][]byte, error) {
-	args := m.Called(ctx, blobs, gasPrice, namespace, options)
-	if args.Get(0) == nil {
-		return nil, args.Error(1)
-	}
- return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - args := m.Called(ctx, height, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*coreda.GetIDsResult), args.Error(1) -} - -func (m *MockDA) Get(ctx context.Context, ids [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) -} - -func (m *MockDA) GetProofs(ctx context.Context, ids [][]byte, namespace []byte) ([]coreda.Proof, error) { - args := m.Called(ctx, ids, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]coreda.Proof), args.Error(1) -} - -func (m *MockDA) Validate(ctx context.Context, ids [][]byte, proofs []coreda.Proof, namespace []byte) ([]bool, error) { - args := m.Called(ctx, ids, proofs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([]bool), args.Error(1) -} - -func (m *MockDA) Commit(ctx context.Context, blobs [][]byte, namespace []byte) ([][]byte, error) { - args := m.Called(ctx, blobs, namespace) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).([][]byte), args.Error(1) + return args.Get(0).(*block.ForcedInclusionEvent), args.Error(1) } // createTestSequencer is a helper function to create a sequencer for testing -func createTestSequencer(t *testing.T, mockDA *MockDA, cfg config.Config, gen genesis.Genesis) *BasedSequencer { +func createTestSequencer(t *testing.T, mockRetriever *MockForcedInclusionRetriever, gen genesis.Genesis) *BasedSequencer { t.Helper() - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) // Create in-memory datastore db := syncds.MutexWrap(ds.NewMapDatastore()) - seq, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) + seq, err := NewBasedSequencer(context.Background(), mockRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) return seq } func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { - mockDA := new(MockDA) + mockRetriever := new(MockForcedInclusionRetriever) gen := genesis.Genesis{ ChainID: "test-chain", DAEpochForcedInclusion: 10, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) // Submit should succeed but be ignored req := coresequencer.SubmitBatchTxsRequest{ @@ -128,12 +70,12 @@ func TestBasedSequencer_SubmitBatchTxs(t *testing.T) { func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, - Timestamp: time.Now(), + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs, + StartDaHeight: 100, + EndDaHeight: 100, }, nil) - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) gen := genesis.Genesis{ ChainID: "test-chain", @@ -141,12 +83,7 @@ func 
TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -165,14 +102,15 @@ func TestBasedSequencer_GetNextBatch_WithForcedTxs(t *testing.T) { assert.Equal(t, uint64(101), seq.checkpoint.DAHeight) assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{}, - Timestamp: time.Now(), + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, }, nil) gen := genesis.Genesis{ @@ -181,12 +119,7 @@ func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -200,40 +133,32 @@ func TestBasedSequencer_GetNextBatch_EmptyDA(t *testing.T) { // Should return empty batch when DA has no transactions assert.Equal(t, 0, len(resp.Batch.Transactions)) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_NotConfigured(t *testing.T) { - mockDA := new(MockDA) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, block.ErrForceInclusionNotConfigured) gen := genesis.Genesis{ ChainID: "test-chain", DAStartHeight: 100, - DAEpochForcedInclusion: 0, // Not configured + DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "" // Empty to trigger not configured - - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - // Create in-memory datastore - db := syncds.MutexWrap(ds.NewMapDatastore()) - - seq, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) - require.NoError(t, err) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, LastBatchData: nil, } - _, err = seq.GetNextBatch(context.Background(), req) + resp, err := seq.GetNextBatch(context.Background(), req) require.Error(t, err) - assert.Contains(t, err.Error(), "forced inclusion namespace not configured") + require.Nil(t, resp) + assert.ErrorIs(t, err, block.ErrForceInclusionNotConfigured) + + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { @@ -243,12 +168,12 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { tx3 := make([]byte, 200) testBlobs := [][]byte{tx1, tx2, tx3} - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), 
mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id1"), []byte("id2"), []byte("id3")}, - Timestamp: time.Now(), + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs, + StartDaHeight: 100, + EndDaHeight: 100, }, nil) - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) gen := genesis.Genesis{ ChainID: "test-chain", @@ -256,12 +181,7 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) // First call with MaxBytes that fits only first 2 transactions req := coresequencer.GetNextBatchRequest{ @@ -295,27 +215,27 @@ func TestBasedSequencer_GetNextBatch_WithMaxBytes(t *testing.T) { assert.Equal(t, uint64(101), seq.checkpoint.DAHeight) assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_MultipleDABlocks(t *testing.T) { testBlobs1 := [][]byte{[]byte("tx1"), []byte("tx2")} testBlobs2 := [][]byte{[]byte("tx3"), []byte("tx4")} - mockDA := new(MockDA) + mockRetriever := new(MockForcedInclusionRetriever) // First DA block - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, - Timestamp: time.Now(), + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs1, + StartDaHeight: 100, + EndDaHeight: 100, }, nil).Once() - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs1, nil).Once() // Second DA block - mockDA.On("GetIDs", mock.Anything, uint64(101), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id3"), []byte("id4")}, - Timestamp: time.Now(), + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs2, + StartDaHeight: 101, + EndDaHeight: 101, }, nil).Once() - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs2, nil).Once() gen := genesis.Genesis{ ChainID: "test-chain", @@ -323,12 +243,7 @@ func TestBasedSequencer_GetNextBatch_MultipleDABlocks(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -353,14 +268,12 @@ func TestBasedSequencer_GetNextBatch_MultipleDABlocks(t *testing.T) { assert.Equal(t, []byte("tx4"), resp.Batch.Transactions[1]) assert.Equal(t, uint64(102), seq.checkpoint.DAHeight) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_ResumesFromCheckpoint(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")} - - mockDA := new(MockDA) - // No DA calls expected since we manually set the state + mockRetriever := new(MockForcedInclusionRetriever) gen := genesis.Genesis{ ChainID: "test-chain", @@ -368,12 +281,7 @@ func 
TestBasedSequencer_GetNextBatch_ResumesFromCheckpoint(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) // Simulate processing first transaction (resuming from checkpoint after restart) seq.checkpoint.DAHeight = 100 @@ -399,16 +307,16 @@ func TestBasedSequencer_GetNextBatch_ResumesFromCheckpoint(t *testing.T) { } func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T) { - // Create a transaction larger than max bytes - largeTx := make([]byte, 2000000) // 2MB + // Create a transaction that exceeds maxBytes + largeTx := make([]byte, 2000) testBlobs := [][]byte{largeTx} - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id1")}, - Timestamp: time.Now(), + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs, + StartDaHeight: 100, + EndDaHeight: 100, }, nil) - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) gen := genesis.Genesis{ ChainID: "test-chain", @@ -416,12 +324,7 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000, // Much smaller than the transaction @@ -435,22 +338,17 @@ func TestBasedSequencer_GetNextBatch_ForcedInclusionExceedsMaxBytes(t *testing.T // Should return empty batch since transaction exceeds max bytes assert.Equal(t, 0, len(resp.Batch.Transactions)) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_VerifyBatch(t *testing.T) { - mockDA := new(MockDA) + mockRetriever := new(MockForcedInclusionRetriever) gen := genesis.Genesis{ ChainID: "test-chain", DAEpochForcedInclusion: 10, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.VerifyBatchRequest{ Id: []byte("test-chain"), @@ -465,19 +363,14 @@ func TestBasedSequencer_VerifyBatch(t *testing.T) { } func TestBasedSequencer_SetDAHeight(t *testing.T) { - mockDA := new(MockDA) + mockRetriever := new(MockForcedInclusionRetriever) gen := genesis.Genesis{ ChainID: "test-chain", DAStartHeight: 100, DAEpochForcedInclusion: 10, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) // Initial height from genesis assert.Equal(t, uint64(100), seq.GetDAHeight()) @@ -488,8 +381,8 @@ func TestBasedSequencer_SetDAHeight(t *testing.T) { } func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), 
mock.Anything).Return(nil, errors.New("DA connection error")) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, block.ErrForceInclusionNotConfigured) gen := genesis.Genesis{ ChainID: "test-chain", @@ -497,30 +390,24 @@ func TestBasedSequencer_GetNextBatch_ErrorHandling(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, LastBatchData: nil, } - // DA errors are handled gracefully by returning empty batch and retrying resp, err := seq.GetNextBatch(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, resp) - assert.Equal(t, 0, len(resp.Batch.Transactions), "Should return empty batch on DA error") + require.Error(t, err) + require.Nil(t, resp) + assert.ErrorIs(t, err, block.ErrForceInclusionNotConfigured) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(nil, coreda.ErrHeightFromFuture) + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(nil, coreda.ErrHeightFromFuture) gen := genesis.Genesis{ ChainID: "test-chain", @@ -528,12 +415,7 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - - seq := createTestSequencer(t, mockDA, cfg, gen) + seq := createTestSequencer(t, mockRetriever, gen) req := coresequencer.GetNextBatchRequest{ MaxBytes: 1000000, @@ -549,18 +431,18 @@ func TestBasedSequencer_GetNextBatch_HeightFromFuture(t *testing.T) { // DA height should stay the same assert.Equal(t, uint64(100), seq.checkpoint.DAHeight) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) } func TestBasedSequencer_CheckpointPersistence(t *testing.T) { testBlobs := [][]byte{[]byte("tx1"), []byte("tx2")} - mockDA := new(MockDA) - mockDA.On("GetIDs", mock.Anything, uint64(100), mock.Anything).Return(&coreda.GetIDsResult{ - IDs: []coreda.ID{[]byte("id1"), []byte("id2")}, - Timestamp: time.Now(), + mockRetriever := new(MockForcedInclusionRetriever) + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: testBlobs, + StartDaHeight: 100, + EndDaHeight: 100, }, nil) - mockDA.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(testBlobs, nil) gen := genesis.Genesis{ ChainID: "test-chain", @@ -568,19 +450,11 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { DAEpochForcedInclusion: 1, } - cfg := config.DefaultConfig() - cfg.DA.Namespace = "test-ns" - cfg.DA.DataNamespace = "test-data-ns" - cfg.DA.ForcedInclusionNamespace = "test-fi-ns" - // Create persistent datastore db := syncds.MutexWrap(ds.NewMapDatastore()) // Create first sequencer - daClient := block.NewDAClient(mockDA, cfg, zerolog.Nop()) - fiRetriever := block.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) - - seq1, err := NewBasedSequencer(context.Background(), 
fiRetriever, db, gen, zerolog.Nop()) + seq1, err := NewBasedSequencer(context.Background(), mockRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) req := coresequencer.GetNextBatchRequest{ @@ -595,12 +469,73 @@ func TestBasedSequencer_CheckpointPersistence(t *testing.T) { assert.Equal(t, 2, len(resp.Batch.Transactions)) // Create a new sequencer with the same datastore (simulating restart) - seq2, err := NewBasedSequencer(context.Background(), fiRetriever, db, gen, zerolog.Nop()) + seq2, err := NewBasedSequencer(context.Background(), mockRetriever, db, gen, zerolog.Nop()) require.NoError(t, err) // Checkpoint should be loaded from DB assert.Equal(t, uint64(101), seq2.checkpoint.DAHeight) assert.Equal(t, uint64(0), seq2.checkpoint.TxIndex) - mockDA.AssertExpectations(t) + mockRetriever.AssertExpectations(t) +} + +func TestBasedSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { + mockRetriever := new(MockForcedInclusionRetriever) + + // First DA block returns empty transactions + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(100)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 100, + EndDaHeight: 100, + }, nil).Once() + + // Second DA block also returns empty transactions + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)).Return(&block.ForcedInclusionEvent{ + Txs: [][]byte{}, + StartDaHeight: 101, + EndDaHeight: 101, + }, nil).Once() + + gen := genesis.Genesis{ + ChainID: "test-chain", + DAStartHeight: 100, + DAEpochForcedInclusion: 1, + } + + seq := createTestSequencer(t, mockRetriever, gen) + + req := coresequencer.GetNextBatchRequest{ + MaxBytes: 1000000, + LastBatchData: nil, + } + + // Initial DA height should be 100 + assert.Equal(t, uint64(100), seq.GetDAHeight()) + assert.Equal(t, uint64(100), seq.checkpoint.DAHeight) + + // First batch - empty DA block at height 100 + resp, err := seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + // DA height should have increased to 101 even though no transactions were processed + assert.Equal(t, uint64(101), seq.GetDAHeight()) + assert.Equal(t, uint64(101), seq.checkpoint.DAHeight) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) + + // Second batch - empty DA block at height 101 + resp, err = seq.GetNextBatch(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Batch) + assert.Equal(t, 0, len(resp.Batch.Transactions)) + + // DA height should have increased to 102 + assert.Equal(t, uint64(102), seq.GetDAHeight()) + assert.Equal(t, uint64(102), seq.checkpoint.DAHeight) + assert.Equal(t, uint64(0), seq.checkpoint.TxIndex) + + mockRetriever.AssertExpectations(t) } From ff4727a9b66cf09a4ab4068ce569187a64d3f872 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 8 Dec 2025 15:35:26 +0100 Subject: [PATCH 17/19] fix da increasing --- sequencers/based/sequencer.go | 11 +++++------ sequencers/single/sequencer.go | 8 ++++---- sequencers/single/sequencer_test.go | 2 +- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 2e10d15b3a..5b849b8a39 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -110,9 +110,8 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get batch := s.createBatchFromCheckpoint(req.MaxBytes) // Update checkpoint with 
how many transactions we consumed - txCount := uint64(len(batch.Transactions)) - if txCount > 0 { - s.checkpoint.TxIndex += txCount + if daHeight > 0 { + s.checkpoint.TxIndex += uint64(len(batch.Transactions)) // If we've consumed all transactions from this DA epoch, move to next if s.checkpoint.TxIndex >= uint64(len(s.currentBatchTxs)) { @@ -150,16 +149,16 @@ func (s *BasedSequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) if err != nil { // Check if forced inclusion is not configured if errors.Is(err, block.ErrForceInclusionNotConfigured) { - return currentDAHeight, block.ErrForceInclusionNotConfigured + return 0, block.ErrForceInclusionNotConfigured } else if errors.Is(err, coreda.ErrHeightFromFuture) { // If we get a height from future error, stay at current position // We'll retry the same height on the next call until DA produces that block s.logger.Debug(). Uint64("da_height", currentDAHeight). Msg("DA height from future, waiting for DA to produce block") - return currentDAHeight, nil + return 0, nil } - return currentDAHeight, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) + return 0, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) } // Validate and filter transactions diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index 4e9c777386..f169a5bb18 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -169,7 +169,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB } // Update checkpoint after consuming forced inclusion transactions - if len(forcedTxs) > 0 { + if daHeight > 0 { c.checkpoint.TxIndex += uint64(len(forcedTxs)) // If we've consumed all transactions from this DA epoch, move to next @@ -317,14 +317,14 @@ func (c *Sequencer) fetchNextDAEpoch(ctx context.Context, maxBytes uint64) (uint c.logger.Debug(). Uint64("da_height", currentDAHeight). Msg("DA height from future, waiting for DA to produce block") - return currentDAHeight, nil + return 0, nil } else if errors.Is(err, block.ErrForceInclusionNotConfigured) { // Forced inclusion not configured, continue without forced txs c.cachedForcedInclusionTxs = [][]byte{} - return currentDAHeight, nil + return 0, nil } - return currentDAHeight, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) + return 0, fmt.Errorf("failed to retrieve forced inclusion transactions: %w", err) } // Validate and filter transactions diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 22d02ef80a..6124642acb 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -1078,7 +1078,7 @@ func TestSequencer_GetNextBatch_EmptyDABatch_IncreasesDAHeight(t *testing.T) { }, nil).Once() // Second DA epoch also returns empty transactions - mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(101)). + mockRetriever.On("RetrieveForcedIncludedTxs", mock.Anything, uint64(106)). 
Return(&block.ForcedInclusionEvent{ Txs: [][]byte{}, StartDaHeight: 106, From 9d7601c1a53dbe92b3d75c2afb2d876f8baa23a0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 8 Dec 2025 15:37:02 +0100 Subject: [PATCH 18/19] allow other processing --- sequencers/based/sequencer.go | 2 +- sequencers/single/sequencer.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencers/based/sequencer.go b/sequencers/based/sequencer.go index 5b849b8a39..8845183e64 100644 --- a/sequencers/based/sequencer.go +++ b/sequencers/based/sequencer.go @@ -110,7 +110,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get batch := s.createBatchFromCheckpoint(req.MaxBytes) // Update checkpoint with how many transactions we consumed - if daHeight > 0 { + if daHeight > 0 || len(batch.Transactions) > 0 { s.checkpoint.TxIndex += uint64(len(batch.Transactions)) // If we've consumed all transactions from this DA epoch, move to next diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index f169a5bb18..8b114e1e06 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -169,7 +169,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB } // Update checkpoint after consuming forced inclusion transactions - if daHeight > 0 { + if daHeight > 0 || len(forcedTxs) > 0 { c.checkpoint.TxIndex += uint64(len(forcedTxs)) // If we've consumed all transactions from this DA epoch, move to next From feb309b97b72ccb2b30df06f1343483519ded3fd Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 8 Dec 2025 15:37:37 +0100 Subject: [PATCH 19/19] chore: revert autodoc change --- docs/guides/migrating-to-ev-abci.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/guides/migrating-to-ev-abci.md b/docs/guides/migrating-to-ev-abci.md index eb6abcd9e0..f49ba6df6f 100644 --- a/docs/guides/migrating-to-ev-abci.md +++ b/docs/guides/migrating-to-ev-abci.md @@ -41,9 +41,9 @@ import ( ) ``` -1. Add the migration manager keeper to your app struct -2. Register the module in your module manager -3. Configure the migration manager in your app initialization +2. Add the migration manager keeper to your app struct +3. Register the module in your module manager +4. Configure the migration manager in your app initialization ### Step 2: Replace Staking Module with Wrapper
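
Note on the checkpoint-advance rule that the last two code patches converge on,
condensed into a minimal, self-contained Go sketch. This is illustrative only:
the checkpoint struct mirrors the DAHeight/TxIndex pair both sequencers persist,
while advance, epochEndHeight, consumed, and cached are hypothetical names for
this sketch, not part of the ev-node API.

package main

import "fmt"

// checkpoint mirrors the DAHeight/TxIndex pair persisted by the sequencers.
type checkpoint struct {
	DAHeight uint64
	TxIndex  uint64
}

// advance applies the rule from the two patches above: only touch the
// checkpoint when an epoch was actually fetched (epochEndHeight > 0) or
// transactions were consumed, and once every cached tx has been handed out,
// jump past the fetched epoch and reset the index.
func advance(cp *checkpoint, epochEndHeight uint64, consumed, cached uint64) {
	if epochEndHeight > 0 || consumed > 0 {
		cp.TxIndex += consumed
		if cp.TxIndex >= cached {
			cp.DAHeight = epochEndHeight + 1
			cp.TxIndex = 0
		}
	}
}

func main() {
	cp := &checkpoint{DAHeight: 100}

	// Empty epoch covering heights 100-105: nothing consumed, but the DA
	// height still moves to 106, matching the EmptyDABatch tests.
	advance(cp, 105, 0, 0)
	fmt.Println(cp.DAHeight, cp.TxIndex) // 106 0

	// Height-from-future: nothing fetched, nothing consumed, no movement.
	advance(cp, 0, 0, 0)
	fmt.Println(cp.DAHeight, cp.TxIndex) // 106 0
}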