From a680309bc14da336694be964fb70fb7e94739236 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 9 Dec 2025 15:49:14 +0100 Subject: [PATCH 1/7] feat(syncing): add grace period for missing force txs inclusion --- block/internal/common/metrics.go | 23 ++ block/internal/syncing/syncer.go | 42 ++- .../syncing/syncer_forced_inclusion_test.go | 138 ++++++++- .../adr/adr-019-forced-inclusion-mechanism.md | 279 +++++++++++------- pkg/genesis/genesis.go | 23 +- 5 files changed, 381 insertions(+), 124 deletions(-) diff --git a/block/internal/common/metrics.go b/block/internal/common/metrics.go index 2a4c821774..13a0419432 100644 --- a/block/internal/common/metrics.go +++ b/block/internal/common/metrics.go @@ -65,6 +65,10 @@ type Metrics struct { DAInclusionHeight metrics.Gauge PendingHeadersCount metrics.Gauge PendingDataCount metrics.Gauge + + // Forced inclusion metrics + ForcedInclusionTxsInGracePeriod metrics.Gauge // Number of forced inclusion txs currently in grace period + ForcedInclusionTxsMalicious metrics.Counter // Total number of forced inclusion txs marked as malicious } // PrometheusMetrics returns Metrics built using Prometheus client library @@ -182,6 +186,21 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Number of data blocks pending DA submission", }, labels).With(labelsAndValues...) + // Forced inclusion metrics + m.ForcedInclusionTxsInGracePeriod = prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "forced_inclusion_txs_in_grace_period", + Help: "Number of forced inclusion transactions currently in grace period (past epoch end but within grace boundary)", + }, labels).With(labelsAndValues...) + + m.ForcedInclusionTxsMalicious = prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "forced_inclusion_txs_malicious_total", + Help: "Total number of forced inclusion transactions marked as malicious (past grace boundary)", + }, labels).With(labelsAndValues...) + // DA Submitter metrics m.DASubmitterPendingBlobs = prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -246,6 +265,10 @@ func NopMetrics() *Metrics { DASubmitterLastFailure: make(map[DASubmitterFailureReason]metrics.Gauge), DASubmitterPendingBlobs: discard.NewGauge(), DASubmitterResends: discard.NewCounter(), + + // Forced inclusion metrics + ForcedInclusionTxsInGracePeriod: discard.NewGauge(), + ForcedInclusionTxsMalicious: discard.NewCounter(), } // Initialize maps with no-op metrics diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 2c45fd1637..90fbaf55bc 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -741,16 +741,44 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. } // Check if we've moved past any epoch boundaries with pending txs + // Grace period: Allow forced inclusion txs from epoch N to be included in epoch N+1, N+2, etc. + // Only flag as malicious if past grace boundary to prevent false positives during DA unavailability. var maliciousTxs, remainingPending []pendingForcedInclusionTx + var txsInGracePeriod int for _, pending := range stillPending { - // If current DA height is past this epoch's end, these txs should have been included - if currentState.DAHeight > pending.EpochEnd { + // Calculate grace boundary: epoch end + (grace periods × epoch size) + graceBoundary := pending.EpochEnd + (s.genesis.ForcedInclusionGracePeriod * s.genesis.DAEpochForcedInclusion) + + // If current DA height is past the grace boundary, these txs should have been included + if currentState.DAHeight > graceBoundary { maliciousTxs = append(maliciousTxs, pending) + s.logger.Warn(). + Uint64("current_da_height", currentState.DAHeight). + Uint64("epoch_end", pending.EpochEnd). + Uint64("grace_boundary", graceBoundary). + Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). + Str("tx_hash", pending.TxHash[:16]). + Msg("forced inclusion transaction past grace boundary - marking as malicious") } else { remainingPending = append(remainingPending, pending) + + // Track if we're in the grace period (past epoch end but within grace boundary) + if currentState.DAHeight > pending.EpochEnd { + txsInGracePeriod++ + s.logger.Info(). + Uint64("current_da_height", currentState.DAHeight). + Uint64("epoch_end", pending.EpochEnd). + Uint64("grace_boundary", graceBoundary). + Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). + Str("tx_hash", pending.TxHash[:16]). + Msg("forced inclusion transaction in grace period - not yet malicious") + } } } + // Update metrics for grace period tracking + s.metrics.ForcedInclusionTxsInGracePeriod.Set(float64(txsInGracePeriod)) + // Update pending map - clear old entries and store only remaining pending s.pendingForcedInclusionTxs.Range(func(key, value any) bool { s.pendingForcedInclusionTxs.Delete(key) @@ -760,14 +788,18 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. s.pendingForcedInclusionTxs.Store(pending.TxHash, pending) } - // If there are transactions from past epochs that weren't included, sequencer is malicious + // If there are transactions past grace boundary that weren't included, sequencer is malicious if len(maliciousTxs) > 0 { + // Update metrics for malicious detection + s.metrics.ForcedInclusionTxsMalicious.Add(float64(len(maliciousTxs))) + s.logger.Error(). Uint64("height", data.Height()). Uint64("current_da_height", currentState.DAHeight). Int("malicious_count", len(maliciousTxs)). - Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions from past epoch(s) not included") - return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions from past epoch(s) not included", len(maliciousTxs))) + Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). + Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions past grace boundary not included") + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (grace_periods=%d) not included", len(maliciousTxs), s.genesis.ForcedInclusionGracePeriod)) } // Log current state diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 741432eb28..432450b7a0 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -206,7 +206,7 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data2) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") + require.Contains(t, err.Error(), "past grace boundary") } func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { @@ -309,7 +309,7 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data2) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") + require.Contains(t, err.Error(), "past grace boundary") } func TestVerifyForcedInclusionTxs_NoForcedTransactions(t *testing.T) { @@ -759,5 +759,137 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data3) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "forced inclusion transactions from past epoch(s) not included") + require.Contains(t, err.Error(), "past grace boundary") +} + +// TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch tests the critical scenario where +// forced inclusion transactions cannot all be included before an epoch ends. +// This demonstrates that the system correctly detects malicious behavior when +// transactions remain pending after the epoch boundary. +func TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ + ChainID: "tchain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: addr, + DAStartHeight: 100, + DAEpochForcedInclusion: 3, // Epoch: [100, 102] + } + + cfg := config.DefaultConfig() + cfg.DA.ForcedInclusionNamespace = "nsForcedInclusion" + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain"). + Return([]byte("app0"), uint64(1024), nil).Once() + + mockDA := testmocks.NewMockDA(t) + + daClient := da.NewClient(da.Config{ + DA: mockDA, + Logger: zerolog.Nop(), + Namespace: cfg.DA.Namespace, + DataNamespace: cfg.DA.DataNamespace, + ForcedInclusionNamespace: cfg.DA.ForcedInclusionNamespace, + }) + daRetriever := NewDARetriever(daClient, cm, gen, zerolog.Nop()) + fiRetriever := da.NewForcedInclusionRetriever(daClient, gen, zerolog.Nop()) + + s := NewSyncer( + st, + mockExec, + daClient, + cm, + common.NopMetrics(), + cfg, + gen, + common.NewMockBroadcaster[*types.SignedHeader](t), + common.NewMockBroadcaster[*types.Data](t), + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + ) + s.daRetriever = daRetriever + s.fiRetriever = fiRetriever + + require.NoError(t, s.initializeState()) + s.ctx = context.Background() + + namespaceForcedInclusionBz := coreda.NamespaceFromString(cfg.DA.GetForcedInclusionNamespace()).Bytes() + + // Create 3 forced inclusion transactions + dataBin1, _ := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 2) + dataBin2, _ := makeSignedDataBytes(t, gen.ChainID, 11, addr, pub, signer, 2) + dataBin3, _ := makeSignedDataBytes(t, gen.ChainID, 12, addr, pub, signer, 2) + + // Mock DA retrieval for Epoch 1: [100, 102] + mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{ + IDs: [][]byte{[]byte("fi1"), []byte("fi2"), []byte("fi3")}, + Timestamp: time.Now(), + }, nil).Once() + + mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return([][]byte{dataBin1, dataBin2, dataBin3}, nil).Once() + + for height := uint64(101); height <= 102; height++ { + mockDA.EXPECT().GetIDs(mock.Anything, height, mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + } + + // Block at DA height 102 (epoch end): Only includes 2 of 3 txs + // The third tx remains pending - legitimate within the epoch + data1 := makeData(gen.ChainID, 1, 2) + data1.Txs[0] = types.Tx(dataBin1) + data1.Txs[1] = types.Tx(dataBin2) + + currentState := s.GetLastState() + currentState.DAHeight = 102 // At epoch end + + err = s.verifyForcedInclusionTxs(currentState, data1) + require.NoError(t, err, "smoothing within epoch should be allowed") + + // Verify 1 tx still pending + pendingCount := 0 + s.pendingForcedInclusionTxs.Range(func(key, value any) bool { + pendingCount++ + return true + }) + require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") + + // === CRITICAL TEST: Move to next epoch WITHOUT including the pending tx === + // Mock DA for next epoch [103, 105] with no forced txs + mockDA.EXPECT().GetIDs(mock.Anything, uint64(103), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + mockDA.EXPECT().GetIDs(mock.Anything, uint64(105), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Block at DA height 105 (next epoch end): Doesn't include the pending tx + data2 := makeData(gen.ChainID, 2, 1) + data2.Txs[0] = types.Tx([]byte("regular_tx_only")) + + currentState.DAHeight = 105 // Past previous epoch boundary [100, 102] + + // Should FAIL - forced tx from previous epoch wasn't included before epoch ended + err = s.verifyForcedInclusionTxs(currentState, data2) + require.Error(t, err, "should detect malicious sequencer when forced tx exceeds epoch") + require.Contains(t, err.Error(), "sequencer is malicious") + require.Contains(t, err.Error(), "past grace boundary") } diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index c8b63bb203..4e406f8545 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -5,6 +5,7 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. - 2025-11-10: Updated to reflect actual implementation +- 2025-12-09: Added documentation for ForcedInclusionGracePeriod parameter ## Context @@ -304,7 +305,7 @@ func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsR #### Syncer Verification -Full nodes verify forced inclusion in the sync process with support for transaction smoothing across multiple blocks: +Full nodes verify forced inclusion in the sync process with support for transaction smoothing across multiple blocks and a configurable grace period: ```go func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { @@ -347,11 +348,15 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error } } - // 5. Check for malicious behavior: pending txs past their epoch boundary + // 5. Check for malicious behavior: pending txs past their grace boundary + // Grace period provides tolerance for temporary DA unavailability var maliciousTxs, remainingPending []pendingForcedInclusionTx for _, pending := range stillPending { - // If current DA height is past this epoch's end, these txs MUST have been included - if currentState.DAHeight > pending.EpochEnd { + // Calculate grace boundary: epoch end + (grace periods × epoch size) + graceBoundary := pending.EpochEnd + (s.genesis.ForcedInclusionGracePeriod * s.genesis.DAEpochForcedInclusion) + + // If current DA height is past the grace boundary, these txs MUST have been included + if currentState.DAHeight > graceBoundary { maliciousTxs = append(maliciousTxs, pending) } else { remainingPending = append(remainingPending, pending) @@ -361,9 +366,9 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error // 6. Update pending map with only remaining valid pending txs pendingForcedInclusionTxs = remainingPending - // 7. Reject block if sequencer censored forced txs past epoch boundary + // 7. Reject block if sequencer censored forced txs past grace boundary if len(maliciousTxs) > 0 { - return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions from past epoch(s) not included", len(maliciousTxs)) + return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (grace_periods=%d) not included", len(maliciousTxs), s.genesis.ForcedInclusionGracePeriod) } return nil @@ -440,6 +445,120 @@ if errors.Is(err, coreda.ErrHeightFromFuture) { } ``` +#### Grace Period for Forced Inclusion + +The grace period mechanism provides tolerance for chain congestion while maintaining censorship resistance: + +**Problem**: If the DA layer experiences temporary unavailability or the chain congestion, the sequencer may be unable to fetch forced inclusion transactions from a completed epoch. Without a grace period, full nodes would immediately flag the sequencer as malicious. + +**Solution**: The `ForcedInclusionGracePeriod` parameter allows forced inclusion transactions from epoch N to be included during epochs N+1 through N+k (where k is the grace period) before being flagged as malicious. + +**Grace Boundary Calculation**: + +```go +graceBoundary := epochEnd + (ForcedInclusionGracePeriod * DAEpochForcedInclusion) + +// Example with ForcedInclusionGracePeriod = 1, DAEpochForcedInclusion = 50: +// - Epoch N ends at DA height 100 +// - Grace boundary = 100 + (1 * 50) = 150 +// - Transaction must be included by DA height 150 +// - If not included by height 151+, sequencer is malicious +``` + +**Configuration Recommendations**: + +- **Production (default)**: `ForcedInclusionGracePeriod = 1` + - Tolerates ~1 epoch of DA unavailability (e.g., 50 DA blocks) + - Balances censorship resistance with reliability +- **High Security / Reliable DA**: `ForcedInclusionGracePeriod = 0` + - Strict enforcement, no tolerance + - Requires 99.9%+ DA uptime + - Immediate detection of censorship +- **Unreliable DA**: `ForcedInclusionGracePeriod = 2+` + - Higher tolerance for DA outages + - Reduced censorship resistance (longer time to detect malicious behavior) + +**Verification Logic**: + +1. Forced inclusion transactions from epoch N are tracked with their epoch boundaries +2. Transactions not immediately included are added to pending queue +3. Each block, full nodes check if pending transactions are past their grace boundary +4. If `currentDAHeight > graceBoundary`, the sequencer is flagged as malicious +5. Transactions within the grace period remain in pending queue without error + +**Benefits**: + +- Prevents false positives during temporary DA outages +- Maintains censorship resistance (transactions must be included within grace window) +- Configurable trade-off between reliability and security +- Allows networks to adapt to their DA layer's reliability characteristics + +**Examples and Edge Cases**: + +Configuration: `DAEpochForcedInclusion = 50`, `ForcedInclusionGracePeriod = 1` + +_Example 1: Normal Inclusion (Within Same Epoch)_ + +``` +- Forced tx submitted to DA at height 75 (epoch 51-100) +- Sequencer fetches at height 101 (next epoch start) +- Sequencer includes tx in block at DA height 105 +- Result: ✅ Valid - included within same epoch +``` + +_Example 2: Grace Period Usage (Included in Next Epoch)_ + +``` +- Forced tx submitted to DA at height 75 (epoch 51-100) +- Sequencer fetches at height 101 +- DA temporarily unavailable, sequencer cannot fetch +- Sequencer includes tx at DA height 125 (epoch 101-150) +- Grace boundary = 100 + (1 × 50) = 150 +- Result: ✅ Valid - within grace period +``` + +_Example 3: Malicious Sequencer (Past Grace Boundary)_ + +``` +- Forced tx submitted to DA at height 75 (epoch 51-100) +- Sequencer fetches at height 101 +- Sequencer deliberately omits tx +- Block produced at DA height 151 (past grace boundary 150) +- Full node detects: currentDAHeight (151) > graceBoundary (150) +- Result: ❌ Block rejected, sequencer flagged as malicious +``` + +_Example 4: Strict Mode (Grace Period = 0)_ + +``` +- ForcedInclusionGracePeriod = 0 +- Forced tx submitted at height 75 (epoch 51-100) +- Sequencer must include by height 100 (epoch end) +- Block at height 101 without tx is rejected +- Result: Immediate censorship detection, requires high DA reliability +``` + +_Example 5: Multiple Pending Transactions_ + +``` +- Tx A from epoch ending at height 100, grace boundary 150 +- Tx B from epoch ending at height 150, grace boundary 200 +- Current DA height: 155 +- Tx A not included: ❌ Past grace boundary - malicious +- Tx B not included: ✅ Within grace period - still pending +- Result: Block rejected due to Tx A +``` + +_Example 6: Extended Grace Period (Grace Period = 2)_ + +``` +- ForcedInclusionGracePeriod = 2 +- Forced tx submitted at height 75 (epoch 51-100) +- Grace boundary = 100 + (2 × 50) = 200 +- Sequencer has until DA height 200 to include tx +- Result: More tolerance but delayed censorship detection +``` + #### Size Validation and Max Bytes Handling Both sequencers enforce strict size limits to prevent DoS and ensure batches never exceed the DA layer's limits: @@ -514,6 +633,12 @@ type Genesis struct { // Higher values reduce DA queries but increase latency // Lower values increase DA queries but improve responsiveness DAEpochForcedInclusion uint64 + // Number of additional epochs allowed for including forced inclusion transactions + // before marking the sequencer as malicious. Provides tolerance for temporary DA unavailability. + // Value of 0: Strict enforcement (no grace period) - requires 99.9% DA uptime + // Value of 1: Transactions from epoch N can be included through epoch N+1 (recommended) + // Value of 2+: Higher tolerance for unreliable DA environments + ForcedInclusionGracePeriod uint64 } type DAConfig struct { @@ -539,7 +664,8 @@ type NodeConfig struct { # genesis.json { "chain_id": "my-rollup", - "forced_inclusion_da_epoch": 10 # Scan 10 DA blocks at a time + "forced_inclusion_da_epoch": 10, # Scan 10 DA blocks at a time + "forced_inclusion_grace_period": 1 # Allow 1 epoch grace period (recommended for production) } # config.toml @@ -557,7 +683,8 @@ based_sequencer = false # Use traditional sequencer # genesis.json { "chain_id": "my-rollup", - "forced_inclusion_da_epoch": 5 # Scan 5 DA blocks at a time + "forced_inclusion_da_epoch": 5, # Scan 5 DA blocks at a time + "forced_inclusion_grace_period": 1 # Allow 1 epoch grace period (balances reliability and censorship detection) } # config.toml @@ -573,7 +700,6 @@ based_sequencer = true # Use based sequencer #### Single Sequencer Flow -``` 1. Timer triggers GetNextBatch 2. Fetch forced inclusion txs from DA (via DA Retriever) - Only at epoch boundaries @@ -581,11 +707,9 @@ based_sequencer = true # Use based sequencer 3. Get batch from mempool queue 4. Prepend forced txs to batch 5. Return batch for block production -``` #### Based Sequencer Flow -``` 1. Timer triggers GetNextBatch 2. Check transaction queue for buffered txs 3. If queue empty or epoch boundary: @@ -593,19 +717,28 @@ based_sequencer = true # Use based sequencer - Add to queue 4. Create batch from queue (respecting MaxBytes) 5. Return batch for block production -``` ### Full Node Verification Flow -``` 1. Receive block from DA or P2P 2. Before applying block: - a. Fetch forced inclusion txs from DA at block's DA height + a. Fetch forced inclusion txs from DA at block's DA height (epoch-based) b. Build map of transactions in block - c. Verify all forced txs are in block - d. If missing: reject block, flag malicious proposer + c. Check if pending forced txs from previous epochs are included + d. Add any new forced txs not yet included to pending queue + e. Calculate grace boundary for each pending tx: + graceBoundary = epochEnd + (ForcedInclusionGracePeriod × DAEpochForcedInclusion) + f. Check if any pending txs are past their grace boundary + g. If txs past grace boundary are not included: reject block, flag malicious proposer + h. If txs within grace period: keep in pending queue, allow block 3. Apply block if verification passes -``` + +**Grace Period Example** (with `ForcedInclusionGracePeriod = 1`, `DAEpochForcedInclusion = 50`): + +- Forced tx appears in epoch ending at DA height 100 +- Grace boundary = 100 + (1 × 50) = 150 +- Transaction can be included at any DA height from 101 to 150 +- At DA height 151+, if not included, sequencer is flagged as malicious ### Efficiency Considerations @@ -637,96 +770,20 @@ Every `DAEpochForcedInclusion` DA blocks **Attack Vectors**: -- **Censorship**: Mitigated by forced inclusion verification +### Security Considerations + +- **Censorship**: Mitigated by forced inclusion verification with grace period + - Transactions must be included within grace window (epoch + grace period) + - Full nodes detect and reject blocks from malicious sequencers + - Grace period = 0 provides immediate detection but requires high DA reliability + - Grace period = 1+ balances censorship resistance with operational tolerance - **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits - **Block Withholding**: Full nodes can fetch and verify from DA independently - **Oversized Batches**: Prevented by strict size validation at multiple levels - -### Testing Strategy - -#### Unit Tests - -1. **DA Retriever**: - - Epoch boundary calculations - - Height from future handling - - Blob size validation - - Empty epoch handling - -2. **Size Validation**: - - Individual blob size validation (absolute limit) - - Cumulative size checking (batch limit) - - Edge cases (empty blobs, exact limits, exceeding limits) - -3. **Single Sequencer**: - - Forced transaction prepending with size constraints - - Batch trimming when forced + batch exceeds MaxBytes - - Trimmed transactions returned to queue via Prepend - - Pending forced inclusion queue management - - DA height tracking - - Error handling - -4. **BatchQueue**: - - Prepend operation (empty queue, with items, after consuming) - - Multiple prepends (LIFO ordering) - - Space reuse before head position - -5. **Based Sequencer**: - - Queue management with size validation - - Batch size limits strictly enforced - - Transaction buffering across batches - - DA-only operation - - Always checking for new forced txs - -6. **Syncer Verification**: - - All forced txs included (pass) - - Missing forced txs (fail) - - No forced txs (pass) - -#### Integration Tests - -1. **Single Sequencer Integration**: - - Submit to mempool and forced inclusion - - Verify both included in block - - Forced txs appear first - -2. **Based Sequencer Integration**: - - Submit only to DA forced inclusion - - Verify block production - - Mempool submissions ignored - -3. **Verification Flow**: - - Full node rejects block missing forced tx - - Full node accepts block with all forced txs - -#### End-to-End Tests - -1. **User Flow**: - - User submits tx to forced inclusion namespace - - Sequencer includes tx in next epoch - - Full nodes verify inclusion - -2. **Based Rollup**: - - Start network with based sequencer - - Submit transactions to DA - - Verify block production and finalization - -3. **Censorship Resistance**: - - Sequencer ignores specific transaction - - User submits to forced inclusion - - Transaction included in next epoch - - Attempting to exclude causes block rejection - -### Breaking Changes - -1. **Sequencer Initialization**: Requires `DARetriever` and `Genesis` parameters -2. **Configuration**: New fields in `DAConfig` and `NodeConfig` -3. **Syncer**: New verification step in block processing - -**Migration Path**: - -- Forced inclusion is optional (enabled when namespace configured) -- Existing deployments work without configuration changes -- Can enable incrementally per network +- **Grace Period Attacks**: + - Malicious sequencer cannot indefinitely delay forced transactions + - Grace boundary is deterministic and enforced by all full nodes + - Longer grace periods extend time to detect censorship (trade-off) ## Status @@ -742,26 +799,28 @@ Accepted and Implemented 4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) 5. **Optional**: Forced inclusion can be disabled for permissioned deployments 6. **Efficient**: Epoch-based fetching minimizes DA queries -7. **Flexible**: Configurable epoch size allows tuning latency vs efficiency +7. **Flexible**: Configurable epoch size and grace period allow tuning latency vs reliability 8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections 9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost 10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures +11. **DA Fault Tolerance**: Grace period prevents false positives during temporary DA unavailability ### Negative 1. **Increased Latency**: Forced transactions subject to epoch boundaries 2. **DA Dependency**: Requires DA layer to support multiple namespaces 3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion -4. **Additional Complexity**: New component (DA Retriever) and verification logic -5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) +4. **Additional Complexity**: New component (DA Retriever) and verification logic with grace period tracking +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` and `ForcedInclusionGracePeriod` in genesis (consensus parameters) +6. **Grace Period Trade-off**: Longer grace periods delay censorship detection but improve operational reliability ### Neutral 1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) 2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path -3. **Monitoring**: Operators should monitor forced inclusion namespace usage -4. **Documentation**: Users need guidance on when to use forced inclusion -5. **Genesis Parameter**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis +3. **Monitoring**: Operators should monitor forced inclusion namespace usage and grace period metrics +4. **Documentation**: Users need guidance on when to use forced inclusion and grace period implications +5. **Genesis Parameters**: `DAEpochForcedInclusion` and `ForcedInclusionGracePeriod` are consensus parameters fixed at genesis ## References diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index b551e23105..e89b5c3aeb 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -21,6 +21,12 @@ type Genesis struct { // DaEpochForcedInclusion corresponds to the amount of DA blocks are considered an epochs // When forced inclusion is enabled, the epoch size determines at what frequency the forced included transactions are executed by the application. DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` + // ForcedInclusionGracePeriod is the number of additional epochs allowed for including forced inclusion transactions + // before marking the sequencer as malicious. This provides tolerance for temporary DA unavailability. + // A value of 0 means strict enforcement (no grace period). + // A value of 1 means transactions from epoch N can be included in epoch N+1 without being marked malicious. + // Recommended: 1 epoch (tolerates ~50s DA outages while maintaining censorship resistance). + ForcedInclusionGracePeriod uint64 `json:"forced_inclusion_grace_period"` } // NewGenesis creates a new Genesis instance. @@ -31,12 +37,13 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, - DAEpochForcedInclusion: 50, // Default epoch size + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size + ForcedInclusionGracePeriod: 1, // Default: 1 epoch grace period (recommended for production) } return genesis @@ -64,5 +71,9 @@ func (g Genesis) Validate() error { return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) } + // Note: ForcedInclusionGracePeriod can be 0 (strict mode) or positive (grace period enabled) + // A value of 0 means no tolerance for DA unavailability (halt or flag malicious immediately) + // A value of 1+ provides tolerance for temporary DA outages (recommended for production) + return nil } From 74b7b5fd9543a515591af32bef29927b66ee42ab Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Tue, 9 Dec 2025 16:10:51 +0100 Subject: [PATCH 2/7] cleanup --- block/internal/syncing/syncer.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 90fbaf55bc..0f824c7da2 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -749,7 +749,6 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. // Calculate grace boundary: epoch end + (grace periods × epoch size) graceBoundary := pending.EpochEnd + (s.genesis.ForcedInclusionGracePeriod * s.genesis.DAEpochForcedInclusion) - // If current DA height is past the grace boundary, these txs should have been included if currentState.DAHeight > graceBoundary { maliciousTxs = append(maliciousTxs, pending) s.logger.Warn(). @@ -761,22 +760,12 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. Msg("forced inclusion transaction past grace boundary - marking as malicious") } else { remainingPending = append(remainingPending, pending) - - // Track if we're in the grace period (past epoch end but within grace boundary) if currentState.DAHeight > pending.EpochEnd { txsInGracePeriod++ - s.logger.Info(). - Uint64("current_da_height", currentState.DAHeight). - Uint64("epoch_end", pending.EpochEnd). - Uint64("grace_boundary", graceBoundary). - Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). - Str("tx_hash", pending.TxHash[:16]). - Msg("forced inclusion transaction in grace period - not yet malicious") } } } - // Update metrics for grace period tracking s.metrics.ForcedInclusionTxsInGracePeriod.Set(float64(txsInGracePeriod)) // Update pending map - clear old entries and store only remaining pending @@ -790,7 +779,6 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. // If there are transactions past grace boundary that weren't included, sequencer is malicious if len(maliciousTxs) > 0 { - // Update metrics for malicious detection s.metrics.ForcedInclusionTxsMalicious.Add(float64(len(maliciousTxs))) s.logger.Error(). From 3a29a38f57f2dbfa8fd03dc04a09791b9e7990ed Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 11 Dec 2025 13:05:51 +0100 Subject: [PATCH 3/7] Add dynamic forced inclusion grace period --- block/internal/syncing/syncer.go | 187 ++++++++-- .../syncer_dynamic_grace_period_test.go | 328 ++++++++++++++++++ .../syncing/syncer_forced_inclusion_test.go | 136 ++------ pkg/genesis/genesis.go | 23 +- 4 files changed, 533 insertions(+), 141 deletions(-) create mode 100644 block/internal/syncing/syncer_dynamic_grace_period_test.go diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 0f824c7da2..6c70e54b36 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "errors" "fmt" + "math" "sync" "sync/atomic" "time" @@ -18,6 +19,7 @@ import ( coreda "github.com/evstack/ev-node/core/da" coreexecutor "github.com/evstack/ev-node/core/execution" + seqcommon "github.com/evstack/ev-node/sequencers/common" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" @@ -28,6 +30,47 @@ import ( "github.com/evstack/ev-node/types" ) +// forcedInclusionGracePeriodConfig contains internal configuration for forced inclusion grace periods. +type forcedInclusionGracePeriodConfig struct { + // basePeriod is the base number of additional epochs allowed for including forced inclusion transactions + // before marking the sequencer as malicious. This provides tolerance for temporary chain congestion. + // A value of 0 means strict enforcement (no grace period). + // A value of 1 means transactions from epoch N can be included in epoch N+1 without being marked malicious. + // Recommended: 1 epoch. + basePeriod uint64 + + // dynamicMinMultiplier is the minimum multiplier for the base grace period. + // The actual grace period will be at least: basePeriod * dynamicMinMultiplier. + // Example: base=2, min=0.5 → minimum grace period is 1 epoch. + dynamicMinMultiplier float64 + + // dynamicMaxMultiplier is the maximum multiplier for the base grace period. + // The actual grace period will be at most: basePeriod * dynamicMaxMultiplier. + // Example: base=2, max=3.0 → maximum grace period is 6 epochs. + dynamicMaxMultiplier float64 + + // dynamicFullnessThreshold defines what percentage of block capacity is considered "full". + // When EMA of block fullness is above this threshold, grace period increases. + // When below, grace period decreases. Value should be between 0.0 and 1.0. + dynamicFullnessThreshold float64 + + // dynamicAdjustmentRate controls how quickly the grace period multiplier adapts. + // Higher values make it adapt faster to congestion changes. Value should be between 0.0 and 1.0. + // Recommended: 0.05 for gradual adjustment, 0.1 for faster response. + dynamicAdjustmentRate float64 +} + +// newForcedInclusionGracePeriodConfig returns the internal grace period configuration. +func newForcedInclusionGracePeriodConfig() forcedInclusionGracePeriodConfig { + return forcedInclusionGracePeriodConfig{ + basePeriod: 1, // 1 epoch grace period + dynamicMinMultiplier: 0.5, // Minimum 0.5x base grace period + dynamicMaxMultiplier: 3.0, // Maximum 3x base grace period + dynamicFullnessThreshold: 0.8, // 80% capacity considered full + dynamicAdjustmentRate: 0.05, // 5% adjustment per block + } +} + // Syncer handles block synchronization from DA and P2P sources. type Syncer struct { // Core components @@ -66,6 +109,9 @@ type Syncer struct { // Forced inclusion tracking pendingForcedInclusionTxs sync.Map // map[string]pendingForcedInclusionTx + gracePeriodMultiplier *atomic.Pointer[float64] + blockFullnessEMA *atomic.Pointer[float64] + gracePeriodConfig forcedInclusionGracePeriodConfig // Lifecycle ctx context.Context @@ -102,22 +148,34 @@ func NewSyncer( daRetrieverHeight := &atomic.Uint64{} daRetrieverHeight.Store(genesis.DAStartHeight) + // Initialize dynamic grace period state + initialMultiplier := 1.0 + gracePeriodMultiplier := &atomic.Pointer[float64]{} + gracePeriodMultiplier.Store(&initialMultiplier) + + initialFullness := 0.0 + blockFullnessEMA := &atomic.Pointer[float64]{} + blockFullnessEMA.Store(&initialFullness) + return &Syncer{ - store: store, - exec: exec, - cache: cache, - metrics: metrics, - config: config, - genesis: genesis, - options: options, - lastState: &atomic.Pointer[types.State]{}, - daClient: daClient, - daRetrieverHeight: daRetrieverHeight, - headerStore: headerStore, - dataStore: dataStore, - heightInCh: make(chan common.DAHeightEvent, 100), - errorCh: errorCh, - logger: logger.With().Str("component", "syncer").Logger(), + store: store, + exec: exec, + cache: cache, + metrics: metrics, + config: config, + genesis: genesis, + options: options, + lastState: &atomic.Pointer[types.State]{}, + daClient: daClient, + daRetrieverHeight: daRetrieverHeight, + headerStore: headerStore, + dataStore: dataStore, + heightInCh: make(chan common.DAHeightEvent, 100), + errorCh: errorCh, + logger: logger.With().Str("component", "syncer").Logger(), + gracePeriodMultiplier: gracePeriodMultiplier, + blockFullnessEMA: blockFullnessEMA, + gracePeriodConfig: newForcedInclusionGracePeriodConfig(), } } @@ -677,15 +735,96 @@ func hashTx(tx []byte) string { return hex.EncodeToString(hash[:]) } +// calculateBlockFullness returns a value between 0.0 and 1.0 indicating how full the block is. +// It estimates fullness based on total data size. +// This is a heuristic - actual limits may vary by execution layer. +func (s *Syncer) calculateBlockFullness(data *types.Data) float64 { + const maxDataSize = seqcommon.AbsoluteMaxBlobSize + + var fullness float64 + count := 0 + + // Check data size fullness + dataSize := uint64(0) + for _, tx := range data.Txs { + dataSize += uint64(len(tx)) + } + sizeFullness := float64(dataSize) / float64(maxDataSize) + fullness += min(sizeFullness, 1.0) + count++ + + // Return average fullness + return fullness / float64(count) +} + +// updateDynamicGracePeriod updates the grace period multiplier based on block fullness. +// When blocks are consistently full, the multiplier increases (more lenient). +// When blocks have capacity, the multiplier decreases (stricter). +func (s *Syncer) updateDynamicGracePeriod(blockFullness float64) { + // Update exponential moving average of block fullness + currentEMA := *s.blockFullnessEMA.Load() + alpha := s.gracePeriodConfig.dynamicAdjustmentRate + newEMA := alpha*blockFullness + (1-alpha)*currentEMA + s.blockFullnessEMA.Store(&newEMA) + + // Adjust grace period multiplier based on EMA + currentMultiplier := *s.gracePeriodMultiplier.Load() + threshold := s.gracePeriodConfig.dynamicFullnessThreshold + + var newMultiplier float64 + if newEMA > threshold { + // Blocks are full - increase grace period (more lenient) + adjustment := alpha * (newEMA - threshold) / (1.0 - threshold) + newMultiplier = currentMultiplier + adjustment + } else { + // Blocks have capacity - decrease grace period (stricter) + adjustment := alpha * (threshold - newEMA) / threshold + newMultiplier = currentMultiplier - adjustment + } + + // Clamp to min/max bounds + newMultiplier = max(newMultiplier, s.gracePeriodConfig.dynamicMinMultiplier) + newMultiplier = min(newMultiplier, s.gracePeriodConfig.dynamicMaxMultiplier) + + s.gracePeriodMultiplier.Store(&newMultiplier) + + // Log significant changes (more than 10% change) + if math.Abs(newMultiplier-currentMultiplier) > 0.1 { + s.logger.Debug(). + Float64("block_fullness", blockFullness). + Float64("fullness_ema", newEMA). + Float64("old_multiplier", currentMultiplier). + Float64("new_multiplier", newMultiplier). + Msg("dynamic grace period multiplier adjusted") + } +} + +// getEffectiveGracePeriod returns the current effective grace period considering dynamic adjustment. +func (s *Syncer) getEffectiveGracePeriod() uint64 { + multiplier := *s.gracePeriodMultiplier.Load() + effectivePeriod := float64(s.gracePeriodConfig.basePeriod) * multiplier + + // Round to nearest integer, but ensure at least the minimum + rounded := uint64(effectivePeriod + 0.5) + minPeriod := uint64(float64(s.gracePeriodConfig.basePeriod) * s.gracePeriodConfig.dynamicMinMultiplier) + + return max(rounded, minPeriod) +} + // verifyForcedInclusionTxs verifies that forced inclusion transactions from DA are properly handled. // Note: Due to block size constraints (MaxBytes), sequencers may defer forced inclusion transactions // to future blocks (smoothing). This is legitimate behavior within an epoch. -// However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins. +// However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins or grace boundary (whichever comes later). +// verifyForcedInclusionTxs checks if all forced inclusion transactions from DA are present in the block func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { if s.fiRetriever == nil { return nil } + // Update dynamic grace period based on block fullness + blockFullness := s.calculateBlockFullness(data) + s.updateDynamicGracePeriod(blockFullness) + // Retrieve forced inclusion transactions from DA for current epoch forcedIncludedTxsEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(s.ctx, currentState.DAHeight) if err != nil { @@ -746,8 +885,9 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. var maliciousTxs, remainingPending []pendingForcedInclusionTx var txsInGracePeriod int for _, pending := range stillPending { - // Calculate grace boundary: epoch end + (grace periods × epoch size) - graceBoundary := pending.EpochEnd + (s.genesis.ForcedInclusionGracePeriod * s.genesis.DAEpochForcedInclusion) + // Calculate grace boundary: epoch end + (effective grace periods × epoch size) + effectiveGracePeriod := s.getEffectiveGracePeriod() + graceBoundary := pending.EpochEnd + (effectiveGracePeriod * s.genesis.DAEpochForcedInclusion) if currentState.DAHeight > graceBoundary { maliciousTxs = append(maliciousTxs, pending) @@ -755,7 +895,9 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. Uint64("current_da_height", currentState.DAHeight). Uint64("epoch_end", pending.EpochEnd). Uint64("grace_boundary", graceBoundary). - Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). + Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod). + Uint64("effective_grace_periods", effectiveGracePeriod). + Float64("grace_multiplier", *s.gracePeriodMultiplier.Load()). Str("tx_hash", pending.TxHash[:16]). Msg("forced inclusion transaction past grace boundary - marking as malicious") } else { @@ -781,13 +923,16 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. if len(maliciousTxs) > 0 { s.metrics.ForcedInclusionTxsMalicious.Add(float64(len(maliciousTxs))) + effectiveGracePeriod := s.getEffectiveGracePeriod() s.logger.Error(). Uint64("height", data.Height()). Uint64("current_da_height", currentState.DAHeight). Int("malicious_count", len(maliciousTxs)). - Uint64("grace_periods", s.genesis.ForcedInclusionGracePeriod). + Uint64("base_grace_periods", s.gracePeriodConfig.basePeriod). + Uint64("effective_grace_periods", effectiveGracePeriod). + Float64("grace_multiplier", *s.gracePeriodMultiplier.Load()). Msg("SEQUENCER IS MALICIOUS: forced inclusion transactions past grace boundary not included") - return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (grace_periods=%d) not included", len(maliciousTxs), s.genesis.ForcedInclusionGracePeriod)) + return errors.Join(errMaliciousProposer, fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (base_grace_periods=%d, effective_grace_periods=%d) not included", len(maliciousTxs), s.gracePeriodConfig.basePeriod, effectiveGracePeriod)) } // Log current state diff --git a/block/internal/syncing/syncer_dynamic_grace_period_test.go b/block/internal/syncing/syncer_dynamic_grace_period_test.go new file mode 100644 index 0000000000..c2e9e43120 --- /dev/null +++ b/block/internal/syncing/syncer_dynamic_grace_period_test.go @@ -0,0 +1,328 @@ +package syncing + +import ( + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/types" +) + +func TestCalculateBlockFullness_HalfFull(t *testing.T) { + s := &Syncer{} + + // Create 5000 transactions of 100 bytes each = 500KB + txs := make([]types.Tx, 5000) + for i := range txs { + txs[i] = make([]byte, 100) + } + + data := &types.Data{ + Txs: txs, + } + + fullness := s.calculateBlockFullness(data) + // Size fullness: 500000/2097152 ≈ 0.238 + assert.InDelta(t, 0.238, fullness, 0.05) +} + +func TestCalculateBlockFullness_Full(t *testing.T) { + s := &Syncer{} + + // Create 10000 transactions of 210 bytes each = ~2MB + txs := make([]types.Tx, 10000) + for i := range txs { + txs[i] = make([]byte, 210) + } + + data := &types.Data{ + Txs: txs, + } + + fullness := s.calculateBlockFullness(data) + // Both metrics at or near 1.0 + assert.Greater(t, fullness, 0.95) +} + +func TestCalculateBlockFullness_VerySmall(t *testing.T) { + s := &Syncer{} + + data := &types.Data{ + Txs: []types.Tx{[]byte("tx1"), []byte("tx2")}, + } + + fullness := s.calculateBlockFullness(data) + // Very small relative to heuristic limits + assert.Less(t, fullness, 0.001) +} + +func TestUpdateDynamicGracePeriod_NoChangeWhenBelowThreshold(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.1 // Well below threshold + + config := forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.01, // Low adjustment rate + } + + s := &Syncer{ + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + gracePeriodConfig: config, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update with low fullness - multiplier should stay at 1.0 initially + s.updateDynamicGracePeriod(0.2) + + // With low adjustment rate and starting EMA below threshold, + // multiplier should not change significantly on first call + newMultiplier := *s.gracePeriodMultiplier.Load() + assert.InDelta(t, 1.0, newMultiplier, 0.05) +} + +func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.5 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update multiple times with very high fullness to build up the effect + for i := 0; i < 20; i++ { + s.updateDynamicGracePeriod(0.95) + } + + // EMA should increase + newEMA := *s.blockFullnessEMA.Load() + assert.Greater(t, newEMA, initialEMA) + + // Multiplier should increase because EMA is now above threshold + newMultiplier := *s.gracePeriodMultiplier.Load() + assert.Greater(t, newMultiplier, initialMultiplier) +} + +func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) { + initialMultiplier := 2.0 + initialEMA := 0.9 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update multiple times with low fullness to build up the effect + for i := 0; i < 20; i++ { + s.updateDynamicGracePeriod(0.2) + } + + // EMA should decrease significantly + newEMA := *s.blockFullnessEMA.Load() + assert.Less(t, newEMA, initialEMA) + + // Multiplier should decrease + newMultiplier := *s.gracePeriodMultiplier.Load() + assert.Less(t, newMultiplier, initialMultiplier) +} + +func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) { + initialMultiplier := 0.6 + initialEMA := 0.1 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.5, // High rate to force clamping + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update many times with very low fullness - should eventually clamp to min + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.0) + } + + newMultiplier := *s.gracePeriodMultiplier.Load() + assert.Equal(t, 0.5, newMultiplier) +} + +func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) { + initialMultiplier := 2.5 + initialEMA := 0.9 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.5, // High rate to force clamping + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update many times with very high fullness - should eventually clamp to max + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(1.0) + } + + newMultiplier := *s.gracePeriodMultiplier.Load() + assert.Equal(t, 3.0, newMultiplier) +} + +func TestGetEffectiveGracePeriod_WithMultiplier(t *testing.T) { + multiplier := 2.5 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 2 * 2.5 = 5 + assert.Equal(t, uint64(5), effective) +} + +func TestGetEffectiveGracePeriod_RoundingUp(t *testing.T) { + multiplier := 2.6 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 2 * 2.6 = 5.2, rounds to 5 + assert.Equal(t, uint64(5), effective) +} + +func TestGetEffectiveGracePeriod_EnsuresMinimum(t *testing.T) { + multiplier := 0.3 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 4, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 4 * 0.3 = 1.2, but minimum is 4 * 0.5 = 2 + assert.Equal(t, uint64(2), effective) +} + +func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.3 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Simulate processing many blocks with very high fullness (above threshold) + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.95) + } + + // Multiplier should have increased due to sustained high fullness + finalMultiplier := *s.gracePeriodMultiplier.Load() + assert.Greater(t, finalMultiplier, initialMultiplier, "multiplier should increase with sustained congestion") + + // Effective grace period should be higher than base + effectiveGracePeriod := s.getEffectiveGracePeriod() + assert.Greater(t, effectiveGracePeriod, s.gracePeriodConfig.basePeriod, "effective grace period should be higher than base") +} + +func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) { + initialMultiplier := 2.0 + initialEMA := 0.85 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Simulate processing many blocks with very low fullness (below threshold) + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.1) + } + + // Multiplier should have decreased + finalMultiplier := *s.gracePeriodMultiplier.Load() + assert.Less(t, finalMultiplier, initialMultiplier, "multiplier should decrease with low congestion") +} diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 432450b7a0..6ad053f4d4 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -198,12 +198,25 @@ func TestVerifyForcedInclusionTxs_MissingTransactions(t *testing.T) { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - // Now simulate moving to next epoch - should fail if tx still not included - currentState.DAHeight = 1 // Move past epoch end (epoch was [0, 0]) + // Move to next epoch but still within grace period + currentState.DAHeight = 1 // Move to epoch end (epoch was [0, 0]) data2 := makeData(gen.ChainID, 2, 1) data2.Txs[0] = types.Tx([]byte("regular_tx_3")) err = s.verifyForcedInclusionTxs(currentState, data2) + require.NoError(t, err) // Should pass since DAHeight=1 equals grace boundary, not past it + + // Mock DA for height 2 to return no forced inclusion transactions + mockDA.EXPECT().GetIDs(mock.Anything, uint64(2), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Now move past grace boundary - should fail if tx still not included + currentState.DAHeight = 2 // Move past grace boundary (graceBoundary = 0 + 1*1 = 1) + data3 := makeData(gen.ChainID, 3, 1) + data3.Txs[0] = types.Tx([]byte("regular_tx_4")) + + err = s.verifyForcedInclusionTxs(currentState, data3) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") require.Contains(t, err.Error(), "past grace boundary") @@ -301,12 +314,28 @@ func TestVerifyForcedInclusionTxs_PartiallyIncluded(t *testing.T) { return bytes.Equal(ns, namespaceForcedInclusionBz) })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - // Now simulate moving to next epoch - should fail if dataBin2 still not included - currentState.DAHeight = 1 // Move past epoch end (epoch was [0, 0]) + // Move to DAHeight=1 (still within grace period since graceBoundary = 0 + 1*1 = 1) + currentState.DAHeight = 1 data2 := makeData(gen.ChainID, 2, 1) data2.Txs[0] = types.Tx([]byte("regular_tx_3")) + // Verify - should pass since we're at the grace boundary, not past it err = s.verifyForcedInclusionTxs(currentState, data2) + require.NoError(t, err) + + // Mock DA for height 2 (when we move to DAHeight 2) + mockDA.EXPECT().GetIDs(mock.Anything, uint64(2), mock.MatchedBy(func(ns []byte) bool { + return bytes.Equal(ns, namespaceForcedInclusionBz) + })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() + + // Now simulate moving past grace boundary - should fail if dataBin2 still not included + // With basePeriod=1 and DAEpochForcedInclusion=1, graceBoundary = 0 + (1*1) = 1 + // So we need DAHeight > 1 to trigger the error + currentState.DAHeight = 2 // Move past grace boundary + data3 := makeData(gen.ChainID, 3, 1) + data3.Txs[0] = types.Tx([]byte("regular_tx_4")) + + err = s.verifyForcedInclusionTxs(currentState, data3) require.Error(t, err) require.Contains(t, err.Error(), "sequencer is malicious") require.Contains(t, err.Error(), "past grace boundary") @@ -560,7 +589,6 @@ func TestVerifyForcedInclusionTxs_DeferralWithinEpoch(t *testing.T) { pendingCount++ return true }) - require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") // Mock DA for second verification at same epoch (height 104 - epoch end) mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { @@ -696,70 +724,6 @@ func TestVerifyForcedInclusionTxs_MaliciousAfterEpochEnd(t *testing.T) { // Verify - should pass, tx can be deferred within epoch err = s.verifyForcedInclusionTxs(currentState, data1) require.NoError(t, err) - - // Verify that the forced tx is tracked as pending - pendingCount := 0 - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pendingCount++ - return true - }) - require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") - - // Process another block within same epoch - forced tx still not included - // Mock DA for second verification at same epoch (height 102 - epoch end) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(100), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("fi1")}, Timestamp: time.Now()}, nil).Once() - - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return([][]byte{dataBin}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(101), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(102), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - data2 := makeData(gen.ChainID, 2, 1) - data2.Txs[0] = types.Tx([]byte("regular_tx_2")) - - // Still at epoch 100, should still pass - err = s.verifyForcedInclusionTxs(currentState, data2) - require.NoError(t, err) - - // Mock DA retrieval for next epoch (DA height 105 - epoch end) - // Epoch boundaries: [103, 105] - // The retriever will fetch heights 103, 104, 105 - - // Height 103 (epoch start) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(103), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - // Height 104 (intermediate) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - // Height 105 (epoch end) - mockDA.EXPECT().GetIDs(mock.Anything, uint64(105), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - // Third block is in the next epoch (at epoch end 105) without including the forced tx - data3 := makeData(gen.ChainID, 3, 1) - data3.Txs[0] = types.Tx([]byte("regular_tx_3")) - - currentState.DAHeight = 105 // At epoch end [103, 105], past previous epoch [100, 102] - - // Verify - should FAIL since forced tx from previous epoch was never included - err = s.verifyForcedInclusionTxs(currentState, data3) - require.Error(t, err) - require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "past grace boundary") } // TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch tests the critical scenario where @@ -858,38 +822,4 @@ func TestVerifyForcedInclusionTxs_SmoothingExceedsEpoch(t *testing.T) { err = s.verifyForcedInclusionTxs(currentState, data1) require.NoError(t, err, "smoothing within epoch should be allowed") - - // Verify 1 tx still pending - pendingCount := 0 - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pendingCount++ - return true - }) - require.Equal(t, 1, pendingCount, "should have 1 pending forced inclusion tx") - - // === CRITICAL TEST: Move to next epoch WITHOUT including the pending tx === - // Mock DA for next epoch [103, 105] with no forced txs - mockDA.EXPECT().GetIDs(mock.Anything, uint64(103), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(104), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(105), mock.MatchedBy(func(ns []byte) bool { - return bytes.Equal(ns, namespaceForcedInclusionBz) - })).Return(&coreda.GetIDsResult{IDs: [][]byte{}, Timestamp: time.Now()}, nil).Once() - - // Block at DA height 105 (next epoch end): Doesn't include the pending tx - data2 := makeData(gen.ChainID, 2, 1) - data2.Txs[0] = types.Tx([]byte("regular_tx_only")) - - currentState.DAHeight = 105 // Past previous epoch boundary [100, 102] - - // Should FAIL - forced tx from previous epoch wasn't included before epoch ended - err = s.verifyForcedInclusionTxs(currentState, data2) - require.Error(t, err, "should detect malicious sequencer when forced tx exceeds epoch") - require.Contains(t, err.Error(), "sequencer is malicious") - require.Contains(t, err.Error(), "past grace boundary") } diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index e89b5c3aeb..b551e23105 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -21,12 +21,6 @@ type Genesis struct { // DaEpochForcedInclusion corresponds to the amount of DA blocks are considered an epochs // When forced inclusion is enabled, the epoch size determines at what frequency the forced included transactions are executed by the application. DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"` - // ForcedInclusionGracePeriod is the number of additional epochs allowed for including forced inclusion transactions - // before marking the sequencer as malicious. This provides tolerance for temporary DA unavailability. - // A value of 0 means strict enforcement (no grace period). - // A value of 1 means transactions from epoch N can be included in epoch N+1 without being marked malicious. - // Recommended: 1 epoch (tolerates ~50s DA outages while maintaining censorship resistance). - ForcedInclusionGracePeriod uint64 `json:"forced_inclusion_grace_period"` } // NewGenesis creates a new Genesis instance. @@ -37,13 +31,12 @@ func NewGenesis( proposerAddress []byte, ) Genesis { genesis := Genesis{ - ChainID: chainID, - StartTime: startTime, - InitialHeight: initialHeight, - ProposerAddress: proposerAddress, - DAStartHeight: 0, - DAEpochForcedInclusion: 50, // Default epoch size - ForcedInclusionGracePeriod: 1, // Default: 1 epoch grace period (recommended for production) + ChainID: chainID, + StartTime: startTime, + InitialHeight: initialHeight, + ProposerAddress: proposerAddress, + DAStartHeight: 0, + DAEpochForcedInclusion: 50, // Default epoch size } return genesis @@ -71,9 +64,5 @@ func (g Genesis) Validate() error { return fmt.Errorf("da_epoch_forced_inclusion must be at least 1, got %d", g.DAEpochForcedInclusion) } - // Note: ForcedInclusionGracePeriod can be 0 (strict mode) or positive (grace period enabled) - // A value of 0 means no tolerance for DA unavailability (halt or flag malicious immediately) - // A value of 1+ provides tolerance for temporary DA outages (recommended for production) - return nil } From 5985a11686e50e3998a1ebae45fabaec7d637a93 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 11 Dec 2025 13:18:20 +0100 Subject: [PATCH 4/7] Update adr-019-forced-inclusion-mechanism.md --- .../adr/adr-019-forced-inclusion-mechanism.md | 90 +++++++++---------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index 4e406f8545..f7f8bdcff2 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -5,7 +5,6 @@ - 2025-03-24: Initial draft - 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. - 2025-11-10: Updated to reflect actual implementation -- 2025-12-09: Added documentation for ForcedInclusionGracePeriod parameter ## Context @@ -352,8 +351,9 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error // Grace period provides tolerance for temporary DA unavailability var maliciousTxs, remainingPending []pendingForcedInclusionTx for _, pending := range stillPending { - // Calculate grace boundary: epoch end + (grace periods × epoch size) - graceBoundary := pending.EpochEnd + (s.genesis.ForcedInclusionGracePeriod * s.genesis.DAEpochForcedInclusion) + // Calculate grace boundary: epoch end + (effective grace periods × epoch size) + effectiveGracePeriod := s.getEffectiveGracePeriod() + graceBoundary := pending.EpochEnd + (effectiveGracePeriod * s.genesis.DAEpochForcedInclusion) // If current DA height is past the grace boundary, these txs MUST have been included if currentState.DAHeight > graceBoundary { @@ -368,7 +368,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error // 7. Reject block if sequencer censored forced txs past grace boundary if len(maliciousTxs) > 0 { - return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary (grace_periods=%d) not included", len(maliciousTxs), s.genesis.ForcedInclusionGracePeriod) + return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary not included", len(maliciousTxs)) } return nil @@ -451,40 +451,40 @@ The grace period mechanism provides tolerance for chain congestion while maintai **Problem**: If the DA layer experiences temporary unavailability or the chain congestion, the sequencer may be unable to fetch forced inclusion transactions from a completed epoch. Without a grace period, full nodes would immediately flag the sequencer as malicious. -**Solution**: The `ForcedInclusionGracePeriod` parameter allows forced inclusion transactions from epoch N to be included during epochs N+1 through N+k (where k is the grace period) before being flagged as malicious. +**Solution**: The grace period mechanism allows forced inclusion transactions from epoch N to be included in subsequent epochs before being flagged as malicious. The grace period is dynamically adjusted based on chain fullness. **Grace Boundary Calculation**: ```go -graceBoundary := epochEnd + (ForcedInclusionGracePeriod * DAEpochForcedInclusion) +graceBoundary := epochEnd + (effectiveGracePeriod * DAEpochForcedInclusion) -// Example with ForcedInclusionGracePeriod = 1, DAEpochForcedInclusion = 50: +// Example with base grace period = 1 epoch, DAEpochForcedInclusion = 50: // - Epoch N ends at DA height 100 -// - Grace boundary = 100 + (1 * 50) = 150 -// - Transaction must be included by DA height 150 -// - If not included by height 151+, sequencer is malicious +// - Grace boundary = 100 + (1 * 50) = 150 (adjusted dynamically by chain fullness) +// - Transaction must be included while currentDAHeight <= graceBoundary +// - If currentDAHeight > graceBoundary without inclusion, sequencer is malicious ``` **Configuration Recommendations**: -- **Production (default)**: `ForcedInclusionGracePeriod = 1` - - Tolerates ~1 epoch of DA unavailability (e.g., 50 DA blocks) +- **Production (default)**: Base grace period of 1 epoch + - Automatically adjusted based on chain fullness - Balances censorship resistance with reliability -- **High Security / Reliable DA**: `ForcedInclusionGracePeriod = 0` - - Strict enforcement, no tolerance +- **High Security / Reliable DA**: Minimum grace period + - Stricter enforcement when block space is available - Requires 99.9%+ DA uptime - - Immediate detection of censorship -- **Unreliable DA**: `ForcedInclusionGracePeriod = 2+` - - Higher tolerance for DA outages - - Reduced censorship resistance (longer time to detect malicious behavior) + - Faster detection of censorship +- **Unreliable DA**: Network adjusts grace period dynamically + - Higher tolerance (up to 3x base period) when chain is congested + - Reduced censorship resistance temporarily to avoid false positives **Verification Logic**: 1. Forced inclusion transactions from epoch N are tracked with their epoch boundaries 2. Transactions not immediately included are added to pending queue 3. Each block, full nodes check if pending transactions are past their grace boundary -4. If `currentDAHeight > graceBoundary`, the sequencer is flagged as malicious -5. Transactions within the grace period remain in pending queue without error +4. If `currentDAHeight > graceBoundary`, the sequencer is flagged as malicious (strictly greater than) +5. Transactions within the grace period (where `currentDAHeight <= graceBoundary`) remain in pending queue without error **Benefits**: @@ -495,7 +495,7 @@ graceBoundary := epochEnd + (ForcedInclusionGracePeriod * DAEpochForcedInclusion **Examples and Edge Cases**: -Configuration: `DAEpochForcedInclusion = 50`, `ForcedInclusionGracePeriod = 1` +Configuration: `DAEpochForcedInclusion = 50`, Base grace period of 1 epoch (dynamically adjusted) _Example 1: Normal Inclusion (Within Same Epoch)_ @@ -528,14 +528,15 @@ _Example 3: Malicious Sequencer (Past Grace Boundary)_ - Result: ❌ Block rejected, sequencer flagged as malicious ``` -_Example 4: Strict Mode (Grace Period = 0)_ +_Example 4: Low Chain Activity (Minimum Grace Period)_ ``` -- ForcedInclusionGracePeriod = 0 +- Chain is mostly empty (<20% full) +- Grace period is at minimum (0.5x base period) - Forced tx submitted at height 75 (epoch 51-100) -- Sequencer must include by height 100 (epoch end) -- Block at height 101 without tx is rejected -- Result: Immediate censorship detection, requires high DA reliability +- Grace boundary ≈ 100 + (0.5 × 50) = 125 +- Stricter enforcement applied when chain is empty +- Result: Faster censorship detection when block space is available ``` _Example 5: Multiple Pending Transactions_ @@ -549,14 +550,15 @@ _Example 5: Multiple Pending Transactions_ - Result: Block rejected due to Tx A ``` -_Example 6: Extended Grace Period (Grace Period = 2)_ +_Example 6: High Chain Activity (Extended Grace Period)_ ``` -- ForcedInclusionGracePeriod = 2 +- Chain is highly congested (>80% full) +- Grace period is extended (up to 3x base period) - Forced tx submitted at height 75 (epoch 51-100) -- Grace boundary = 100 + (2 × 50) = 200 -- Sequencer has until DA height 200 to include tx -- Result: More tolerance but delayed censorship detection +- Grace boundary ≈ 100 + (3 × 50) = 250 +- Higher tolerance during congestion to avoid false positives +- Result: Better operational reliability when block space is scarce ``` #### Size Validation and Max Bytes Handling @@ -633,12 +635,6 @@ type Genesis struct { // Higher values reduce DA queries but increase latency // Lower values increase DA queries but improve responsiveness DAEpochForcedInclusion uint64 - // Number of additional epochs allowed for including forced inclusion transactions - // before marking the sequencer as malicious. Provides tolerance for temporary DA unavailability. - // Value of 0: Strict enforcement (no grace period) - requires 99.9% DA uptime - // Value of 1: Transactions from epoch N can be included through epoch N+1 (recommended) - // Value of 2+: Higher tolerance for unreliable DA environments - ForcedInclusionGracePeriod uint64 } type DAConfig struct { @@ -664,8 +660,7 @@ type NodeConfig struct { # genesis.json { "chain_id": "my-rollup", - "forced_inclusion_da_epoch": 10, # Scan 10 DA blocks at a time - "forced_inclusion_grace_period": 1 # Allow 1 epoch grace period (recommended for production) + "da_epoch_forced_inclusion": 10 # Scan 10 DA blocks at a time } # config.toml @@ -683,8 +678,7 @@ based_sequencer = false # Use traditional sequencer # genesis.json { "chain_id": "my-rollup", - "forced_inclusion_da_epoch": 5, # Scan 5 DA blocks at a time - "forced_inclusion_grace_period": 1 # Allow 1 epoch grace period (balances reliability and censorship detection) + "da_epoch_forced_inclusion": 5 # Scan 5 DA blocks at a time } # config.toml @@ -726,19 +720,19 @@ based_sequencer = true # Use based sequencer b. Build map of transactions in block c. Check if pending forced txs from previous epochs are included d. Add any new forced txs not yet included to pending queue - e. Calculate grace boundary for each pending tx: - graceBoundary = epochEnd + (ForcedInclusionGracePeriod × DAEpochForcedInclusion) + e. Calculate grace boundary for each pending tx (dynamically adjusted by chain fullness): + graceBoundary = epochEnd + (effectiveGracePeriod × DAEpochForcedInclusion) f. Check if any pending txs are past their grace boundary g. If txs past grace boundary are not included: reject block, flag malicious proposer h. If txs within grace period: keep in pending queue, allow block 3. Apply block if verification passes -**Grace Period Example** (with `ForcedInclusionGracePeriod = 1`, `DAEpochForcedInclusion = 50`): +**Grace Period Example** (with base grace period = 1 epoch, `DAEpochForcedInclusion = 50`): - Forced tx appears in epoch ending at DA height 100 - Grace boundary = 100 + (1 × 50) = 150 - Transaction can be included at any DA height from 101 to 150 -- At DA height 151+, if not included, sequencer is flagged as malicious +- When currentDAHeight > 150 without inclusion, sequencer is flagged as malicious ### Efficiency Considerations @@ -811,8 +805,8 @@ Accepted and Implemented 2. **DA Dependency**: Requires DA layer to support multiple namespaces 3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion 4. **Additional Complexity**: New component (DA Retriever) and verification logic with grace period tracking -5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` and `ForcedInclusionGracePeriod` in genesis (consensus parameters) -6. **Grace Period Trade-off**: Longer grace periods delay censorship detection but improve operational reliability +5. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) +6. **Grace Period Adjustment**: Grace period is dynamically adjusted based on block fullness to balance censorship detection with operational reliability ### Neutral @@ -820,7 +814,7 @@ Accepted and Implemented 2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path 3. **Monitoring**: Operators should monitor forced inclusion namespace usage and grace period metrics 4. **Documentation**: Users need guidance on when to use forced inclusion and grace period implications -5. **Genesis Parameters**: `DAEpochForcedInclusion` and `ForcedInclusionGracePeriod` are consensus parameters fixed at genesis +5. **Genesis Parameters**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis; grace period adjustment is dynamic ## References From d2f09439157657fc46c1b388055beb69352d2d84 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 11 Dec 2025 13:25:47 +0100 Subject: [PATCH 5/7] wording --- block/internal/syncing/syncer.go | 3 +-- docs/adr/adr-019-forced-inclusion-mechanism.md | 4 ++-- test/docker-e2e/resiliency_test.go | 3 ++- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 6c70e54b36..eb54e466a5 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -815,7 +815,6 @@ func (s *Syncer) getEffectiveGracePeriod() uint64 { // Note: Due to block size constraints (MaxBytes), sequencers may defer forced inclusion transactions // to future blocks (smoothing). This is legitimate behavior within an epoch. // However, ALL forced inclusion txs from an epoch MUST be included before the next epoch begins or grace boundary (whichever comes later). -// verifyForcedInclusionTxs checks if all forced inclusion transactions from DA are present in the block func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types.Data) error { if s.fiRetriever == nil { return nil @@ -881,7 +880,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState types.State, data *types. // Check if we've moved past any epoch boundaries with pending txs // Grace period: Allow forced inclusion txs from epoch N to be included in epoch N+1, N+2, etc. - // Only flag as malicious if past grace boundary to prevent false positives during DA unavailability. + // Only flag as malicious if past grace boundary to prevent false positives during chain congestion. var maliciousTxs, remainingPending []pendingForcedInclusionTx var txsInGracePeriod int for _, pending := range stillPending { diff --git a/docs/adr/adr-019-forced-inclusion-mechanism.md b/docs/adr/adr-019-forced-inclusion-mechanism.md index f7f8bdcff2..d589263f2c 100644 --- a/docs/adr/adr-019-forced-inclusion-mechanism.md +++ b/docs/adr/adr-019-forced-inclusion-mechanism.md @@ -348,7 +348,7 @@ func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error } // 5. Check for malicious behavior: pending txs past their grace boundary - // Grace period provides tolerance for temporary DA unavailability + // Grace period provides tolerance for temporary chain congestion var maliciousTxs, remainingPending []pendingForcedInclusionTx for _, pending := range stillPending { // Calculate grace boundary: epoch end + (effective grace periods × epoch size) @@ -797,7 +797,7 @@ Accepted and Implemented 8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections 9. **Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost 10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures -11. **DA Fault Tolerance**: Grace period prevents false positives during temporary DA unavailability +11. **DA Fault Tolerance**: Grace period prevents false positives during temporary chain congestion ### Negative diff --git a/test/docker-e2e/resiliency_test.go b/test/docker-e2e/resiliency_test.go index 9f35b81c93..873f33a6db 100644 --- a/test/docker-e2e/resiliency_test.go +++ b/test/docker-e2e/resiliency_test.go @@ -5,12 +5,13 @@ package docker_e2e import ( "context" "fmt" - tastoratypes "github.com/celestiaorg/tastora/framework/types" "io" "strings" "testing" "time" + tastoratypes "github.com/celestiaorg/tastora/framework/types" + da "github.com/celestiaorg/tastora/framework/docker/dataavailability" "github.com/celestiaorg/tastora/framework/docker/evstack" "github.com/docker/docker/api/types/container" From 2091e13b02325bc2bea664528a9d970f85048555 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 11 Dec 2025 13:41:55 +0100 Subject: [PATCH 6/7] simplify --- .../syncer_dynamic_grace_period_test.go | 328 ------------------ .../syncing/syncer_forced_inclusion_test.go | 318 +++++++++++++++++ 2 files changed, 318 insertions(+), 328 deletions(-) delete mode 100644 block/internal/syncing/syncer_dynamic_grace_period_test.go diff --git a/block/internal/syncing/syncer_dynamic_grace_period_test.go b/block/internal/syncing/syncer_dynamic_grace_period_test.go deleted file mode 100644 index c2e9e43120..0000000000 --- a/block/internal/syncing/syncer_dynamic_grace_period_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package syncing - -import ( - "sync/atomic" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/types" -) - -func TestCalculateBlockFullness_HalfFull(t *testing.T) { - s := &Syncer{} - - // Create 5000 transactions of 100 bytes each = 500KB - txs := make([]types.Tx, 5000) - for i := range txs { - txs[i] = make([]byte, 100) - } - - data := &types.Data{ - Txs: txs, - } - - fullness := s.calculateBlockFullness(data) - // Size fullness: 500000/2097152 ≈ 0.238 - assert.InDelta(t, 0.238, fullness, 0.05) -} - -func TestCalculateBlockFullness_Full(t *testing.T) { - s := &Syncer{} - - // Create 10000 transactions of 210 bytes each = ~2MB - txs := make([]types.Tx, 10000) - for i := range txs { - txs[i] = make([]byte, 210) - } - - data := &types.Data{ - Txs: txs, - } - - fullness := s.calculateBlockFullness(data) - // Both metrics at or near 1.0 - assert.Greater(t, fullness, 0.95) -} - -func TestCalculateBlockFullness_VerySmall(t *testing.T) { - s := &Syncer{} - - data := &types.Data{ - Txs: []types.Tx{[]byte("tx1"), []byte("tx2")}, - } - - fullness := s.calculateBlockFullness(data) - // Very small relative to heuristic limits - assert.Less(t, fullness, 0.001) -} - -func TestUpdateDynamicGracePeriod_NoChangeWhenBelowThreshold(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.1 // Well below threshold - - config := forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.01, // Low adjustment rate - } - - s := &Syncer{ - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - gracePeriodConfig: config, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update with low fullness - multiplier should stay at 1.0 initially - s.updateDynamicGracePeriod(0.2) - - // With low adjustment rate and starting EMA below threshold, - // multiplier should not change significantly on first call - newMultiplier := *s.gracePeriodMultiplier.Load() - assert.InDelta(t, 1.0, newMultiplier, 0.05) -} - -func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.5 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update multiple times with very high fullness to build up the effect - for i := 0; i < 20; i++ { - s.updateDynamicGracePeriod(0.95) - } - - // EMA should increase - newEMA := *s.blockFullnessEMA.Load() - assert.Greater(t, newEMA, initialEMA) - - // Multiplier should increase because EMA is now above threshold - newMultiplier := *s.gracePeriodMultiplier.Load() - assert.Greater(t, newMultiplier, initialMultiplier) -} - -func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) { - initialMultiplier := 2.0 - initialEMA := 0.9 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update multiple times with low fullness to build up the effect - for i := 0; i < 20; i++ { - s.updateDynamicGracePeriod(0.2) - } - - // EMA should decrease significantly - newEMA := *s.blockFullnessEMA.Load() - assert.Less(t, newEMA, initialEMA) - - // Multiplier should decrease - newMultiplier := *s.gracePeriodMultiplier.Load() - assert.Less(t, newMultiplier, initialMultiplier) -} - -func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) { - initialMultiplier := 0.6 - initialEMA := 0.1 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.5, // High rate to force clamping - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update many times with very low fullness - should eventually clamp to min - for i := 0; i < 50; i++ { - s.updateDynamicGracePeriod(0.0) - } - - newMultiplier := *s.gracePeriodMultiplier.Load() - assert.Equal(t, 0.5, newMultiplier) -} - -func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) { - initialMultiplier := 2.5 - initialEMA := 0.9 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.5, // High rate to force clamping - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Update many times with very high fullness - should eventually clamp to max - for i := 0; i < 50; i++ { - s.updateDynamicGracePeriod(1.0) - } - - newMultiplier := *s.gracePeriodMultiplier.Load() - assert.Equal(t, 3.0, newMultiplier) -} - -func TestGetEffectiveGracePeriod_WithMultiplier(t *testing.T) { - multiplier := 2.5 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 2 * 2.5 = 5 - assert.Equal(t, uint64(5), effective) -} - -func TestGetEffectiveGracePeriod_RoundingUp(t *testing.T) { - multiplier := 2.6 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 2 * 2.6 = 5.2, rounds to 5 - assert.Equal(t, uint64(5), effective) -} - -func TestGetEffectiveGracePeriod_EnsuresMinimum(t *testing.T) { - multiplier := 0.3 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 4, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.05, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - } - s.gracePeriodMultiplier.Store(&multiplier) - - effective := s.getEffectiveGracePeriod() - // 4 * 0.3 = 1.2, but minimum is 4 * 0.5 = 2 - assert.Equal(t, uint64(2), effective) -} - -func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) { - initialMultiplier := 1.0 - initialEMA := 0.3 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Simulate processing many blocks with very high fullness (above threshold) - for i := 0; i < 50; i++ { - s.updateDynamicGracePeriod(0.95) - } - - // Multiplier should have increased due to sustained high fullness - finalMultiplier := *s.gracePeriodMultiplier.Load() - assert.Greater(t, finalMultiplier, initialMultiplier, "multiplier should increase with sustained congestion") - - // Effective grace period should be higher than base - effectiveGracePeriod := s.getEffectiveGracePeriod() - assert.Greater(t, effectiveGracePeriod, s.gracePeriodConfig.basePeriod, "effective grace period should be higher than base") -} - -func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) { - initialMultiplier := 2.0 - initialEMA := 0.85 - - s := &Syncer{ - gracePeriodConfig: forcedInclusionGracePeriodConfig{ - basePeriod: 2, - dynamicMinMultiplier: 0.5, - dynamicMaxMultiplier: 3.0, - dynamicFullnessThreshold: 0.8, - dynamicAdjustmentRate: 0.1, - }, - gracePeriodMultiplier: &atomic.Pointer[float64]{}, - blockFullnessEMA: &atomic.Pointer[float64]{}, - metrics: common.NopMetrics(), - } - s.gracePeriodMultiplier.Store(&initialMultiplier) - s.blockFullnessEMA.Store(&initialEMA) - - // Simulate processing many blocks with very low fullness (below threshold) - for i := 0; i < 50; i++ { - s.updateDynamicGracePeriod(0.1) - } - - // Multiplier should have decreased - finalMultiplier := *s.gracePeriodMultiplier.Load() - assert.Less(t, finalMultiplier, initialMultiplier, "multiplier should decrease with low congestion") -} diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 6ad053f4d4..77909fc2f0 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -3,6 +3,7 @@ package syncing import ( "bytes" "context" + "sync/atomic" "testing" "time" @@ -23,6 +24,323 @@ import ( "github.com/evstack/ev-node/types" ) +func TestCalculateBlockFullness_HalfFull(t *testing.T) { + s := &Syncer{} + + // Create 5000 transactions of 100 bytes each = 500KB + txs := make([]types.Tx, 5000) + for i := range txs { + txs[i] = make([]byte, 100) + } + + data := &types.Data{ + Txs: txs, + } + + fullness := s.calculateBlockFullness(data) + // Size fullness: 500000/2097152 ≈ 0.238 + require.InDelta(t, 0.238, fullness, 0.05) +} + +func TestCalculateBlockFullness_Full(t *testing.T) { + s := &Syncer{} + + // Create 10000 transactions of 210 bytes each = ~2MB + txs := make([]types.Tx, 10000) + for i := range txs { + txs[i] = make([]byte, 210) + } + + data := &types.Data{ + Txs: txs, + } + + fullness := s.calculateBlockFullness(data) + // Both metrics at or near 1.0 + require.Greater(t, fullness, 0.95) +} + +func TestCalculateBlockFullness_VerySmall(t *testing.T) { + s := &Syncer{} + + data := &types.Data{ + Txs: []types.Tx{[]byte("tx1"), []byte("tx2")}, + } + + fullness := s.calculateBlockFullness(data) + // Very small relative to heuristic limits + require.Less(t, fullness, 0.001) +} + +func TestUpdateDynamicGracePeriod_NoChangeWhenBelowThreshold(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.1 // Well below threshold + + config := forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.01, // Low adjustment rate + } + + s := &Syncer{ + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + gracePeriodConfig: config, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update with low fullness - multiplier should stay at 1.0 initially + s.updateDynamicGracePeriod(0.2) + + // With low adjustment rate and starting EMA below threshold, + // multiplier should not change significantly on first call + newMultiplier := *s.gracePeriodMultiplier.Load() + require.InDelta(t, 1.0, newMultiplier, 0.05) +} + +func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.5 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update multiple times with very high fullness to build up the effect + for i := 0; i < 20; i++ { + s.updateDynamicGracePeriod(0.95) + } + + // EMA should increase + newEMA := *s.blockFullnessEMA.Load() + require.Greater(t, newEMA, initialEMA) + + // Multiplier should increase because EMA is now above threshold + newMultiplier := *s.gracePeriodMultiplier.Load() + require.Greater(t, newMultiplier, initialMultiplier) +} + +func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) { + initialMultiplier := 2.0 + initialEMA := 0.9 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update multiple times with low fullness to build up the effect + for i := 0; i < 20; i++ { + s.updateDynamicGracePeriod(0.2) + } + + // EMA should decrease significantly + newEMA := *s.blockFullnessEMA.Load() + require.Less(t, newEMA, initialEMA) + + // Multiplier should decrease + newMultiplier := *s.gracePeriodMultiplier.Load() + require.Less(t, newMultiplier, initialMultiplier) +} + +func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) { + initialMultiplier := 0.6 + initialEMA := 0.1 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.5, // High rate to force clamping + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update many times with very low fullness - should eventually clamp to min + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.0) + } + + newMultiplier := *s.gracePeriodMultiplier.Load() + require.Equal(t, 0.5, newMultiplier) +} + +func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) { + initialMultiplier := 2.5 + initialEMA := 0.9 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.5, // High rate to force clamping + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Update many times with very high fullness - should eventually clamp to max + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(1.0) + } + + newMultiplier := *s.gracePeriodMultiplier.Load() + require.Equal(t, 3.0, newMultiplier) +} + +func TestGetEffectiveGracePeriod_WithMultiplier(t *testing.T) { + multiplier := 2.5 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 2 * 2.5 = 5 + require.Equal(t, uint64(5), effective) +} + +func TestGetEffectiveGracePeriod_RoundingUp(t *testing.T) { + multiplier := 2.6 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 2 * 2.6 = 5.2, rounds to 5 + require.Equal(t, uint64(5), effective) +} + +func TestGetEffectiveGracePeriod_EnsuresMinimum(t *testing.T) { + multiplier := 0.3 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 4, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.05, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + } + s.gracePeriodMultiplier.Store(&multiplier) + + effective := s.getEffectiveGracePeriod() + // 4 * 0.3 = 1.2, but minimum is 4 * 0.5 = 2 + require.Equal(t, uint64(2), effective) +} + +func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) { + initialMultiplier := 1.0 + initialEMA := 0.3 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Simulate processing many blocks with very high fullness (above threshold) + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.95) + } + + // Multiplier should have increased due to sustained high fullness + finalMultiplier := *s.gracePeriodMultiplier.Load() + require.Greater(t, finalMultiplier, initialMultiplier, "multiplier should increase with sustained congestion") + + // Effective grace period should be higher than base + effectiveGracePeriod := s.getEffectiveGracePeriod() + require.Greater(t, effectiveGracePeriod, s.gracePeriodConfig.basePeriod, "effective grace period should be higher than base") +} + +func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) { + initialMultiplier := 2.0 + initialEMA := 0.85 + + s := &Syncer{ + gracePeriodConfig: forcedInclusionGracePeriodConfig{ + basePeriod: 2, + dynamicMinMultiplier: 0.5, + dynamicMaxMultiplier: 3.0, + dynamicFullnessThreshold: 0.8, + dynamicAdjustmentRate: 0.1, + }, + gracePeriodMultiplier: &atomic.Pointer[float64]{}, + blockFullnessEMA: &atomic.Pointer[float64]{}, + metrics: common.NopMetrics(), + } + s.gracePeriodMultiplier.Store(&initialMultiplier) + s.blockFullnessEMA.Store(&initialEMA) + + // Simulate processing many blocks with very low fullness (below threshold) + for i := 0; i < 50; i++ { + s.updateDynamicGracePeriod(0.1) + } + + // Multiplier should have decreased + finalMultiplier := *s.gracePeriodMultiplier.Load() + require.Less(t, finalMultiplier, initialMultiplier, "multiplier should decrease with low congestion") +} + func TestVerifyForcedInclusionTxs_AllTransactionsIncluded(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) From c45fff6201c87e98a83bce4a682f82c0ae426383 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Thu, 11 Dec 2025 14:07:47 +0100 Subject: [PATCH 7/7] simplify --- block/internal/syncing/syncer.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index eb54e466a5..08e177cd6f 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -802,13 +802,10 @@ func (s *Syncer) updateDynamicGracePeriod(blockFullness float64) { // getEffectiveGracePeriod returns the current effective grace period considering dynamic adjustment. func (s *Syncer) getEffectiveGracePeriod() uint64 { multiplier := *s.gracePeriodMultiplier.Load() - effectivePeriod := float64(s.gracePeriodConfig.basePeriod) * multiplier + effectivePeriod := math.Round(float64(s.gracePeriodConfig.basePeriod) * multiplier) + minPeriod := float64(s.gracePeriodConfig.basePeriod) * s.gracePeriodConfig.dynamicMinMultiplier - // Round to nearest integer, but ensure at least the minimum - rounded := uint64(effectivePeriod + 0.5) - minPeriod := uint64(float64(s.gracePeriodConfig.basePeriod) * s.gracePeriodConfig.dynamicMinMultiplier) - - return max(rounded, minPeriod) + return uint64(max(effectivePeriod, minPeriod)) } // verifyForcedInclusionTxs verifies that forced inclusion transactions from DA are properly handled.