84 changes: 84 additions & 0 deletions node/core/batch.go
@@ -178,6 +178,17 @@ func (e *Executor) CalculateCapWithProposalBlock(currentBlockBytes []byte, curre
return false, err
}

// MPT fork: force batch points on the 1st and 2nd post-fork blocks, so the 1st post-fork block
// becomes a single-block batch: [H1, H2).
force, err := e.forceBatchPointForMPTFork(height, block.Timestamp, block.StateRoot, block.Hash)
if err != nil {
return false, err
}
if force {
e.logger.Info("MPT fork: force batch point", "height", height, "timestamp", block.Timestamp)
return true, nil
}

var exceeded bool
if e.isBatchUpgraded(block.Timestamp) {
exceeded, err = e.batchingCache.batchData.WillExceedCompressedSizeLimit(e.batchingCache.currentBlockContext, e.batchingCache.currentTxsPayload)
@@ -187,6 +198,79 @@ func (e *Executor) CalculateCapWithProposalBlock(currentBlockBytes []byte, curre
return exceeded, err
}
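// For intuition (illustrative note, not part of the original diff), assuming a
// batch point at height H seals the currently open batch just before H:
//
//   blocks:       ... H0 | H1 | H2 | H3 ...
//   fork time:           ^ falls between H0 and H1
//
//   force at H1  -> seals the pre-fork batch      [.., H1)
//   force at H2  -> seals the single-block batch  [H1, H2)
//   from H2 on   -> normal size-based batching resumes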

// forceBatchPointForMPTFork forces batch points at the 1st and 2nd block after the MPT fork time.
//
// Design goals:
// - Minimal change: only affects batch-point decision logic.
// - Stability: CalculateCapWithProposalBlock can be called multiple times at the same height; the return value must stay consistent.
// - Performance: after handling (or skipping beyond) the fork boundary, no more HeaderByNumber calls are made.
func (e *Executor) forceBatchPointForMPTFork(height uint64, blockTime uint64, stateRoot common.Hash, blockHash common.Hash) (bool, error) {
// If we already decided to force at this height, keep returning true without extra RPCs.
if e.mptForkForceHeight == height && height != 0 {
return true, nil
}
// If fork boundary is already handled and this isn't a forced height, fast exit.
if e.mptForkStage >= 2 {
return false, nil
}

// Ensure we have fork time cached (0 means disabled).
if e.mptForkTime == 0 {
e.mptForkTime = e.l2Client.MPTForkTime()
}
forkTime := e.mptForkTime
if forkTime == 0 || blockTime < forkTime {
return false, nil
}
if height == 0 {
return false, nil
}

// Check parent block time to detect the 1st post-fork block (H1).
parent, err := e.l2Client.HeaderByNumber(context.Background(), big.NewInt(int64(height-1)))
if err != nil {
return false, err
}
if parent.Time < forkTime {
// Log H1 (the 1st post-fork block) state root
// This stateRoot is intended to be used as the Rollup contract "genesis state root"
// when we reset/re-initialize the genesis state root during the MPT upgrade.
Collaborator review comment:

Suggested change:
- // when we reset/re-initialize the genesis state root during the MPT upgrade.
+ // when we reset/re-initialize the state root during the MPT upgrade.

"it's not the genesis state root"

e.logger.Info(
"MPT_FORK_H1_GENESIS_STATE_ROOT",
"height", height,
"timestamp", blockTime,
"forkTime", forkTime,
"stateRoot", stateRoot.Hex(),
"blockHash", blockHash.Hex(),
)
e.mptForkStage = 1
e.mptForkForceHeight = height
return true, nil
}

// If parent is already post-fork, we may be at the 2nd post-fork block (H2) or later.
if height < 2 {
// We cannot be H2; mark done to avoid future calls.
e.mptForkStage = 2
return false, nil
}

grandParent, err := e.l2Client.HeaderByNumber(context.Background(), big.NewInt(int64(height-2)))
if err != nil {
return false, err
}
if grandParent.Time < forkTime {
// This is H2 (2nd post-fork block).
e.mptForkStage = 2
e.mptForkForceHeight = height
return true, nil
}

// Beyond H2: nothing to do (can't retroactively fix). Mark done for performance.
e.mptForkStage = 2
return false, nil
}

func (e *Executor) AppendBlsData(height int64, batchHash []byte, data l2node.BlsData) error {
if len(batchHash) != 32 {
return fmt.Errorf("wrong batchHash length. expected: 32, actual: %d", len(batchHash))
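The three-stage decision above is easiest to audit in isolation. The following is a minimal standalone sketch (decide, forkState, and the timestamps are illustrative, not from the codebase) that mirrors forceBatchPointForMPTFork with the two HeaderByNumber lookups replaced by pre-fetched parent and grandparent timestamps:

package main

import "fmt"

type forkState struct {
	stage       uint8  // 0: not handled, 1: forced H1, 2: done
	forceHeight uint64 // height at which a force was already decided
}

// decide mirrors forceBatchPointForMPTFork for a single proposal height.
func decide(s *forkState, height, blockTime, parentTime, grandParentTime, forkTime uint64) bool {
	if s.forceHeight == height && height != 0 {
		return true // stability: repeated calls at the same height agree
	}
	if s.stage >= 2 || forkTime == 0 || blockTime < forkTime || height == 0 {
		return false
	}
	if parentTime < forkTime { // H1: the 1st post-fork block
		s.stage, s.forceHeight = 1, height
		return true
	}
	if height >= 2 && grandParentTime < forkTime { // H2: the 2nd post-fork block
		s.stage, s.forceHeight = 2, height
		return true
	}
	s.stage = 2 // height < 2, or already beyond H2: nothing left to do
	return false
}

func main() {
	s := &forkState{}
	const fork = 1000
	fmt.Println(decide(s, 100, 1005, 990, 980, fork))   // H1: true
	fmt.Println(decide(s, 100, 1005, 990, 980, fork))   // same height again: still true
	fmt.Println(decide(s, 101, 1010, 1005, 990, fork))  // H2: true
	fmt.Println(decide(s, 102, 1015, 1010, 1005, fork)) // past the boundary: false, no lookups
}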
8 changes: 8 additions & 0 deletions node/core/executor.go
@@ -56,6 +56,13 @@ type Executor struct {
rollupABI *abi.ABI
batchingCache *BatchingCache

// MPT fork handling: force batch points at the 1st and 2nd block after fork.
// This state machine exists to avoid repeated HeaderByNumber calls after the fork is handled,
// while keeping results stable if CalculateCapWithProposalBlock is called multiple times at the same height.
mptForkTime uint64 // cached from geth eth_config.morph.mptForkTime (0 means disabled/unknown)
mptForkStage uint8 // 0: not handled, 1: forced H1, 2: done (forced H2 or skipped beyond H2)
mptForkForceHeight uint64 // if equals current height, must return true (stability across multiple calls)

logger tmlog.Logger
metrics *Metrics
}
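As a reading aid (hypothetical names, not present in the PR), the stage byte corresponds to three named states:

const (
	mptForkNotHandled uint8 = iota // 0: fork boundary not yet seen
	mptForkForcedH1                // 1: a batch point was forced at H1
	mptForkDone                    // 2: forced at H2, or skipped past the boundary
)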
@@ -148,6 +155,7 @@ func NewExecutor(newSyncFunc NewSyncerFunc, config *Config, tmPubKey crypto.PubK
batchingCache: NewBatchingCache(),
UpgradeBatchTime: config.UpgradeBatchTime,
blsKeyCheckForkHeight: config.BlsKeyCheckForkHeight,
mptForkTime: l2Client.MPTForkTime(),
logger: logger,
metrics: PrometheusMetrics("morphnode"),
}
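Note the two-step caching: the constructor seeds mptForkTime eagerly, and forceBatchPointForMPTFork re-reads it only while it is still 0, so a zero seed self-heals on a later call at no RPC cost (MPTForkTime is a local read). A hypothetical helper, not in the PR, condenses this:

func (e *Executor) cachedMPTForkTime() uint64 {
	if e.mptForkTime == 0 { // 0 = disabled/unknown at construction time
		e.mptForkTime = e.l2Client.MPTForkTime() // local read, no RPC
	}
	return e.mptForkTime
}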
7 changes: 7 additions & 0 deletions node/types/retryable_client.go
@@ -122,6 +122,13 @@ type RetryableClient struct {
logger tmlog.Logger
}

// MPTForkTime returns the configured MPT fork/switch timestamp, as supplied from geth's eth_config.
// Note: this returns the value stored locally in the client; it performs no RPC.
func (rc *RetryableClient) MPTForkTime() uint64 {
return rc.switchTime
}
// NewRetryableClient creates a new retryable client with the given switch time.
// Will retry calling the API if the connection is refused.
//
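Caller-side sketch (forkConfigured is a hypothetical helper, not part of this change): the zero value doubles as a disabled/unknown sentinel, so callers guard on it before doing any fork-specific work, exactly as forceBatchPointForMPTFork does:

// forkConfigured reports whether an MPT fork time is configured on the client.
func forkConfigured(client *RetryableClient) (uint64, bool) {
	t := client.MPTForkTime() // no RPC; reads the locally stored switch time
	return t, t != 0          // 0 means disabled/unknown
}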