diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2757ec6b0..0533ad911 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,126 @@
 # Changelog
 
+## v0.4.3
+
+This is a minor release and upgrading is optional.
+
+### User Facing Changes
+
+* Support automatically choosing the economic DA type for op-batcher. #209
+* Add two configs for el-sync optimization and re-enable fastnode mode. #201
+
+### What's Changed
+* feature: add haber fork config in deployment script by @redhdx in https://github.com/bnb-chain/opbnb/pull/202
+* feat(op-node): support multi clients to fetch blobs by @bnoieh in https://github.com/bnb-chain/opbnb/pull/199
+* feat: fastnode support by trigger el-sync when needed by @krish-nr in https://github.com/bnb-chain/opbnb/pull/201
+* fix(blob-client): don't append L1ArchiveBlobRpcAddr flag to config if not set by @bnoieh in https://github.com/bnb-chain/opbnb/pull/207
+* fix(devnet): fork offset should be 0x by @welkin22 in https://github.com/bnb-chain/opbnb/pull/210
+* fix(devnet): batcher uses its address to submit transactions by @welkin22 in https://github.com/bnb-chain/opbnb/pull/211
+* feat: op-batcher auto switch to economic DA type by @bnoieh in https://github.com/bnb-chain/opbnb/pull/209
+
+### Docker Images
+
+- ghcr.io/bnb-chain/op-node:v0.4.3
+- ghcr.io/bnb-chain/op-batcher:v0.4.3
+- ghcr.io/bnb-chain/op-proposer:v0.4.3
+
+**Full Changelog**: https://github.com/bnb-chain/opbnb/compare/v0.4.2...v0.4.3
+
+## v0.4.2
+
+This is the mainnet hard fork release.
+
+Four hard forks are scheduled to launch on the opBNB Mainnet:
+Shanghai/Canyon Time: 2024-06-20 08:00:00 AM UTC
+Delta Time: 2024-06-20 08:10:00 AM UTC
+Cancun/Ecotone Time: 2024-06-20 08:20:00 AM UTC
+Haber Time: 2024-06-20 08:30:00 AM UTC
+
+All mainnet `op-node` instances must be upgraded to this version before 2024-06-20 08:00:00 AM UTC.
+`op-geth` must also be upgraded to v0.4.2 accordingly; check [this](https://github.com/bnb-chain/op-geth/releases/tag/v0.4.2) for more details.
+
+### User Facing Changes
+
+If you are upgrading from v0.3.x to this version, please note the following configuration changes (an example launch command is shown below):
+- Removed `--l1.rpckind=bsc_fullnode`
+- Removed `--l2.engine-sync`
+- Removed `--l2.skip-sync-start-check`
+- To start engine-sync, use `--syncmode=execution-layer` (default value is `consensus-layer`)
+- Added `--l1.max-concurrency=20` to control the rate of requests to L1 endpoints.
+
+After the Cancun/Ecotone hard fork, DA data will be submitted to the BSC network in blob format. Regular BSC nodes only retain blob data from the past 18 days. If you are syncing from the genesis block or are more than 18 days behind the latest block, you will need to ensure that your configured L1 endpoint persists blob data for a longer period. We will ensure that the snapshot provided by this [snapshot repository](https://github.com/bnb-chain/opbnb-snapshot) stays within the 18-day range, so you can also use the snapshot to avoid relying on older blob data when starting a new node.
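+
+For reference, an illustrative start command under the new configuration might look like the following. This is only a sketch: the endpoint and path values are placeholders, the rest of your existing flags stay unchanged, and `--syncmode=execution-layer` is only needed if you want engine sync (the default remains `consensus-layer`).
+
+```bash
+# Dropped in this version: --l1.rpckind=bsc_fullnode, --l2.engine-sync, --l2.skip-sync-start-check
+op-node \
+  --l1=<your-bsc-l1-rpc-endpoint> \
+  --l2=<your-op-geth-engine-api-endpoint> \
+  --l2.jwt-secret=<path-to-jwt-secret> \
+  --syncmode=execution-layer \
+  --l1.max-concurrency=20
+```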
+
+### What's Changed
+* feature: update deployment script for opBNB by @redhdx in https://github.com/bnb-chain/opbnb/pull/196
+* fix: fix CI after 4844 merge by @welkin22 in https://github.com/bnb-chain/opbnb/pull/198
+* op-node: set finalityDelay to 15 to speed up finality update by @bnoieh in https://github.com/bnb-chain/opbnb/pull/200
+* config: Mainnet canyon/delta/ecotone fork time by @welkin22 in https://github.com/bnb-chain/opbnb/pull/203
+
+### Docker Images
+- ghcr.io/bnb-chain/op-node:v0.4.2
+- ghcr.io/bnb-chain/op-batcher:v0.4.2
+- ghcr.io/bnb-chain/op-proposer:v0.4.2
+
+**Full Changelog**: https://github.com/bnb-chain/opbnb/compare/v0.4.1...v0.4.2
+
+## v0.4.1
+
+This is a minor release and upgrading is optional.
+
+### User Facing Changes
+
+- Add flag `--txmgr.blob-gas-price-limit` for op-batcher to limit the maximum gas price of submitted transactions.
+
+### Partial Changelog
+
+* fix: fix devnet after 1.7.2 upstream merge by @welkin22 in https://github.com/bnb-chain/opbnb/pull/194
+* op-batcher: optimize tx submitting and add metrics by @bnoieh in https://github.com/bnb-chain/opbnb/pull/195
+
+### Docker Images
+
+- ghcr.io/bnb-chain/op-batcher:v0.4.1
+
+**Full Changelog**: https://github.com/bnb-chain/opbnb/compare/v0.4.0...v0.4.1
+
+## v0.4.0
+
+This release merges code from the upstream version v1.7.2 to transition the Testnet's DA data from calldata to blob format.
+
+Four hard forks are scheduled to launch on the opBNB Testnet:
+Snow Time: May-15-2024 06:00 AM UTC
+Shanghai/Canyon Time: May-15-2024 06:10 AM UTC
+Delta Time: May-15-2024 06:20 AM UTC
+Cancun/Ecotone Time: May-15-2024 06:30 AM UTC
+
+### User Facing Changes
+Nodes on the **Testnet** need to be upgraded to this version before the first hard fork time.
+**Note: This version is prepared for the Testnet; Mainnet nodes do not need to upgrade to it.**
+
+**Note: After the Cancun/Ecotone hard fork, DA data will be submitted to the BSC network in blob format. Regular BSC nodes only retain blob data from the past 18 days. If you are syncing from the genesis block or are more than 18 days behind the latest block, you will need to ensure that your configured L1 endpoint persists blob data for a longer period. We will ensure that the Testnet snapshot provided by this [snapshot repository](https://github.com/bnb-chain/opbnb-snapshot) stays within the 18-day range, so you can also use the snapshot to avoid relying on older blob data when starting a new node.**
+
+Changes in op-node configuration:
+- Removed `--l1.rpckind=bsc_fullnode`
+- Removed `--l2.engine-sync`
+- Removed `--l2.skip-sync-start-check`
+- To start engine-sync, use `--syncmode=execution-layer` (default value is `consensus-layer`)
+- Added `--l1.max-concurrency=20` to control the rate of requests to L1 endpoints.
+
+### What's Changed
+* feature(op-node): update opBNB qanet info by @redhdx in https://github.com/bnb-chain/opbnb/pull/187
+* feat: update qanet config by @redhdx in https://github.com/bnb-chain/opbnb/pull/188
+* feature(op-node): add opBNB qanet hard fork config by @redhdx in https://github.com/bnb-chain/opbnb/pull/189
+* Fix blob parsing problem by @welkin22 in https://github.com/bnb-chain/opbnb/pull/190
+* chore: fork config for 4844-2 qanet by @welkin22 in https://github.com/bnb-chain/opbnb/pull/191
+* Merge upstream v1.7.2 by @bnoieh in https://github.com/bnb-chain/opbnb/pull/184
+* config: Testnet 4844 fork time by @welkin22 in https://github.com/bnb-chain/opbnb/pull/192
+
+### Docker Images
+- ghcr.io/bnb-chain/op-node:v0.4.0
+- ghcr.io/bnb-chain/op-batcher:v0.4.0
+- ghcr.io/bnb-chain/op-proposer:v0.4.0
+
+**Full Changelog**: https://github.com/bnb-chain/opbnb/compare/v0.3.3...v0.4.0
+
 ## v0.3.3
 
 This is a minor release and upgrading is optional.
diff --git a/op-node/rollup/derive/engine_controller.go b/op-node/rollup/derive/engine_controller.go
index 7184976e5..f53ad7065 100644
--- a/op-node/rollup/derive/engine_controller.go
+++ b/op-node/rollup/derive/engine_controller.go
@@ -4,17 +4,19 @@ import (
     "context"
     "errors"
     "fmt"
+    "strings"
     "time"
 
+    "github.com/ethereum/go-ethereum"
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/log"
+
     "github.com/ethereum-optimism/optimism/op-node/rollup"
     "github.com/ethereum-optimism/optimism/op-node/rollup/async"
     "github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
     "github.com/ethereum-optimism/optimism/op-node/rollup/sync"
     "github.com/ethereum-optimism/optimism/op-service/clock"
     "github.com/ethereum-optimism/optimism/op-service/eth"
-    "github.com/ethereum/go-ethereum"
-    "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/log"
 )
 
 type syncStatusEnum int
@@ -32,7 +34,14 @@ const (
     syncStatusFinishedEL // EL sync is done & we should be performing consolidation
 )
 
-var ErrNoFCUNeeded = errors.New("no FCU call was needed")
+var (
+    ErrNoFCUNeeded             = errors.New("no FCU call was needed")
+    ErrELSyncTriggerUnexpected = errors.New("forced head needed for startup")
+
+    maxFCURetryAttempts = 5
+    fcuRetryDelay       = 5 * time.Second
+    needSyncWithEngine  = false
+)
 
 var _ EngineControl = (*EngineController)(nil)
 var _ LocalEngineControl = (*EngineController)(nil)
@@ -326,6 +335,10 @@ func (e *EngineController) checkNewPayloadStatus(status eth.ExecutePayloadStatus
         }
         // Allow SYNCING and ACCEPTED if engine EL sync is enabled
         return status == eth.ExecutionValid || status == eth.ExecutionSyncing || status == eth.ExecutionAccepted
+    } else if e.syncMode == sync.CLSync {
+        if status == eth.ExecutionInconsistent {
+            return true
+        }
     }
     return status == eth.ExecutionValid
 }
@@ -343,6 +356,16 @@ func (e *EngineController) checkForkchoiceUpdatedStatus(status eth.ExecutePayloa
     return status == eth.ExecutionValid
 }
 
+// checkELSyncTriggered checks the error returned by engine_newPayloadV1 to detect whether the engine has unexpectedly requested an EL sync.
+func (e *EngineController) checkELSyncTriggered(status eth.ExecutePayloadStatus, err error) bool {
+    if err == nil {
+        return false
+    } else if strings.Contains(err.Error(), ErrELSyncTriggerUnexpected.Error()) {
+        return e.syncMode != sync.ELSync && status == eth.ExecutionSyncing
+    }
+    return false
+}
+
 // checkUpdateUnsafeHead checks if we can update current unsafeHead for op-node
 func (e *EngineController) checkUpdateUnsafeHead(status eth.ExecutePayloadStatus) bool {
     if e.syncMode == sync.ELSync {
@@ -410,25 +433,25 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
     // Insert the payload & then call FCU
     status, err := e.engine.NewPayload(ctx, envelope.ExecutionPayload, envelope.ParentBeaconBlockRoot)
     if err != nil {
-        return NewTemporaryError(fmt.Errorf("failed to update insert payload: %w", err))
+        if strings.Contains(err.Error(), ErrELSyncTriggerUnexpected.Error()) {
+            log.Info("EL sync triggered unexpectedly")
+        } else {
+            return NewTemporaryError(fmt.Errorf("failed to update insert payload: %w", err))
+        }
     }
+    var (
+        needResetSafeHead      bool
+        needResetFinalizedHead bool
+    )
+
     //process inconsistent state
-    if status.Status == eth.ExecutionInconsistent {
+    if status.Status == eth.ExecutionInconsistent || e.checkELSyncTriggered(status.Status, err) {
         currentL2Info, err := e.getCurrentL2Info(ctx)
         if err != nil {
             return NewTemporaryError(fmt.Errorf("failed to process inconsistent state: %w", err))
         } else {
-            log.Info("engine has inconsistent state", "unsafe", currentL2Info.Unsafe.Number, "safe", currentL2Info.Safe.Number, "final", currentL2Info.Finalized.Number)
-            e.SetUnsafeHead(currentL2Info.Unsafe)
-            if currentL2Info.Safe.Number > currentL2Info.Unsafe.Number {
-                log.Info("current safe is higher than unsafe block, reset it", "set safe after", currentL2Info.Unsafe.Number, "set safe before", e.safeHead.Number)
-                e.SetSafeHead(currentL2Info.Unsafe)
-            }
-            if currentL2Info.Finalized.Number > currentL2Info.Unsafe.Number {
-                log.Info("current finalized is higher than unsafe block, reset it", "set Finalized after", currentL2Info.Unsafe.Number, "set Finalized before", e.safeHead.Number)
-                e.SetFinalizedHead(currentL2Info.Unsafe)
-            }
+            needResetSafeHead, needResetFinalizedHead = e.resetSafeAndFinalizedHead(currentL2Info)
         }
 
         fcuReq := eth.ForkchoiceState{
@@ -437,13 +460,9 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
             FinalizedBlockHash: e.finalizedHead.Hash,
         }
 
-        fcuRes, err := e.engine.ForkchoiceUpdate(ctx, &fcuReq, nil)
-        if fcuRes.PayloadStatus.Status == eth.ExecutionValid {
-            log.Info("engine processed data successfully")
-            e.needFCUCall = false
-            return nil
-        } else {
-            return NewTemporaryError(fmt.Errorf("engine failed to process inconsistent data: %w", err))
+        needSyncWithEngine, err = e.trySyncingWithEngine(ctx, fcuReq)
+        if err != nil {
+            return NewTemporaryError(err)
         }
     }
 
@@ -461,19 +480,30 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
     }
 
     //update unsafe,safe,finalize and send fcu for sync
-    if status.Status == eth.ExecutionInconsistent {
-        log.Info("engine meet inconsistent here")
+    if needSyncWithEngine {
+        log.Info("engine state is inconsistent, syncing status with engine")
         currentUnsafe, _ := e.engine.L2BlockRefByLabel(ctx, eth.Unsafe)
         //reset unsafe
         e.SetUnsafeHead(currentUnsafe)
-        //force reset safe,finalize
-        e.SetSafeHead(currentUnsafe)
-        e.SetFinalizedHead(currentUnsafe)
-        fc.HeadBlockHash = currentUnsafe.Hash
-        fc.SafeBlockHash = currentUnsafe.Hash
-        fc.FinalizedBlockHash = currentUnsafe.Hash
+
+        // force reset the safe/finalized heads only if needed
+        if needResetFinalizedHead {
+            e.SetFinalizedHead(currentUnsafe)
+            fc.FinalizedBlockHash = currentUnsafe.Hash
+            needResetFinalizedHead = false
+        }
+        if needResetSafeHead {
+            e.SetSafeHead(currentUnsafe)
+            fc.SafeBlockHash = currentUnsafe.Hash
+            needResetSafeHead = false
+        }
+
+        needSyncWithEngine = false
     }
+    // Ensure that the variables are used even if needSyncWithEngine is false
+    _ = needResetSafeHead
+    _ = needResetFinalizedHead
 
     if e.syncStatus == syncStatusFinishedELButNotFinalized {
         fc.SafeBlockHash = envelope.ExecutionPayload.BlockHash
@@ -504,6 +534,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et
     }
 
     e.needFCUCall = false
+    // The unsafe head is updated to the latest broadcast block anyway; when an inconsistent state is met, this triggers an EL sync in geth and accelerates recovery.
     if e.checkUpdateUnsafeHead(fcRes.PayloadStatus.Status) {
         e.SetUnsafeHead(ref)
     }
@@ -631,3 +662,50 @@ func (e *EngineController) getCurrentL2Info(ctx context.Context) (*sync.FindHead
         Finalized: finalized,
     }, nil
 }
+
+// resetSafeAndFinalizedHead resets the current safe/finalized heads so they stay consistent with the engine's unsafe head: if the current unsafe head is behind either of them, that head is reset to the unsafe head.
+func (e *EngineController) resetSafeAndFinalizedHead(currentL2Info *sync.FindHeadsResult) (bool, bool) {
+    var needResetSafeHead, needResetFinalizedHead bool
+
+    log.Info("engine has inconsistent state", "unsafe", currentL2Info.Unsafe.Number, "safe", currentL2Info.Safe.Number, "final", currentL2Info.Finalized.Number)
+    e.SetUnsafeHead(currentL2Info.Unsafe)
+
+    if currentL2Info.Safe.Number > currentL2Info.Unsafe.Number {
+        log.Info("current safe is higher than unsafe block, reset it", "set safe after", currentL2Info.Unsafe.Number, "set safe before", e.safeHead.Number)
+        e.SetSafeHead(currentL2Info.Unsafe)
+        needResetSafeHead = true
+    }
+
+    if currentL2Info.Finalized.Number > currentL2Info.Unsafe.Number {
+        log.Info("current finalized is higher than unsafe block, reset it", "set Finalized after", currentL2Info.Unsafe.Number, "set Finalized before", e.safeHead.Number)
+        e.SetFinalizedHead(currentL2Info.Unsafe)
+        needResetFinalizedHead = true
+    }
+
+    return needResetSafeHead, needResetFinalizedHead
+}
+
+// trySyncingWithEngine sends a forkchoice update asking the engine to discard data beyond its disk root so that it stays in sync with the current node status.
+func (e *EngineController) trySyncingWithEngine(ctx context.Context, fcuReq eth.ForkchoiceState) (bool, error) {
+    for attempts := 0; attempts < maxFCURetryAttempts; attempts++ {
+        fcuRes, err := e.engine.ForkchoiceUpdate(ctx, &fcuReq, nil)
+        if err != nil {
+            if strings.Contains(err.Error(), "context deadline exceeded") {
+                log.Warn("Failed to share forkchoice-updated signal", "attempt", attempts+1, "err", err)
+                time.Sleep(fcuRetryDelay)
+                continue
+            }
+            return false, fmt.Errorf("engine failed to process due to error: %w", err)
+        }
+
+        if fcuRes.PayloadStatus.Status == eth.ExecutionValid {
+            log.Info("engine processed data successfully")
+            e.needFCUCall = false
+            return true, nil
+        } else {
+            return false, fmt.Errorf("engine failed to process inconsistent data")
+        }
+    }
+
+    return false, fmt.Errorf("max retry attempts reached for trySyncingWithEngine")
+}
diff --git a/op-service/sources/engine_client.go b/op-service/sources/engine_client.go
index 183356f03..1bac00d8f 100644
--- a/op-service/sources/engine_client.go
+++ b/op-service/sources/engine_client.go
@@ -3,6 +3,7 @@ package sources
 import (
     "context"
     "fmt"
+    "strings"
     "time"
 
     "github.com/ethereum/go-ethereum/common"
@@ -12,6 +13,7 @@ import (
     "github.com/ethereum/go-ethereum/rpc"
 
     "github.com/ethereum-optimism/optimism/op-node/rollup"
+    "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
     "github.com/ethereum-optimism/optimism/op-service/client"
     "github.com/ethereum-optimism/optimism/op-service/eth"
     "github.com/ethereum-optimism/optimism/op-service/sources/caching"
@@ -136,6 +138,9 @@ func (s *EngineAPIClient) NewPayload(ctx context.Context, payload *eth.Execution
     e.Trace("Received payload execution result", "status", result.Status, "latestValidHash", result.LatestValidHash, "message", result.ValidationError)
     if err != nil {
+        // The engine signals that it needs a forced head before it can start EL sync; surface both the result and the error so the caller can detect the trigger.
+        if strings.Contains(err.Error(), derive.ErrELSyncTriggerUnexpected.Error()) {
+            return &result, err
+        }
         e.Error("Payload execution failed", "err", err)
         return nil, fmt.Errorf("failed to execute payload: %w", err)
     }
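
To make the intended behavior of the new trigger path easier to review, here is a minimal, self-contained Go sketch of the `checkELSyncTriggered` logic. The types below are simplified stand-ins, not the actual `eth`/`sync` package definitions; only the sentinel error text is taken from `ErrELSyncTriggerUnexpected` in the diff. It illustrates how the error surfaced by `NewPayload` combines with the payload status and the configured sync mode.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Simplified stand-ins for the real op-node types (eth.ExecutePayloadStatus, sync.Mode).
type payloadStatus string

const (
	executionValid   payloadStatus = "VALID"
	executionSyncing payloadStatus = "SYNCING"
)

type syncMode int

const (
	clSync syncMode = iota
	elSync
)

// Mirrors ErrELSyncTriggerUnexpected from the diff: geth reports this when it
// needs a forced head to start an EL sync.
var errELSyncTriggerUnexpected = errors.New("forced head needed for startup")

// checkELSyncTriggered mirrors the new helper: the trigger only counts when the
// engine returned SYNCING together with the sentinel error text, and the node is
// not already running in EL-sync mode.
func checkELSyncTriggered(mode syncMode, status payloadStatus, err error) bool {
	if err == nil {
		return false
	}
	if strings.Contains(err.Error(), errELSyncTriggerUnexpected.Error()) {
		return mode != elSync && status == executionSyncing
	}
	return false
}

func main() {
	engineErr := fmt.Errorf("engine_newPayload: %w", errELSyncTriggerUnexpected)

	fmt.Println(checkELSyncTriggered(clSync, executionSyncing, engineErr)) // true: CL-sync node asked to start EL sync
	fmt.Println(checkELSyncTriggered(elSync, executionSyncing, engineErr)) // false: already in EL-sync mode
	fmt.Println(checkELSyncTriggered(clSync, executionValid, nil))         // false: no error from the engine
}
```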