diff --git a/challenge-manager/chain-watcher/watcher.go b/challenge-manager/chain-watcher/watcher.go
index 28ce85e28..dc17fa38c 100644
--- a/challenge-manager/chain-watcher/watcher.go
+++ b/challenge-manager/chain-watcher/watcher.go
@@ -87,6 +87,7 @@ type Watcher struct {
     // Only track challenges for these parent assertion hashes.
     // Track all if empty / nil.
     trackChallengeParentAssertionHashes []protocol.AssertionHash
+    maxLookbackBlocks uint64
 }
 
 // New initializes a watcher service for frequently scanning the chain
@@ -99,6 +100,7 @@ func New(
     assertionConfirmingInterval time.Duration,
     averageTimeForBlockCreation time.Duration,
     trackChallengeParentAssertionHashes []protocol.AssertionHash,
+    maxLookbackBlocks uint64,
 ) (*Watcher, error) {
     return &Watcher{
         chain: chain,
@@ -114,6 +116,7 @@ func New(
         averageTimeForBlockCreation: averageTimeForBlockCreation,
         evilEdgesByLevel: threadsafe.NewMap(threadsafe.MapWithMetric[protocol.ChallengeLevel, *threadsafe.Set[protocol.EdgeId]]("evilEdgesByLevel")),
         trackChallengeParentAssertionHashes: trackChallengeParentAssertionHashes,
+        maxLookbackBlocks: maxLookbackBlocks,
     }, nil
 }
 
@@ -580,19 +583,8 @@ func (w *Watcher) AddEdge(ctx context.Context, edge protocol.SpecEdge) (bool, er
     if err != nil {
         return false, err
     }
-    challengeComplete, err := w.chain.IsChallengeComplete(ctx, challengeParentAssertionHash)
-    if err != nil {
-        return false, errors.Wrapf(
-            err,
-            "could not check if edge with parent assertion hash %#x is part of a completed challenge",
-            challengeParentAssertionHash.Hash,
-        )
-    }
     start, startRoot := edge.StartCommitment()
     end, endRoot := edge.EndCommitment()
-    if challengeComplete {
-        return false, nil
-    }
     chal, ok := w.challenges.TryGet(challengeParentAssertionHash)
     if !ok {
         tree := challengetree.New(
@@ -943,12 +935,16 @@ type filterRange struct {
 // Gets the start and end block numbers for our filter queries, starting from
-// the latest confirmed assertion's block number up to the latest block number.
+// at most maxLookbackBlocks behind the latest block, up to the latest block number.
 func (w *Watcher) getStartEndBlockNum(ctx context.Context) (filterRange, error) {
-    latestConfirmed, err := w.chain.LatestConfirmed(ctx, w.chain.GetCallOptsWithDesiredRpcHeadBlockNumber(&bind.CallOpts{Context: ctx}))
+    latestBlock, err := w.chain.Backend().HeaderU64(ctx)
     if err != nil {
         return filterRange{}, err
     }
-    firstBlock := latestConfirmed.CreatedAtBlock()
-    startBlock := firstBlock
+    startBlock := latestBlock
+    if w.maxLookbackBlocks < startBlock {
+        startBlock = startBlock - w.maxLookbackBlocks
+    } else {
+        startBlock = 0
+    }
     headerNumber, err := w.backend.HeaderU64(ctx)
     if err != nil {
         return filterRange{}, err
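The watcher change above does two things: AddEdge no longer returns early when a challenge is already complete (so a restarted validator can resync a finished challenge, which the new crash test below relies on), and getStartEndBlockNum now starts event filtering at most maxLookbackBlocks behind the chain head rather than at the latest confirmed assertion's creation block. Below is a minimal sketch of that clamping logic in isolation; the standalone function name is illustrative, and the patch implements this inline:

```go
package main

import "fmt"

// startBlockForSync returns the block to start event filtering from, never
// looking back more than maxLookback blocks behind the latest block.
// Illustrative only: the patch performs this clamp inline in getStartEndBlockNum.
func startBlockForSync(latestBlock, maxLookback uint64) uint64 {
	if maxLookback < latestBlock {
		return latestBlock - maxLookback
	}
	// The chain is younger than the lookback window; scan from genesis.
	return 0
}

func main() {
	fmt.Println(startBlockForSync(1_000_000, 151_200)) // 848800
	fmt.Println(startBlockForSync(1_000, 151_200))     // 0
}
```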
diff --git a/challenge-manager/manager_test.go b/challenge-manager/manager_test.go
index 94917c4d9..9d9f6a6ca 100644
--- a/challenge-manager/manager_test.go
+++ b/challenge-manager/manager_test.go
@@ -187,6 +187,7 @@ func setupEdgeTrackersForBisection(
         confInterval,
         avgBlockTime,
         nil,
+        100,
     )
     require.NoError(t, err)
     honestWatcher.SetEdgeManager(honestValidator)
@@ -218,6 +219,7 @@ func setupEdgeTrackersForBisection(
         confInterval,
         avgBlockTime,
         nil,
+        100,
     )
     require.NoError(t, err)
     evilWatcher.SetEdgeManager(evilValidator)
diff --git a/challenge-manager/stack.go b/challenge-manager/stack.go
index f77c3337e..c8005f4b5 100644
--- a/challenge-manager/stack.go
+++ b/challenge-manager/stack.go
@@ -7,6 +7,8 @@ package challengemanager
 import (
     "time"
 
+    "github.com/ccoveille/go-safecast"
+
     "github.com/ethereum/go-ethereum/common"
 
     "github.com/offchainlabs/bold/api/backend"
@@ -32,6 +34,7 @@ type stackParams struct {
     headerProvider HeaderProvider
     enableFastConfirmation bool
     assertionManagerOverride *assertions.Manager
+    maxLookbackBlocks int64
 }
 
 var defaultStackParams = stackParams{
@@ -47,6 +50,7 @@ var defaultStackParams = stackParams{
     headerProvider: nil,
     enableFastConfirmation: false,
     assertionManagerOverride: nil,
+    maxLookbackBlocks: blocksPerInterval(time.Second*12, 21*24*time.Hour), // Default to 3 weeks worth of blocks.
 }
 
 // StackOpt is a functional option to configure the stack.
@@ -130,6 +134,14 @@ func StackWithFastConfirmationEnabled() StackOpt {
     }
 }
 
+// StackWithSyncMaxLookbackBlocks specifies the number of blocks behind the latest block
+// to start syncing the chain watcher from.
+func StackWithSyncMaxLookbackBlocks(maxLookback int64) StackOpt {
+    return func(p *stackParams) {
+        p.maxLookbackBlocks = maxLookback
+    }
+}
+
 // OverrideAssertionManger can be used in tests to override the assertion
 // manager.
 func OverrideAssertionManager(asm *assertions.Manager) StackOpt {
@@ -160,6 +172,10 @@ func NewChallengeStack(
         }
         provider.UpdateAPIDatabase(apiDB)
     }
+    maxLookbackBlocks, err := safecast.ToUint64(params.maxLookbackBlocks)
+    if err != nil {
+        return nil, err
+    }
 
     // Create the chain watcher.
     watcher, err := watcher.New(
@@ -170,6 +186,7 @@ func NewChallengeStack(
         params.confInterval,
         params.avgBlockTime,
         params.trackChallengeParentAssertionHashes,
+        maxLookbackBlocks,
     )
     if err != nil {
         return nil, err
@@ -228,3 +245,8 @@ func NewChallengeStack(
     }
     return New(chain, provider, watcher, asm, cmOpts...)
 }
+
+func blocksPerInterval(avgBlockTime time.Duration, interval time.Duration) int64 {
+    // Calculate the number of blocks as an integer division
+    return int64(interval / avgBlockTime)
+}
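For reference, the new default of blocksPerInterval(time.Second*12, 21*24*time.Hour) works out to 151,200 blocks, i.e. three weeks at one block every 12 seconds. A small sketch of the arithmetic, mirroring the unexported helper added above; the one-week value is only a hypothetical example of what a caller might pass to StackWithSyncMaxLookbackBlocks:

```go
package main

import (
	"fmt"
	"time"
)

// blocksPerInterval mirrors the helper added in stack.go: how many blocks fit in
// an interval at a given average block time, using integer division.
func blocksPerInterval(avgBlockTime, interval time.Duration) int64 {
	return int64(interval / avgBlockTime)
}

func main() {
	// The new default: three weeks of blocks at 12 seconds each.
	fmt.Println(blocksPerInterval(12*time.Second, 21*24*time.Hour)) // 151200
	// A hypothetical shorter window (one week) a caller could pass to
	// StackWithSyncMaxLookbackBlocks instead of the default.
	fmt.Println(blocksPerInterval(12*time.Second, 7*24*time.Hour)) // 50400
}
```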
diff --git a/testing/endtoend/BUILD.bazel b/testing/endtoend/BUILD.bazel
index b16a8a3c6..1207f66f1 100644
--- a/testing/endtoend/BUILD.bazel
+++ b/testing/endtoend/BUILD.bazel
@@ -13,6 +13,7 @@ go_test(
     name = "endtoend_test",
     size = "large",
     srcs = [
+        "e2e_crash_test.go",
         "e2e_test.go",
         "helpers_test.go",
     ],
@@ -26,7 +27,9 @@ go_test(
         "//chain-abstraction/sol-implementation",
         "//challenge-manager",
         "//challenge-manager/types",
+        "//runtime",
         "//solgen/go/bridgegen",
+        "//solgen/go/challengeV2gen",
         "//solgen/go/mocksgen",
         "//solgen/go/rollupgen",
         "//testing",
diff --git a/testing/endtoend/e2e_crash_test.go b/testing/endtoend/e2e_crash_test.go
new file mode 100644
index 000000000..68d411d5b
--- /dev/null
+++ b/testing/endtoend/e2e_crash_test.go
@@ -0,0 +1,312 @@
+// Copyright 2023-2024, Offchain Labs, Inc.
+// For license information, see:
+// https://github.com/offchainlabs/bold/blob/main/LICENSE.md
+
+package endtoend
+
+import (
+    "context"
+    "math/big"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/ethereum/go-ethereum/accounts/abi/bind"
+    "github.com/ethereum/go-ethereum/common"
+    gethtypes "github.com/ethereum/go-ethereum/core/types"
+
+    protocol "github.com/offchainlabs/bold/chain-abstraction"
+    cm "github.com/offchainlabs/bold/challenge-manager"
+    "github.com/offchainlabs/bold/challenge-manager/types"
+    retry "github.com/offchainlabs/bold/runtime"
+    "github.com/offchainlabs/bold/solgen/go/challengeV2gen"
+    "github.com/offchainlabs/bold/solgen/go/rollupgen"
+    challenge_testing "github.com/offchainlabs/bold/testing"
+    "github.com/offchainlabs/bold/testing/endtoend/backend"
+    statemanager "github.com/offchainlabs/bold/testing/mocks/state-provider"
+    "github.com/offchainlabs/bold/testing/setup"
+)
+
+// This test ensures a challenge can complete even if the honest validator crashes mid-challenge.
+// We cancel the honest validator's context after it opens the first subchallenge and prove that it
+// can restart and carry things out to confirm the honest, claimed assertion in the challenge.
+func TestEndToEnd_HonestValidatorCrashes(t *testing.T) {
+    t.Parallel()
+    neutralCtx, neutralCancel := context.WithCancel(context.Background())
+    defer neutralCancel()
+    evilCtx, evilCancel := context.WithCancel(context.Background())
+    defer evilCancel()
+    honestCtx, honestCancel := context.WithCancel(context.Background())
+    defer honestCancel()
+
+    protocolCfg := defaultProtocolParams()
+    timeCfg := defaultTimeParams()
+    timeCfg.blockTime = time.Second
+    inboxCfg := defaultInboxParams()
+
+    challengeTestingOpts := []challenge_testing.Opt{
+        challenge_testing.WithConfirmPeriodBlocks(protocolCfg.challengePeriodBlocks),
+        challenge_testing.WithLayerZeroHeights(&protocolCfg.layerZeroHeights),
+        challenge_testing.WithNumBigStepLevels(protocolCfg.numBigStepLevels),
+    }
+    deployOpts := []setup.Opt{
+        setup.WithMockBridge(),
+        setup.WithMockOneStepProver(),
+        setup.WithNumAccounts(5),
+        setup.WithChallengeTestingOpts(challengeTestingOpts...),
+    }
+
+    simBackend, err := backend.NewSimulated(timeCfg.blockTime, deployOpts...)
+    require.NoError(t, err)
+    bk := simBackend
+
+    rollupAddr, err := bk.DeployRollup(neutralCtx, challengeTestingOpts...)
+    require.NoError(t, err)
+
+    require.NoError(t, bk.Start(neutralCtx))
+
+    accounts := bk.Accounts()
+    bk.Commit()
+
+    rollupUserBindings, err := rollupgen.NewRollupUserLogic(rollupAddr.Rollup, bk.Client())
+    require.NoError(t, err)
+    bridgeAddr, err := rollupUserBindings.Bridge(&bind.CallOpts{})
+    require.NoError(t, err)
+    dataHash := common.Hash{1}
+    enqueueSequencerMessageAsExecutor(
+        t, accounts[0], rollupAddr.UpgradeExecutor, bk.Client(), bridgeAddr, seqMessage{
+            dataHash: dataHash,
+            afterDelayedMessagesRead: big.NewInt(1),
+            prevMessageCount: big.NewInt(1),
+            newMessageCount: big.NewInt(2),
+        },
+    )
+
+    baseStateManagerOpts := []statemanager.Opt{
+        statemanager.WithNumBatchesRead(inboxCfg.numBatchesPosted),
+        statemanager.WithLayerZeroHeights(&protocolCfg.layerZeroHeights, protocolCfg.numBigStepLevels),
+    }
+    honestStateManager, err := statemanager.NewForSimpleMachine(t, baseStateManagerOpts...)
+    require.NoError(t, err)
+
+    shp := &simpleHeaderProvider{b: bk, chs: make([]chan<- *gethtypes.Header, 0)}
+    shp.Start(neutralCtx)
+
+    baseStackOpts := []cm.StackOpt{
+        cm.StackWithMode(types.MakeMode),
+        cm.StackWithPollingInterval(timeCfg.assertionScanningInterval),
+        cm.StackWithPostingInterval(timeCfg.assertionPostingInterval),
+        cm.StackWithAverageBlockCreationTime(timeCfg.blockTime),
+        cm.StackWithConfirmationInterval(timeCfg.assertionConfirmationAttemptInterval),
+        cm.StackWithHeaderProvider(shp),
+    }
+
+    name := "honest"
+    txOpts := accounts[1]
+    //nolint:gocritic
+    honestOpts := append(
+        baseStackOpts,
+        cm.StackWithName(name),
+    )
+    honestChain := setupAssertionChain(t, honestCtx, bk.Client(), rollupAddr.Rollup, txOpts)
+    honestManager, err := cm.NewChallengeStack(honestChain, honestStateManager, honestOpts...)
+    require.NoError(t, err)
+
+    totalOpcodes := totalWasmOpcodes(&protocolCfg.layerZeroHeights, protocolCfg.numBigStepLevels)
+    t.Logf("Total wasm opcodes in test: %d", totalOpcodes)
+
+    assertionDivergenceHeight := uint64(1)
+    assertionBlockHeightDifference := int64(1)
+
+    machineDivergenceStep := uint64(1)
+    //nolint:gocritic
+    evilStateManagerOpts := append(
+        baseStateManagerOpts,
+        statemanager.WithMachineDivergenceStep(machineDivergenceStep),
+        statemanager.WithBlockDivergenceHeight(assertionDivergenceHeight),
+        statemanager.WithDivergentBlockHeightOffset(assertionBlockHeightDifference),
+    )
+    evilStateManager, err := statemanager.NewForSimpleMachine(t, evilStateManagerOpts...)
+    require.NoError(t, err)
+
+    // Honest validator has index 1 in the accounts slice, as 0 is admin, so
+    // evil ones should start at 2.
+    evilTxOpts := accounts[2]
+    //nolint:gocritic
+    evilOpts := append(
+        baseStackOpts,
+        cm.StackWithName("evil"),
+    )
+    evilChain := setupAssertionChain(t, evilCtx, bk.Client(), rollupAddr.Rollup, evilTxOpts)
+    evilManager, err := cm.NewChallengeStack(evilChain, evilStateManager, evilOpts...)
+    require.NoError(t, err)
+
+    chalManagerAddr := honestChain.SpecChallengeManager().Address()
+    cmBindings, err := challengeV2gen.NewEdgeChallengeManager(chalManagerAddr, bk.Client())
+    require.NoError(t, err)
+
+    honestManager.Start(honestCtx)
+    evilManager.Start(evilCtx)
+
+    t.Run("crashes mid-challenge and recovers to complete it", func(t *testing.T) {
+        // We wait for the honest validator to open its first subchallenge edge, then cancel the honest
+        // validator's context. After a short wait we restart the honest validator and expect the honest,
+        // claimed assertion to still be confirmed by time.
+        // At that point no more edges will be added, so we then scrape all the edges added to the challenge
+        // and wait until all the essential root edges are also confirmed by time.
+        chainId, err2 := bk.Client().ChainID(neutralCtx)
+        require.NoError(t, err2)
+        var foundSubchalEdge bool
+        for neutralCtx.Err() == nil && !foundSubchalEdge {
+            it, err3 := cmBindings.FilterEdgeAdded(nil, nil, nil, nil)
+            require.NoError(t, err3)
+            for it.Next() {
+                txHash := it.Event.Raw.TxHash
+                tx, _, err3 := bk.Client().TransactionByHash(neutralCtx, txHash)
+                require.NoError(t, err3)
+                sender, err3 := gethtypes.Sender(gethtypes.NewCancunSigner(chainId), tx)
+                require.NoError(t, err3)
+                if sender != txOpts.From {
+                    continue
+                }
+                if it.Event.Level > 0 {
+                    foundSubchalEdge = true
+                    t.Log("Honest validator made a subchallenge")
+                    break // The honest validator made a subchallenge.
+                }
+            }
+            time.Sleep(500 * time.Millisecond) // Don't spam the backend.
+        }
+        // Cancel the honest context.
+        honestCancel()
+        t.Log("Honest context has been canceled")
+
+        // We then restart the honest validator after a few seconds of wait time.
+        time.Sleep(time.Second * 3)
+
+        honestCtx, honestCancel = context.WithCancel(context.Background())
+        honestChain := setupAssertionChain(t, honestCtx, bk.Client(), rollupAddr.Rollup, txOpts)
+        honestManager, err := cm.NewChallengeStack(honestChain, honestStateManager, honestOpts...)
+        require.NoError(t, err)
+
+        honestManager.Start(honestCtx)
+
+        rc, err2 := rollupgen.NewRollupCore(rollupAddr.Rollup, bk.Client())
+        require.NoError(t, err2)
+
+        // Wait until a challenged assertion is confirmed by time.
+        var confirmed bool
+        for neutralCtx.Err() == nil && !confirmed {
+            var i *rollupgen.RollupCoreAssertionConfirmedIterator
+            i, err = retry.UntilSucceeds(neutralCtx, func() (*rollupgen.RollupCoreAssertionConfirmedIterator, error) {
+                return rc.FilterAssertionConfirmed(nil, nil)
+            })
+            require.NoError(t, err)
+            for i.Next() {
+                creationInfo, err2 := evilChain.ReadAssertionCreationInfo(evilCtx, protocol.AssertionHash{Hash: i.Event.AssertionHash})
+                require.NoError(t, err2)
+
+                var parent rollupgen.AssertionNode
+                parent, err = retry.UntilSucceeds(neutralCtx, func() (rollupgen.AssertionNode, error) {
+                    return rc.GetAssertion(&bind.CallOpts{Context: neutralCtx}, creationInfo.ParentAssertionHash.Hash)
+                })
+                require.NoError(t, err)
+
+                tx, _, err2 := bk.Client().TransactionByHash(neutralCtx, creationInfo.TransactionHash)
+                require.NoError(t, err2)
+                sender, err2 := gethtypes.Sender(gethtypes.NewCancunSigner(chainId), tx)
+                require.NoError(t, err2)
+                honestConfirmed := sender == txOpts.From
+
+                isChallengeChild := parent.FirstChildBlock > 0 && parent.SecondChildBlock > 0
+                if !isChallengeChild {
+                    // Assertion must be a challenge child.
+                    continue
+                }
+                // We expect the honest party to have confirmed it.
+                if !honestConfirmed {
+                    t.Fatal("Evil party confirmed the assertion by challenge win")
+                }
+                confirmed = true
+                break
+            }
+            time.Sleep(500 * time.Millisecond) // Don't spam the backend.
+        }
+        // Once the honest, claimed assertion in the challenge is confirmed by time, we
+        // then continue the test.
+        t.Log("Assertion was confirmed by time")
+        honestCancel()
+    })
+    // This test ensures that an honest validator can crash after a challenge has completed, can resync
+    // the completed challenge, and continue playing the game until all essential edges are confirmed.
+    // This is to ensure that even if a challenge is completed, we can still resync it and continue
+    // playing for the sake of refunding honest stakes.
+    t.Run(
+        "crashes once challenged assertion is confirmed and restarts to confirm essential edges",
+        func(t *testing.T) {
+            // We restart the honest validator after a few seconds of wait time.
+            time.Sleep(time.Second * 5)
+
+            ctx := context.Background()
+            honestChain := setupAssertionChain(t, ctx, bk.Client(), rollupAddr.Rollup, txOpts)
+            honestManager, err := cm.NewChallengeStack(honestChain, honestStateManager, honestOpts...)
+            require.NoError(t, err)
+
+            honestManager.Start(ctx)
+
+            t.Log("Restarted honest validator to continue playing game after challenge has finished")
+
+            // We then expect that all essential root edges created by the honest validator are confirmed by time.
+            // Scrape all the honest edges onchain (the ones made by the honest address).
+            // Check if the edges that have claim id != None are confirmed (those are essential root edges)
+            // and also check one step edges from honest party are confirmed.
+            honestEssentialRootIds := make(map[common.Hash]bool, 0)
+            chainId, err := bk.Client().ChainID(neutralCtx)
+            require.NoError(t, err)
+            it, err := cmBindings.FilterEdgeAdded(nil, nil, nil, nil)
+            require.NoError(t, err)
+            for it.Next() {
+                txHash := it.Event.Raw.TxHash
+                tx, _, err2 := bk.Client().TransactionByHash(neutralCtx, txHash)
+                require.NoError(t, err2)
+                sender, err2 := gethtypes.Sender(gethtypes.NewCancunSigner(chainId), tx)
+                require.NoError(t, err2)
+                if sender != txOpts.From {
+                    continue
+                }
+                // Skip edges that are not essential roots.
+                if it.Event.ClaimId == (common.Hash{}) {
+                    continue
+                }
+                honestEssentialRootIds[it.Event.EdgeId] = false
+            }
+            // Wait until all of the honest essential root ids are confirmed.
+            startBlk, err := bk.Client().HeaderU64(neutralCtx)
+            require.NoError(t, err)
+            chalPeriodBlocks, err := cmBindings.ChallengePeriodBlocks(&bind.CallOpts{})
+            require.NoError(t, err)
+            totalPeriod := chalPeriodBlocks * uint64(len(honestEssentialRootIds))
+            confirmedCount := 0
+            for confirmedCount < len(honestEssentialRootIds) {
+                latestBlk, err2 := bk.Client().HeaderU64(neutralCtx)
+                require.NoError(t, err2)
+                numBlocksElapsed := latestBlk - startBlk
+                if numBlocksElapsed > totalPeriod {
+                    t.Fatalf("%d blocks have passed without essential edges being confirmed", numBlocksElapsed)
+                }
+                for k, markedConfirmed := range honestEssentialRootIds {
+                    edge, err2 := cmBindings.GetEdge(&bind.CallOpts{}, k)
+                    require.NoError(t, err2)
+                    if edge.Status == 1 && !markedConfirmed {
+                        confirmedCount += 1
+                        honestEssentialRootIds[k] = true
+                        t.Logf("Confirmed %d honest essential edges, got edge at level %d", confirmedCount, edge.Level)
+                    }
+                }
+                time.Sleep(500 * time.Millisecond) // Don't spam the backend.
+            }
+        })
+}
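Both subtests attribute on-chain EdgeAdded events and assertion confirmations to the honest account by recovering the sender of the transaction that emitted them. A minimal sketch of that attribution step, factored into a helper; the helper name and the concrete *ethclient.Client type are assumptions, and the test performs these calls inline against bk.Client():

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	gethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// senderOf recovers the from-address of the transaction with the given hash so
// that events can be attributed to a specific validator account. Illustrative
// helper; the test above inlines these calls.
func senderOf(ctx context.Context, client *ethclient.Client, chainID *big.Int, txHash common.Hash) (common.Address, error) {
	tx, _, err := client.TransactionByHash(ctx, txHash)
	if err != nil {
		return common.Address{}, err
	}
	// The Cancun signer handles legacy and typed transactions alike.
	return gethtypes.Sender(gethtypes.NewCancunSigner(chainID), tx)
}
```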
diff --git a/testing/endtoend/e2e_test.go b/testing/endtoend/e2e_test.go
index d97ad9d53..43da9d246 100644
--- a/testing/endtoend/e2e_test.go
+++ b/testing/endtoend/e2e_test.go
@@ -280,7 +280,6 @@ func runEndToEndTest(t *testing.T, cfg *e2eConfig) {
     honestManager, err := cm.NewChallengeStack(honestChain, honestStateManager, honestOpts...)
     require.NoError(t, err)
 
-    // Diverge exactly at the last opcode within the block.
     totalOpcodes := totalWasmOpcodes(&cfg.protocol.layerZeroHeights, cfg.protocol.numBigStepLevels)
     t.Logf("Total wasm opcodes in test: %d", totalOpcodes)
 
@@ -289,6 +288,7 @@ func runEndToEndTest(t *testing.T, cfg *e2eConfig) {
 
     evilChallengeManagers := make([]*cm.Manager, cfg.actors.numEvilValidators)
     for i := uint64(0); i < cfg.actors.numEvilValidators; i++ {
+        // Diverge at a random opcode within the block.
         machineDivergenceStep := randUint64(i)
         if machineDivergenceStep == 0 {
             machineDivergenceStep = 1
diff --git a/testing/endtoend/headers.go b/testing/endtoend/headers.go
index b21c401c2..e349f17dc 100644
--- a/testing/endtoend/headers.go
+++ b/testing/endtoend/headers.go
@@ -48,6 +48,16 @@ func (s *simpleHeaderProvider) Subscribe(requireBlockNrUpdates bool) (<-chan *ty
     ch := make(chan *types.Header, 100)
     s.chs = append(s.chs, ch)
     return ch, func() {
+        s.removeChannel(ch)
         close(ch)
     }
 }
+
+func (s *simpleHeaderProvider) removeChannel(ch chan<- *types.Header) {
+    for i, sch := range s.chs {
+        if sch == ch {
+            s.chs = append(s.chs[:i], s.chs[i+1:]...)
+            return
+        }
+    }
+}
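The new removeChannel helper drops an unsubscribed channel from the provider's fan-out list before it is closed; otherwise a later broadcast could send on a closed channel and panic. A standalone sketch of that pattern, using a simplified stand-in type rather than the real simpleHeaderProvider:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// broadcaster is a simplified stand-in for simpleHeaderProvider, showing why a
// channel must be removed from the fan-out list before it is closed.
type broadcaster struct {
	chs []chan<- *types.Header
}

func (b *broadcaster) subscribe() (chan *types.Header, func()) {
	ch := make(chan *types.Header, 100)
	b.chs = append(b.chs, ch)
	return ch, func() {
		b.remove(ch) // Remove first so broadcast never sends on a closed channel.
		close(ch)
	}
}

func (b *broadcaster) remove(ch chan<- *types.Header) {
	for i, sch := range b.chs {
		if sch == ch {
			b.chs = append(b.chs[:i], b.chs[i+1:]...)
			return
		}
	}
}

func (b *broadcaster) broadcast(h *types.Header) {
	for _, ch := range b.chs {
		ch <- h
	}
}

func main() {
	b := &broadcaster{}
	_, unsub := b.subscribe()
	unsub()
	b.broadcast(&types.Header{}) // Safe: the closed channel is no longer tracked.
	fmt.Println("no panic")
}
```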