From c97fac33d78085b95fe073c0d055e716e230bc56 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 29 Mar 2023 14:20:24 +0000 Subject: [PATCH 01/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index f260155e26..b884ded522 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit f260155e26e54e8aa2add60d7c2628b39e2526b9 +Subproject commit b884ded52258c18eb4bcaf89b0d41fc3c91b30fe From 65aaa49062e2e1d4a62ad1cf4ccf594740d9006d Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 29 Mar 2023 17:45:54 +0000 Subject: [PATCH 02/20] add initial test for skipping saving states --- system_tests/common_test.go | 12 ++-- system_tests/forwarder_test.go | 8 +-- system_tests/ipc_test.go | 2 +- system_tests/recreatestate_rpc_test.go | 80 +++++++++++++++++++++----- 4 files changed, 80 insertions(+), 22 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 427807472c..4888dea664 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -194,7 +194,7 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, return createTestL1BlockChainWithConfig(t, l1info, nil) } -func getTestStackConfig(t *testing.T) *node.Config { +func getTestStackConfig(t *testing.T, dataDir string) *node.Config { stackConfig := node.DefaultConfig stackConfig.HTTPPort = 0 stackConfig.WSPort = 0 @@ -203,7 +203,11 @@ func getTestStackConfig(t *testing.T) *node.Config { stackConfig.P2P.NoDial = true stackConfig.P2P.NoDiscovery = true stackConfig.P2P.NAT = nil - stackConfig.DataDir = t.TempDir() + if dataDir != "" { + stackConfig.DataDir = dataDir + } else { + stackConfig.DataDir = t.TempDir() + } return &stackConfig } @@ -277,7 +281,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no l1info = NewL1TestInfo(t) } if stackConfig == nil { - stackConfig = getTestStackConfig(t) + stackConfig = 
getTestStackConfig(t, "") } l1info.GenerateAccount("Faucet") @@ -587,7 +591,7 @@ func Create2ndNodeWithConfig( l1client := ethclient.NewClient(l1rpcClient) if stackConfig == nil { - stackConfig = getTestStackConfig(t) + stackConfig = getTestStackConfig(t, "") } l2stack, err := node.New(stackConfig) Require(t, err) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index bd02efd2c4..c23b664f0a 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -29,7 +29,7 @@ func TestStaticForwarder(t *testing.T) { ipcPath := filepath.Join(t.TempDir(), "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConfig := getTestStackConfig(t) + stackConfig := getTestStackConfig(t, "") ipcConfig.Apply(stackConfig) nodeConfigA := arbnode.ConfigDefaultL1Test() nodeConfigA.BatchPoster.Enable = false @@ -84,7 +84,7 @@ func createFallbackSequencer( t *testing.T, ctx context.Context, ipcPath string, redisUrl string, ) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := getTestStackConfig(t, "") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) @@ -105,7 +105,7 @@ func createForwardingNode( redisUrl string, fallbackPath string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := getTestStackConfig(t, "") if ipcPath != "" { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath @@ -130,7 +130,7 @@ func createSequencer( ipcPath string, redisUrl string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := getTestStackConfig(t) + stackConfig := getTestStackConfig(t, "") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index 
ad5a8fbc64..fc48d95fc2 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -18,7 +18,7 @@ func TestIpcRpc(t *testing.T) { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConf := getTestStackConfig(t) + stackConf := getTestStackConfig(t, "") ipcConfig.Apply(stackConf) ctx, cancel := context.WithCancel(context.Background()) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 6e202ae17a..fecce54859 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" @@ -22,7 +23,7 @@ import ( "github.com/offchainlabs/nitro/util/testhelpers" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, cancel func()) { +func prepareNode(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, dataDir string, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, cancel func(), stackDataDir string) { t.Helper() nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth @@ -30,8 +31,10 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 cacheConfig := &core.CacheConfig{ // Arbitrum Config Options - TriesInMemory: 128, - TrieRetention: 30 * time.Minute, + TriesInMemory: 128, + TrieRetention: 30 * time.Minute, + MaxNumberOfBlocksToSkipStateSaving: skipBlocks, + 
MaxAmountOfGasToSkipStateSaving: skipGas, // disable caching of states in BlockChain.stateCache TrieCleanLimit: 0, @@ -43,11 +46,21 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD SnapshotLimit: 256, SnapshotWait: true, } - l2info, node, l2client, _, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig) + stackConfig := getTestStackConfig(t, dataDir) + stackDataDir = stackConfig.DataDir + l2info, node, l2client, _, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, stackConfig, cacheConfig) cancel = func() { defer requireClose(t, l1stack) defer node.StopAndWait() } + bc = node.Backend.ArbInterface().BlockChain() + db = node.Backend.ChainDb() + return +} + +func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, cancel func(), stackDataDir string) { + t.Helper() + node, bc, db, l2client, l2info, cancel, stackDataDir = prepareNode(t, ctx, maxRecreateStateDepth, "", skipBlocks, skipGas) l2info.GenerateAccount("User2") var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { @@ -60,9 +73,6 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD _, err := EnsureTxSucceeded(ctx, l2client, tx) testhelpers.RequireImpl(t, err) } - bc = node.Backend.ArbInterface().BlockChain() - db = node.Backend.ChainDb() - return } @@ -110,7 +120,7 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, 
arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -134,7 +144,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -158,7 +168,7 @@ func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(200) - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -181,7 +191,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -214,7 +224,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, 
arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -238,7 +248,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4) + _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -265,3 +275,47 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { testhelpers.FailImpl(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) } } + +func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { + _ = testhelpers.InitTestLog(t, log.LvlTrace) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, bc, _, l2client, _, cancelNode, stackDataDir := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 101, 5, 0) + genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum + lastBlock, err := l2client.BlockNumber(ctx) + testhelpers.RequireImpl(t, err) + if lastBlock < genesis+100 { + testhelpers.FailImpl(t, "Internal test error - not enough blocks produced during preparation, want:", genesis+100, "have:", lastBlock) + } + expectedBalance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + testhelpers.RequireImpl(t, err) + cancelNode() + _, bc, _, l2client, _, cancelNode, _ = prepareNode(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, stackDataDir, 0, 0) + defer cancelNode() + for i := genesis + 1; i <= genesis+100; i++ { + block := bc.GetBlockByNumber(i) + if block == nil { + Fail(t, "header not found for block number:", i) + } + _, err := 
bc.StateAt(block.Root()) + if (i-genesis-1)%5 == 0 { + Require(t, err, "state not found, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash(), "err:", err) + t.Log("have state for block:", i) + } else { + if err == nil { + Fail(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + } + expectedErr := &trie.MissingNodeError{} + if !errors.As(err, &expectedErr) { + Fail(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + } + t.Log("no state for block:", i) + } + } + + balance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + testhelpers.RequireImpl(t, err) + if balance.Cmp(expectedBalance) != 0 { + testhelpers.FailImpl(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) + } +} From 3f466283dfea2272ecd9610b06dad19af71aa7cb Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 30 Mar 2023 20:53:14 +0000 Subject: [PATCH 03/20] improve not saving states tests --- system_tests/recreatestate_rpc_test.go | 183 ++++++++++++++++++++----- 1 file changed, 145 insertions(+), 38 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index fecce54859..e0b98f6524 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -15,21 +15,22 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/testhelpers" ) -func prepareNode(t *testing.T, ctx 
context.Context, maxRecreateStateDepth int64, dataDir string, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, cancel func(), stackDataDir string) { +func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, l1info info, l1stack *node.Node, nodeConfig *arbnode.Config, cacheConfig *core.CacheConfig, cancel func()) { t.Helper() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig = arbnode.ConfigDefaultL1Test() nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - cacheConfig := &core.CacheConfig{ + cacheConfig = &core.CacheConfig{ // Arbitrum Config Options TriesInMemory: 128, TrieRetention: 30 * time.Minute, @@ -46,21 +47,11 @@ func prepareNode(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, SnapshotLimit: 256, SnapshotWait: true, } - stackConfig := getTestStackConfig(t, dataDir) - stackDataDir = stackConfig.DataDir - l2info, node, l2client, _, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, stackConfig, cacheConfig) + l2info, node, l2client, _, l1info, _, _, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig) cancel = func() { defer requireClose(t, l1stack) defer node.StopAndWait() } - bc = node.Backend.ArbInterface().BlockChain() - db = node.Backend.ChainDb() - return -} - -func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, cancel func(), stackDataDir string) { - t.Helper() - node, bc, db, l2client, 
l2info, cancel, stackDataDir = prepareNode(t, ctx, maxRecreateStateDepth, "", skipBlocks, skipGas) l2info.GenerateAccount("User2") var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { @@ -73,6 +64,9 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD _, err := EnsureTxSucceeded(ctx, l2client, tx) testhelpers.RequireImpl(t, err) } + bc = node.Backend.ArbInterface().BlockChain() + db = node.Backend.ChainDb() + return } @@ -120,7 +114,7 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -144,7 +138,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -168,7 +162,7 @@ func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(200) - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -191,7 +185,7 
@@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -224,7 +218,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -248,7 +242,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, cancelNode, _ := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) + _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -276,46 +270,159 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { } } -func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { - _ = testhelpers.InitTestLog(t, log.LvlTrace) +func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks uint32, skipGas uint64, txCount int) { + t.Helper() + maxRecreateStateDepth := int64(30 * 1000 * 
1000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, _, l2client, _, cancelNode, stackDataDir := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 101, 5, 0) + + ctx1, cancel1 := context.WithCancel(ctx) + nodeConfig := arbnode.ConfigDefaultL2Test() + nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + cacheConfig := &core.CacheConfig{ + // Arbitrum Config Options + TriesInMemory: 128, + TrieRetention: 30 * time.Minute, + MaxNumberOfBlocksToSkipStateSaving: skipBlocks, + MaxAmountOfGasToSkipStateSaving: skipGas, + + // disable caching of states in BlockChain.stateCache + TrieCleanLimit: 0, + TrieDirtyLimit: 0, + + TrieDirtyDisabled: true, + + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, + } + + feedErrChan := make(chan error, 10) + AddDefaultValNode(t, ctx1, nodeConfig, true) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), cacheConfig) + + node, err := arbnode.CreateNode(ctx1, stack, chainDb, arbDb, nodeConfig, blockchain, nil, nil, nil, nil, feedErrChan) + Require(t, err) + err = node.TxStreamer.AddFakeInitMessage() + Require(t, err) + Require(t, node.Start(ctx1)) + client := ClientForStack(t, stack) + debugAuth := l2info.GetDefaultTransactOpts("Owner", ctx1) + // make auth a chain owner + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) + Require(t, err, "failed to deploy ArbDebug") + tx, err := arbdebug.BecomeChainOwner(&debugAuth) + Require(t, err, "failed to deploy ArbDebug") + _, err = EnsureTxSucceeded(ctx1, client, tx) + Require(t, err) + + StartWatchChanErr(t, ctx, feedErrChan, node) + dataDir := node.Stack.DataDir() + + l2info.GenerateAccount("User2") + var txs []*types.Transaction + for i := 0; i < txCount; i++ { + tx := l2info.PrepareTx("Owner", 
"User2", l2info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := client.SendTransaction(ctx, tx) + testhelpers.RequireImpl(t, err) + } + for _, tx := range txs { + _, err := EnsureTxSucceeded(ctx, client, tx) + testhelpers.RequireImpl(t, err) + } + bc := node.Backend.ArbInterface().BlockChain() genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum - lastBlock, err := l2client.BlockNumber(ctx) - testhelpers.RequireImpl(t, err) - if lastBlock < genesis+100 { - testhelpers.FailImpl(t, "Internal test error - not enough blocks produced during preparation, want:", genesis+100, "have:", lastBlock) + lastBlock, err := client.BlockNumber(ctx) + Require(t, err) + if lastBlock < genesis+uint64(txCount) { + Fail(t, "internal test error - not enough blocks produced during preparation, want:", genesis+100, "have:", lastBlock) } - expectedBalance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) - testhelpers.RequireImpl(t, err) - cancelNode() - _, bc, _, l2client, _, cancelNode, _ = prepareNode(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, stackDataDir, 0, 0) - defer cancelNode() - for i := genesis + 1; i <= genesis+100; i++ { + expectedBalance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + + node.StopAndWait() + cancel1() + t.Log("stopped first node") + + AddDefaultValNode(t, ctx, nodeConfig, true) + l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), cacheConfig) + node, err = arbnode.CreateNode(ctx, stack, chainDb, arbDb, nodeConfig, blockchain, nil, node.DeployInfo, nil, nil, feedErrChan) + Require(t, err) + Require(t, node.Start(ctx)) + client = ClientForStack(t, stack) + defer node.StopAndWait() + bc = node.Backend.ArbInterface().BlockChain() + gas := skipGas + blocks := skipBlocks + for i := genesis + 1; i <= genesis+uint64(txCount); i++ { block 
:= bc.GetBlockByNumber(i) if block == nil { Fail(t, "header not found for block number:", i) + continue } + gas += block.GasUsed() + blocks++ _, err := bc.StateAt(block.Root()) - if (i-genesis-1)%5 == 0 { + if (skipBlocks == 0 && skipGas == 0) || (skipBlocks != 0 && blocks > skipBlocks) || (skipGas != 0 && gas > skipGas) { + if err != nil { + t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) + } Require(t, err, "state not found, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash(), "err:", err) - t.Log("have state for block:", i) + gas = 0 + blocks = 0 } else { if err == nil { + t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) Fail(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) } expectedErr := &trie.MissingNodeError{} if !errors.As(err, &expectedErr) { Fail(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) } - t.Log("no state for block:", i) } } + for i := genesis + 1; i <= genesis+uint64(txCount); i += i % 10 { + _, err = client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i)) + testhelpers.RequireImpl(t, err) + } - balance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + balance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) testhelpers.RequireImpl(t, err) if balance.Cmp(expectedBalance) != 0 { testhelpers.FailImpl(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) } } + +func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { + // test defaults + testSkippingSavingStateAndRecreatingAfterRestart(t, 0, 0, 512) + testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 0, 512) + testSkippingSavingStateAndRecreatingAfterRestart(t, 0, 15*1000*1000, 512) 
+ testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 15*1000*1000, 512) + + // one test block ~ 925000 gas + testBlockGas := uint64(925000) + skipGasValues := []uint64{testBlockGas, 2 * testBlockGas, 3 * testBlockGas, 5 * testBlockGas, 21 * testBlockGas} + skipBlockValues := []uint32{1, 2, 3, 5, 21} + for _, skipGas := range skipGasValues { + for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { + testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, skipGas, 21) + } + } + skipBlockValues = []uint32{1, 2, 3, 7, 19, 20, 21, 22} + for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { + testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, 0, 21) + testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, testBlockGas*100, 21) + } + skipGasValues = []uint64{1, + testBlockGas - 2, testBlockGas - 1, testBlockGas, testBlockGas + 1, testBlockGas + 2, + 2*testBlockGas - 2, 2*testBlockGas - 1, 2 * testBlockGas, 2*testBlockGas + 1, + 7 * testBlockGas, 21 * testBlockGas} + for _, skipGas := range skipGasValues { + testSkippingSavingStateAndRecreatingAfterRestart(t, 0, skipGas, 21) + testSkippingSavingStateAndRecreatingAfterRestart(t, 100, skipGas, 21) + } +} From 4cbf2eb7d820c3a719a5d35bd0d5946f505ae5c7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 11 Apr 2023 13:28:14 +0000 Subject: [PATCH 04/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index b884ded522..c90d53074f 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b884ded52258c18eb4bcaf89b0d41fc3c91b30fe +Subproject commit c90d53074f5e57fd39a70994c4143bf3b2496946 From 9d7f0887240a507682017491a63b44b7a8be6b71 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 11 Apr 2023 13:40:54 +0000 Subject: [PATCH 05/20] fix test failure message --- system_tests/recreatestate_rpc_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index e0b98f6524..8bc0a083ea 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -337,8 +337,8 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum lastBlock, err := client.BlockNumber(ctx) Require(t, err) - if lastBlock < genesis+uint64(txCount) { - Fail(t, "internal test error - not enough blocks produced during preparation, want:", genesis+100, "have:", lastBlock) + if want := genesis + uint64(txCount); lastBlock < want { + Fail(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) } expectedBalance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) Require(t, err) From 22c9df320acc6c3ec099b98de06b16f659bf6da3 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 13 Jun 2023 16:11:11 +0000 Subject: [PATCH 06/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index c90d53074f..c98e2c342d 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c90d53074f5e57fd39a70994c4143bf3b2496946 +Subproject commit c98e2c342dbbcf7479b1aff987ed251ad8b4e969 From 1bf55862536135186887a0a8b478648dc8ffd764 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 13 Jun 2023 16:26:52 +0000 Subject: [PATCH 07/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index c98e2c342d..a47849f671 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c98e2c342dbbcf7479b1aff987ed251ad8b4e969 +Subproject commit a47849f671f2f247bf5b3c02ab7b3bc2785b42fc From cb65f7ef7a9732e4ed929ab6b8640c80b288fc42 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 13 Jun 2023 17:26:17 +0000 Subject: 
[PATCH 08/20] fix recreate state test build --- system_tests/recreatestate_rpc_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 3494179be3..0415533517 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -47,7 +47,7 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD SnapshotLimit: 256, SnapshotWait: true, } - l2info, node, l2client, _, l1info, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig, nil) + l2info, node, l2client, _, l1info, _, _, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig, nil) cancel = func() { defer requireClose(t, l1stack) defer node.StopAndWait() @@ -303,7 +303,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u AddDefaultValNode(t, ctx1, nodeConfig, true) l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), cacheConfig) - node, err := arbnode.CreateNode(ctx1, stack, chainDb, arbDb, nodeConfig, blockchain, nil, nil, nil, nil, feedErrChan) + node, err := arbnode.CreateNode(ctx1, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, nil, nil, nil, nil, feedErrChan) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -333,7 +333,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u _, err := EnsureTxSucceeded(ctx, client, tx) testhelpers.RequireImpl(t, err) } - bc := node.Backend.ArbInterface().BlockChain() + bc := node.Execution.Backend.ArbInterface().BlockChain() genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum lastBlock, err := client.BlockNumber(ctx) Require(t, err) @@ -349,12 +349,12 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u 
AddDefaultValNode(t, ctx, nodeConfig, true) l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), cacheConfig) - node, err = arbnode.CreateNode(ctx, stack, chainDb, arbDb, nodeConfig, blockchain, nil, node.DeployInfo, nil, nil, feedErrChan) + node, err = arbnode.CreateNode(ctx, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, node.DeployInfo, nil, nil, nil, feedErrChan) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc = node.Backend.ArbInterface().BlockChain() + bc = node.Execution.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { From e165f3ded1d943222fc332936157a3bfb259540f Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 15 Jun 2023 01:38:13 +0000 Subject: [PATCH 09/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index a47849f671..eb96086a95 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit a47849f671f2f247bf5b3c02ab7b3bc2785b42fc +Subproject commit eb96086a95859470cfedd3539a9c8bb3132f015f From e8767b733ac9af585f24fcc2ce6a804c5fc4c85a Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 15 Jun 2023 01:55:27 +0000 Subject: [PATCH 10/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index eb96086a95..c86303cf0b 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit eb96086a95859470cfedd3539a9c8bb3132f015f +Subproject commit c86303cf0b7c10f55a19b767a326a8bd06ce8108 From cba9ceacc25fbe826097ccf1ce07993d7a24b753 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 16 Jun 2023 14:36:00 +0000 Subject: [PATCH 11/20] add MaxNumberOfBlocksToSkipStateSaving and MaxAmountOfGasToSkipStateSaving to CachingConfig --- 
arbnode/execution/blockchain.go | 68 ++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/arbnode/execution/blockchain.go b/arbnode/execution/blockchain.go index 2ed0221b04..c05fafa2b2 100644 --- a/arbnode/execution/blockchain.go +++ b/arbnode/execution/blockchain.go @@ -25,15 +25,17 @@ import ( ) type CachingConfig struct { - Archive bool `koanf:"archive"` - BlockCount uint64 `koanf:"block-count"` - BlockAge time.Duration `koanf:"block-age"` - TrieTimeLimit time.Duration `koanf:"trie-time-limit"` - TrieDirtyCache int `koanf:"trie-dirty-cache"` - TrieCleanCache int `koanf:"trie-clean-cache"` - SnapshotCache int `koanf:"snapshot-cache"` - DatabaseCache int `koanf:"database-cache"` - SnapshotRestoreMaxGas uint64 `koanf:"snapshot-restore-gas-limit"` + Archive bool `koanf:"archive"` + BlockCount uint64 `koanf:"block-count"` + BlockAge time.Duration `koanf:"block-age"` + TrieTimeLimit time.Duration `koanf:"trie-time-limit"` + TrieDirtyCache int `koanf:"trie-dirty-cache"` + TrieCleanCache int `koanf:"trie-clean-cache"` + SnapshotCache int `koanf:"snapshot-cache"` + DatabaseCache int `koanf:"database-cache"` + SnapshotRestoreMaxGas uint64 `koanf:"snapshot-restore-gas-limit"` + MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` + MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -46,18 +48,22 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".snapshot-cache", DefaultCachingConfig.SnapshotCache, "amount of memory in megabytes to cache state snapshots with") f.Int(prefix+".database-cache", DefaultCachingConfig.DatabaseCache, "amount of memory in megabytes to cache database contents with") f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreMaxGas, "maximum gas rolled back to recover snapshot") + 
f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only)") + f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to persistent storage (archive node only)") } var DefaultCachingConfig = CachingConfig{ - Archive: false, - BlockCount: 128, - BlockAge: 30 * time.Minute, - TrieTimeLimit: time.Hour, - TrieDirtyCache: 1024, - TrieCleanCache: 600, - SnapshotCache: 400, - DatabaseCache: 2048, - SnapshotRestoreMaxGas: 300_000_000_000, + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreMaxGas: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 127, + MaxAmountOfGasToSkipStateSaving: 15 * 1000 * 1000, } func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { @@ -67,18 +73,20 @@ func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core } return &core.CacheConfig{ - TrieCleanLimit: cachingConfig.TrieCleanCache, - TrieCleanJournal: stack.ResolvePath(baseConf.TrieCleanCacheJournal), - TrieCleanRejournal: baseConf.TrieCleanCacheRejournal, - TrieCleanNoPrefetch: baseConf.NoPrefetch, - TrieDirtyLimit: cachingConfig.TrieDirtyCache, - TrieDirtyDisabled: cachingConfig.Archive, - TrieTimeLimit: cachingConfig.TrieTimeLimit, - TriesInMemory: cachingConfig.BlockCount, - TrieRetention: cachingConfig.BlockAge, - SnapshotLimit: cachingConfig.SnapshotCache, - Preimages: baseConf.Preimages, - SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreMaxGas, + TrieCleanLimit: cachingConfig.TrieCleanCache, + TrieCleanJournal: stack.ResolvePath(baseConf.TrieCleanCacheJournal), + TrieCleanRejournal: 
baseConf.TrieCleanCacheRejournal, + TrieCleanNoPrefetch: baseConf.NoPrefetch, + TrieDirtyLimit: cachingConfig.TrieDirtyCache, + TrieDirtyDisabled: cachingConfig.Archive, + TrieTimeLimit: cachingConfig.TrieTimeLimit, + TriesInMemory: cachingConfig.BlockCount, + TrieRetention: cachingConfig.BlockAge, + SnapshotLimit: cachingConfig.SnapshotCache, + Preimages: baseConf.Preimages, + SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreMaxGas, + MaxNumberOfBlocksToSkipStateSaving: cachingConfig.MaxNumberOfBlocksToSkipStateSaving, + MaxAmountOfGasToSkipStateSaving: cachingConfig.MaxAmountOfGasToSkipStateSaving, } } From 3c4ba0ad08565f686afcc3113277207a588065a2 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 12:18:55 +0000 Subject: [PATCH 12/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index c86303cf0b..1a58f1b34e 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c86303cf0b7c10f55a19b767a326a8bd06ce8108 +Subproject commit 1a58f1b34e24ecf1679a21edff0a199305aa82f8 From 8e9d7870c0ee65cc99dcd16d1dcd587f8dfaddda Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 12:46:14 +0000 Subject: [PATCH 13/20] remove not used parameter of stackConfigForTest --- system_tests/common_test.go | 12 ++++-------- system_tests/forwarder_test.go | 8 ++++---- system_tests/ipc_test.go | 2 +- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 11cda21216..81cb18ab30 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -290,7 +290,7 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, return createTestL1BlockChainWithConfig(t, l1info, nil) } -func stackConfigForTest(t *testing.T, dataDir string) *node.Config { +func stackConfigForTest(t *testing.T) *node.Config { stackConfig := node.DefaultConfig stackConfig.HTTPPort = 0 
stackConfig.WSPort = 0 @@ -299,11 +299,7 @@ func stackConfigForTest(t *testing.T, dataDir string) *node.Config { stackConfig.P2P.NoDial = true stackConfig.P2P.NoDiscovery = true stackConfig.P2P.NAT = nil - if dataDir != "" { - stackConfig.DataDir = dataDir - } else { - stackConfig.DataDir = t.TempDir() - } + stackConfig.DataDir = t.TempDir() return &stackConfig } @@ -396,7 +392,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no l1info = NewL1TestInfo(t) } if stackConfig == nil { - stackConfig = stackConfigForTest(t, "") + stackConfig = stackConfigForTest(t) } l1info.GenerateAccount("Faucet") @@ -738,7 +734,7 @@ func Create2ndNodeWithConfig( l1client := ethclient.NewClient(l1rpcClient) if stackConfig == nil { - stackConfig = stackConfigForTest(t, "") + stackConfig = stackConfigForTest(t) } l2stack, err := node.New(stackConfig) Require(t, err) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 7f54b4811f..0a954719d8 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -35,7 +35,7 @@ func TestStaticForwarder(t *testing.T) { ipcPath := tmpPath(t, "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConfig := stackConfigForTest(t, "") + stackConfig := stackConfigForTest(t) ipcConfig.Apply(stackConfig) nodeConfigA := arbnode.ConfigDefaultL1Test() nodeConfigA.BatchPoster.Enable = false @@ -97,7 +97,7 @@ func fallbackSequencer( ctx context.Context, t *testing.T, opts *fallbackSequencerOpts, ) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := stackConfigForTest(t, "") + stackConfig := stackConfigForTest(t) ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = opts.ipcPath ipcConfig.Apply(stackConfig) @@ -118,7 +118,7 @@ func createForwardingNode( redisUrl string, fallbackPath string, ) (*ethclient.Client, 
*arbnode.Node) { - stackConfig := stackConfigForTest(t, "") + stackConfig := stackConfigForTest(t) if ipcPath != "" { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath @@ -144,7 +144,7 @@ func createSequencer( ipcPath string, redisUrl string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := stackConfigForTest(t, "") + stackConfig := stackConfigForTest(t) ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index 40f2aad625..01ecf859d8 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -18,7 +18,7 @@ func TestIpcRpc(t *testing.T) { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConf := stackConfigForTest(t, "") + stackConf := stackConfigForTest(t) ipcConfig.Apply(stackConf) ctx, cancel := context.WithCancel(context.Background()) From 0b42d1faa41e995b1a81b1c7ed363faed04ac708 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 13:11:13 +0000 Subject: [PATCH 14/20] make sure that each test tx is included in next block --- system_tests/recreatestate_rpc_test.go | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 4a92879edd..45b7d1bd15 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -273,7 +273,6 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { } func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks uint32, skipGas uint64, txCount int) { - t.Helper() maxRecreateStateDepth := int64(30 * 1000 * 1000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -311,14 +310,6 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u Require(t, err) Require(t, node.Start(ctx1)) client := ClientForStack(t, stack) - 
debugAuth := l2info.GetDefaultTransactOpts("Owner", ctx1) - // make auth a chain owner - arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) - Require(t, err, "failed to deploy ArbDebug") - tx, err := arbdebug.BecomeChainOwner(&debugAuth) - Require(t, err, "failed to deploy ArbDebug") - _, err = EnsureTxSucceeded(ctx1, client, tx) - Require(t, err) StartWatchChanErr(t, ctx, feedErrChan, node) dataDir := node.Stack.DataDir() @@ -330,13 +321,13 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u txs = append(txs, tx) err := client.SendTransaction(ctx, tx) Require(t, err) - } - for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := EnsureTxSucceeded(ctx, client, tx) Require(t, err) + if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want { + Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) + } } - bc := node.Execution.Backend.ArbInterface().BlockChain() - genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum + genesis := uint64(0) lastBlock, err := client.BlockNumber(ctx) Require(t, err) if want := genesis + uint64(txCount); lastBlock < want { @@ -356,7 +347,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc = node.Execution.Backend.ArbInterface().BlockChain() + bc := node.Execution.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { From 68c0039aab67cd1b4ed7ade05b23dd89271ef775 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 16:06:16 +0000 Subject: [PATCH 15/20] simplify test params --- system_tests/recreatestate_rpc_test.go | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go 
b/system_tests/recreatestate_rpc_test.go index 45b7d1bd15..d0ee9b36b4 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -19,7 +19,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util" ) @@ -391,31 +390,20 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { // test defaults - testSkippingSavingStateAndRecreatingAfterRestart(t, 0, 0, 512) testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 0, 512) testSkippingSavingStateAndRecreatingAfterRestart(t, 0, 15*1000*1000, 512) testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 15*1000*1000, 512) // one test block ~ 925000 gas testBlockGas := uint64(925000) - skipGasValues := []uint64{testBlockGas, 2 * testBlockGas, 3 * testBlockGas, 5 * testBlockGas, 21 * testBlockGas} - skipBlockValues := []uint32{1, 2, 3, 5, 21} + skipBlockValues := []uint64{0, 1, 2, 3, 5, 7, 19, 20, 21, 99, 100, 101} + skipGasValues := []uint64{0} + for _, i := range skipBlockValues[1:] { + skipGasValues = append(skipGasValues, []uint64{i*testBlockGas - 2, i*testBlockGas - 1, i * testBlockGas, i*testBlockGas + 1, i*testBlockGas + 2}...) 
+ } for _, skipGas := range skipGasValues { for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { - testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, skipGas, 21) + testSkippingSavingStateAndRecreatingAfterRestart(t, uint32(skipBlocks), skipGas, 100) } } - skipBlockValues = []uint32{1, 2, 3, 7, 19, 20, 21, 22} - for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { - testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, 0, 21) - testSkippingSavingStateAndRecreatingAfterRestart(t, skipBlocks, testBlockGas*100, 21) - } - skipGasValues = []uint64{1, - testBlockGas - 2, testBlockGas - 1, testBlockGas, testBlockGas + 1, testBlockGas + 2, - 2*testBlockGas - 2, 2*testBlockGas - 1, 2 * testBlockGas, 2*testBlockGas + 1, - 7 * testBlockGas, 21 * testBlockGas} - for _, skipGas := range skipGasValues { - testSkippingSavingStateAndRecreatingAfterRestart(t, 0, skipGas, 21) - testSkippingSavingStateAndRecreatingAfterRestart(t, 100, skipGas, 21) - } } From c67b318f3773e25d42c536f8c00d54a9fa9bb052 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 16:53:59 +0000 Subject: [PATCH 16/20] shorten state saving skipping test --- system_tests/recreatestate_rpc_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index d0ee9b36b4..fd078e1948 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -396,10 +396,10 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { // one test block ~ 925000 gas testBlockGas := uint64(925000) - skipBlockValues := []uint64{0, 1, 2, 3, 5, 7, 19, 20, 21, 99, 100, 101} + skipBlockValues := []uint64{0, 1, 2, 3, 5, 100, 101} skipGasValues := []uint64{0} for _, i := range skipBlockValues[1:] { - skipGasValues = append(skipGasValues, []uint64{i*testBlockGas - 2, i*testBlockGas - 1, i * testBlockGas, i*testBlockGas + 1, 
i*testBlockGas + 2}...) + skipGasValues = append(skipGasValues, []uint64{i*testBlockGas - 1, i * testBlockGas, i*testBlockGas + 1}...) } for _, skipGas := range skipGasValues { for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { From 20b213a7cc222862f45eb887f4e23bafeba23fe2 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 7 Sep 2023 23:05:52 +0000 Subject: [PATCH 17/20] clean extra return values from prepareNodeWithHistory --- system_tests/recreatestate_rpc_test.go | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index fd078e1948..fbc008c9f0 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -15,20 +15,19 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, l2info info, l1info info, l1stack *node.Node, nodeConfig *arbnode.Config, cacheConfig *core.CacheConfig, cancel func()) { +func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, cancel func()) { t.Helper() - nodeConfig = arbnode.ConfigDefaultL1Test() + nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 
150 // 1 test tx ~= 110 - cacheConfig = &core.CacheConfig{ + cacheConfig := &core.CacheConfig{ // Arbitrum Config Options TriesInMemory: 128, TrieRetention: 30 * time.Minute, @@ -45,7 +44,7 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD SnapshotLimit: 256, SnapshotWait: true, } - l2info, node, l2client, _, l1info, _, _, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig, nil) + l2info, node, l2client, _, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig, nil) cancel = func() { defer requireClose(t, l1stack) defer node.StopAndWait() @@ -64,8 +63,7 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD } bc = node.Execution.Backend.ArbInterface().BlockChain() db = node.Execution.Backend.ChainDb() - - return + return node, bc, db, l2client, cancel } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -115,7 +113,7 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -139,7 +137,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() 
lastBlock, err := l2client.BlockNumber(ctx) @@ -163,7 +161,7 @@ func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(200) - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -186,7 +184,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -219,7 +217,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) @@ -243,7 +241,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, _, _, _, _, _, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) + _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, 
arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) defer cancelNode() lastBlock, err := l2client.BlockNumber(ctx) From 0e0fe26ed7d74bcb5c39040a23c57eecc9abee87 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 8 Sep 2023 16:06:34 +0000 Subject: [PATCH 18/20] refactor setting cache config in tests --- system_tests/arbtrace_test.go | 2 +- system_tests/common_test.go | 21 +-- system_tests/debugapi_test.go | 2 +- system_tests/recreatestate_rpc_test.go | 177 ++++++++++++++++--------- system_tests/staker_test.go | 2 +- 5 files changed, 131 insertions(+), 73 deletions(-) diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index 4aab5c71bd..78907aa622 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -147,7 +147,7 @@ func TestArbTraceForwarding(t *testing.T) { nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.RPC.ClassicRedirect = ipcPath nodeConfig.RPC.ClassicRedirectTimeout = time.Second - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, nil, nil) + _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, nil) defer requireClose(t, l1stack) defer requireClose(t, l2stack) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 81cb18ab30..ce1624a27d 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -493,13 +493,13 @@ func DeployOnTestL1( } func createL2BlockChain( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *core.CacheConfig, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *execution.CachingConfig, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, cacheConfig) } func createL2BlockChainWithStackConfig( - 
t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *core.CacheConfig, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *execution.CachingConfig, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if l2info == nil { l2info = NewArbTestInfo(t, chainConfig.ChainID) @@ -530,7 +530,11 @@ func createL2BlockChainWithStackConfig( SerializedChainConfig: serializedChainConfig, } } - blockchain, err := execution.WriteOrTestBlockChain(chainDb, cacheConfig, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + var coreCacheConfig *core.CacheConfig + if cacheConfig != nil { + coreCacheConfig = execution.DefaultCacheConfigFor(stack, cacheConfig) + } + blockchain, err := execution.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) Require(t, err) return l2info, stack, chainDb, arbDb, blockchain @@ -565,7 +569,7 @@ func createTestNodeOnL1WithConfig( l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, ) { - l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, chainConfig, stackConfig, nil, nil) + l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, chainConfig, stackConfig, nil) return } @@ -576,7 +580,6 @@ func createTestNodeOnL1WithConfigImpl( nodeConfig *arbnode.Config, chainConfig *params.ChainConfig, stackConfig *node.Config, - cacheConfig *core.CacheConfig, l2info_in info, ) ( l2info info, currentNode *arbnode.Node, 
l2client *ethclient.Client, l2stack *node.Node, @@ -598,7 +601,7 @@ func createTestNodeOnL1WithConfigImpl( l2info = NewArbTestInfo(t, chainConfig.ChainID) } addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) - _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, cacheConfig) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &nodeConfig.Caching) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc if isSequencer { @@ -644,7 +647,7 @@ func CreateTestL2WithConfig( AddDefaultValNode(t, ctx, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig(), nil) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) currentNode, err := arbnode.CreateNode(ctx, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, nil, nil, nil, nil, feedErrChan) Require(t, err) @@ -749,7 +752,9 @@ func Create2ndNodeWithConfig( txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) chainConfig := first.Execution.ArbInterface.BlockChain().Config() initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) - l2blockchain, err := execution.WriteOrTestBlockChain(l2chainDb, nil, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + + coreCacheConfig := execution.DefaultCacheConfigFor(l2stack, &nodeConfig.Caching) + l2blockchain, err := execution.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) Require(t, err) AddDefaultValNode(t, ctx, nodeConfig, true) diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index ff28e2350c..03e3dfd405 100644 --- 
a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -14,7 +14,7 @@ import ( func TestDebugAPI(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, nil, nil, nil) + _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, nil, nil) defer requireClose(t, l1stack) defer requireClose(t, l2stack) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index fbc008c9f0..dbf68c8479 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -6,7 +6,6 @@ import ( "math/big" "strings" "testing" - "time" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" @@ -18,33 +17,13 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateDepth int64, txCount uint64, skipBlocks uint32, skipGas uint64) (node *arbnode.Node, bc *core.BlockChain, db ethdb.Database, l2client *ethclient.Client, cancel func()) { +func prepareNodeWithHistory(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, txCount uint64) (node *arbnode.Node, executionNode *execution.ExecutionNode, l2client *ethclient.Client, cancel func()) { t.Helper() - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - cacheConfig := &core.CacheConfig{ - // Arbitrum Config Options - TriesInMemory: 128, - TrieRetention: 30 * time.Minute, - MaxNumberOfBlocksToSkipStateSaving: skipBlocks, - MaxAmountOfGasToSkipStateSaving: skipGas, - - // 
disable caching of states in BlockChain.stateCache - TrieCleanLimit: 0, - TrieDirtyLimit: 0, - - TrieDirtyDisabled: true, - - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } - l2info, node, l2client, _, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, cacheConfig, nil) + l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil) cancel = func() { defer requireClose(t, l1stack) defer node.StopAndWait() @@ -61,9 +40,7 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, maxRecreateStateD _, err := EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) } - bc = node.Execution.Backend.ArbInterface().BlockChain() - db = node.Execution.Backend.ChainDb() - return node, bc, db, l2client, cancel + return node, node.Execution, l2client, cancel } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -113,8 +90,20 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := 
execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -137,8 +126,20 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -160,9 +161,20 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - depthGasLimit := int64(200) - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, depthGasLimit, 32, 0, 0) + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = int64(200) + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving 
= 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -184,8 +196,20 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, headerCacheLimit+5, 0, 0) + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -217,8 +241,21 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, 32, 0, 0) + + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states 
in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -241,8 +278,20 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, bc, db, l2client, cancelNode := prepareNodeWithHistory(t, ctx, arbitrum.InfiniteMaxRecreateStateDepth, blockCacheLimit+4, 0, 0) + nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable caching of states in BlockChain.stateCache + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() lastBlock, err := l2client.BlockNumber(ctx) Require(t, err) @@ -269,7 +318,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { } } -func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks uint32, skipGas uint64, txCount int) { +func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig *execution.CachingConfig, txCount int) { maxRecreateStateDepth := int64(30 * 1000 * 1000) ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() @@ -279,27 +328,14 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - cacheConfig := &core.CacheConfig{ - // Arbitrum Config Options - TriesInMemory: 128, - TrieRetention: 30 * time.Minute, - MaxNumberOfBlocksToSkipStateSaving: skipBlocks, - MaxAmountOfGasToSkipStateSaving: skipGas, - - // disable caching of states in BlockChain.stateCache - TrieCleanLimit: 0, - TrieDirtyLimit: 0, - - TrieDirtyDisabled: true, - - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } + nodeConfig.Caching = *cacheConfig + + skipBlocks := nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving + skipGas := nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving feedErrChan := make(chan error, 10) AddDefaultValNode(t, ctx1, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), cacheConfig) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) node, err := arbnode.CreateNode(ctx1, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, nil, nil, nil, nil, feedErrChan) Require(t, err) @@ -338,7 +374,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u t.Log("stopped first node") AddDefaultValNode(t, ctx, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), cacheConfig) + l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) node, err = arbnode.CreateNode(ctx, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), 
blockchain, nil, node.DeployInfo, nil, nil, nil, feedErrChan) Require(t, err) Require(t, node.Start(ctx)) @@ -387,21 +423,38 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, skipBlocks u } func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { + cacheConfig := execution.DefaultCachingConfig + cacheConfig.Archive = true + // disable caching of states in BlockChain.stateCache + cacheConfig.TrieCleanCache = 0 + cacheConfig.TrieDirtyCache = 0 // test defaults - testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 0, 512) - testSkippingSavingStateAndRecreatingAfterRestart(t, 0, 15*1000*1000, 512) - testSkippingSavingStateAndRecreatingAfterRestart(t, 127, 15*1000*1000, 512) + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 0 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 0 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) // one test block ~ 925000 gas testBlockGas := uint64(925000) - skipBlockValues := []uint64{0, 1, 2, 3, 5, 100, 101} - skipGasValues := []uint64{0} - for _, i := range skipBlockValues[1:] { - skipGasValues = append(skipGasValues, []uint64{i*testBlockGas - 1, i * testBlockGas, i*testBlockGas + 1}...) 
+ skipBlockValues := []uint64{0, 1, 2, 3, 5, 21, 51, 100, 101} + var skipGasValues []uint64 + for _, i := range skipBlockValues { + skipGasValues = append(skipGasValues, i*testBlockGas) } for _, skipGas := range skipGasValues { for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { - testSkippingSavingStateAndRecreatingAfterRestart(t, uint32(skipBlocks), skipGas, 100) + cacheConfig.MaxAmountOfGasToSkipStateSaving = skipGas + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = uint32(skipBlocks) + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 100) } } } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 82eede9f60..b1c7091e51 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -64,7 +64,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), transferGas, ) - _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, nil, l2info) + _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, nil) defer requireClose(t, l1stack) defer l2nodeA.StopAndWait() From cc58fbb8e263839d2425d1c34fbed846eec71656 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 8 Sep 2023 16:36:23 +0000 Subject: [PATCH 19/20] fix staker test --- system_tests/staker_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index b1c7091e51..dc64450924 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -64,7 +64,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), transferGas, ) - _, 
l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, nil) + _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, l2info) defer requireClose(t, l1stack) defer l2nodeA.StopAndWait() From 3bf7dd0eff91742a65e877890cb00c4dc4fd41d3 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 26 Sep 2023 22:15:14 +0000 Subject: [PATCH 20/20] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 1a58f1b34e..89f53b035d 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 1a58f1b34e24ecf1679a21edff0a199305aa82f8 +Subproject commit 89f53b035d7a8b9b1ff8599958bf0c55efcdf718