diff --git a/core/block_validator_test.go b/core/block_validator_test.go
deleted file mode 100644
index 1ab82ea0be..0000000000
--- a/core/block_validator_test.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/clique"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// Tests that simple header verification works, for both good and bad blocks.
-func TestHeaderVerification(t *testing.T) {
- testHeaderVerification(t, rawdb.HashScheme)
- testHeaderVerification(t, rawdb.PathScheme)
-}
-
-func testHeaderVerification(t *testing.T, scheme string) {
- // Create a simple chain to verify
- var (
- gspec = &Genesis{Config: params.TestChainConfig}
- _, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer chain.Stop()
-
- for i := 0; i < len(blocks); i++ {
- for j, valid := range []bool{true, false} {
- var results <-chan error
-
- if valid {
- engine := ethash.NewFaker()
- _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]})
- } else {
- engine := ethash.NewFakeFailer(headers[i].Number.Uint64())
- _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]})
- }
- // Wait for the verification result
- select {
- case result := <-results:
- if (result == nil) != valid {
- t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, result, valid)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d.%d: verification timeout", i, j)
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
- case <-time.After(25 * time.Millisecond):
- }
- }
- chain.InsertChain(blocks[i : i+1])
- }
-}
-
-func TestHeaderVerificationForMergingClique(t *testing.T) { testHeaderVerificationForMerging(t, true) }
-func TestHeaderVerificationForMergingEthash(t *testing.T) { testHeaderVerificationForMerging(t, false) }
-
-// Tests the verification for eth1/2 merging, including pre-merge and post-merge
-func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
- var (
- gspec *Genesis
- preBlocks []*types.Block
- postBlocks []*types.Block
- engine consensus.Engine
- merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
- )
- if isClique {
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key.PublicKey)
- config = *params.AllCliqueProtocolChanges
- )
- engine = beacon.New(clique.New(params.AllCliqueProtocolChanges.Clique, rawdb.NewMemoryDatabase()))
- gspec = &Genesis{
- Config: &config,
- ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength),
- Alloc: map[common.Address]types.Account{
- addr: {Balance: big.NewInt(1)},
- },
- BaseFee: big.NewInt(params.InitialBaseFee),
- Difficulty: new(big.Int),
- }
- copy(gspec.ExtraData[32:], addr[:])
-
- td := 0
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, nil)
- for i, block := range blocks {
- header := block.Header()
- if i > 0 {
- header.ParentHash = blocks[i-1].Hash()
- }
- header.Extra = make([]byte, 32+crypto.SignatureLength)
- header.Difficulty = big.NewInt(2)
-
- sig, _ := crypto.Sign(engine.SealHash(header).Bytes(), key)
- copy(header.Extra[len(header.Extra)-crypto.SignatureLength:], sig)
- blocks[i] = block.WithSeal(header)
-
- // calculate td
- td += int(block.Difficulty().Uint64())
- }
- preBlocks = blocks
- gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(td))
- postBlocks, _ = GenerateChain(gspec.Config, preBlocks[len(preBlocks)-1], engine, genDb, 8, nil)
- } else {
- config := *params.TestChainConfig
- gspec = &Genesis{Config: &config}
- engine = beacon.New(ethash.NewFaker())
- td := int(params.GenesisDifficulty.Uint64())
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, nil)
- for _, block := range blocks {
- // calculate td
- td += int(block.Difficulty().Uint64())
- }
- preBlocks = blocks
- gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(td))
- t.Logf("Set ttd to %v\n", gspec.Config.TerminalTotalDifficulty)
- postBlocks, _ = GenerateChain(gspec.Config, preBlocks[len(preBlocks)-1], engine, genDb, 8, func(i int, gen *BlockGen) {
- gen.SetPoS()
- })
- }
- // Assemble header batch
- preHeaders := make([]*types.Header, len(preBlocks))
- for i, block := range preBlocks {
- preHeaders[i] = block.Header()
- t.Logf("Pre-merge header: %d", block.NumberU64())
- }
- postHeaders := make([]*types.Header, len(postBlocks))
- for i, block := range postBlocks {
- postHeaders[i] = block.Header()
- t.Logf("Post-merge header: %d", block.NumberU64())
- }
- // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
- defer chain.Stop()
-
- // Verify the blocks before the merging
- for i := 0; i < len(preBlocks); i++ {
- _, results := engine.VerifyHeaders(chain, []*types.Header{preHeaders[i]})
- // Wait for the verification result
- select {
- case result := <-results:
- if result != nil {
- t.Errorf("pre-block %d: verification failed %v", i, result)
- }
- case <-time.After(time.Second):
- t.Fatalf("pre-block %d: verification timeout", i)
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("pre-block %d: unexpected result returned: %v", i, result)
- case <-time.After(25 * time.Millisecond):
- }
- chain.InsertChain(preBlocks[i : i+1])
- }
-
- // Make the transition
- merger.ReachTTD()
- merger.FinalizePoS()
-
- // Verify the blocks after the merging
- for i := 0; i < len(postBlocks); i++ {
- _, results := engine.VerifyHeaders(chain, []*types.Header{postHeaders[i]})
- // Wait for the verification result
- select {
- case result := <-results:
- if result != nil {
- t.Errorf("post-block %d: verification failed %v", i, result)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d: verification timeout", i)
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("post-block %d: unexpected result returned: %v", i, result)
- case <-time.After(25 * time.Millisecond):
- }
- chain.InsertBlockWithoutSetHead(postBlocks[i])
- }
-
- // Verify the blocks with pre-merge blocks and post-merge blocks
- var headers []*types.Header
- for _, block := range preBlocks {
- headers = append(headers, block.Header())
- }
- for _, block := range postBlocks {
- headers = append(headers, block.Header())
- }
- _, results := engine.VerifyHeaders(chain, headers)
- for i := 0; i < len(headers); i++ {
- select {
- case result := <-results:
- if result != nil {
- t.Errorf("test %d: verification failed %v", i, result)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d: verification timeout", i)
- }
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("unexpected result returned: %v", result)
- case <-time.After(25 * time.Millisecond):
- }
-}
-
-func TestCalcGasLimit(t *testing.T) {
- for i, tc := range []struct {
- pGasLimit uint64
- max uint64
- min uint64
- }{
- {20000000, 20078124, 19921876},
- {40000000, 40156249, 39843751},
- } {
- // Increase
- if have, want := CalcGasLimit(tc.pGasLimit, 2*tc.pGasLimit), tc.max; have != want {
- t.Errorf("test %d: have %d want <%d", i, have, want)
- }
- // Decrease
- if have, want := CalcGasLimit(tc.pGasLimit, 0), tc.min; have != want {
- t.Errorf("test %d: have %d want >%d", i, have, want)
- }
- // Small decrease
- if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit-1), tc.pGasLimit-1; have != want {
- t.Errorf("test %d: have %d want %d", i, have, want)
- }
- // Small increase
- if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit+1), tc.pGasLimit+1; have != want {
- t.Errorf("test %d: have %d want %d", i, have, want)
- }
- // No change
- if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit), tc.pGasLimit; have != want {
- t.Errorf("test %d: have %d want %d", i, have, want)
- }
- }
-}
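The expected values in the table above encode a simple bound: per block, the gas limit may move by strictly less than parent/divisor in either direction. A minimal sketch of that arithmetic, assuming a bound divisor of 256, which is inferred from the table values (20000000/256 = 78125 and 20078124 = 20000000 + 78125 - 1) rather than taken from this diff:

```go
package main

import "fmt"

// bounds returns the extreme gas limits reachable in one step from a
// parent gas limit, assuming a bound divisor of 256 (an inference from
// the test table above, not a constant shown in this diff).
func bounds(parent uint64) (min, max uint64) {
	delta := parent/256 - 1 // strictly less than parent/256
	return parent - delta, parent + delta
}

func main() {
	min, max := bounds(20000000)
	fmt.Println(min, max) // 19921876 20078124, matching the first table row
}
```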
diff --git a/core/blockarchiver/block_archiver_service.go b/core/blockarchiver/block_archiver_service.go
index bf0753f960..f1ad779d53 100644
--- a/core/blockarchiver/block_archiver_service.go
+++ b/core/blockarchiver/block_archiver_service.go
@@ -126,7 +126,7 @@ func (c *BlockArchiverService) getBlockByNumber(number uint64) (*types.Body, *ty
c.requestLock.AddRange(start, end)
defer c.requestLock.RemoveRange(start, end)
- blocks, err := c.client.GetBlocksByRange(number)
+ blocks, err := c.client.GetBundleBlocksByBlockNum(number)
if err != nil {
return nil, nil, err
}
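The hunk above preserves the surrounding pattern: the service claims a block range before fetching, so concurrent callers don't re-request overlapping bundles. A self-contained sketch of such a range guard follows; the type and its blocking semantics are assumptions for illustration, not the repo's actual requestLock:

```go
package main

import "sync"

// rangeLock is a hypothetical, simplified stand-in for the service's
// requestLock: AddRange blocks while any overlapping range is in flight,
// RemoveRange releases the claim and wakes waiters.
type rangeLock struct {
	mu     sync.Mutex
	cond   *sync.Cond
	ranges map[[2]uint64]bool
}

func newRangeLock() *rangeLock {
	l := &rangeLock{ranges: make(map[[2]uint64]bool)}
	l.cond = sync.NewCond(&l.mu)
	return l
}

func (l *rangeLock) AddRange(start, end uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	for l.overlaps(start, end) {
		l.cond.Wait() // another fetch owns an overlapping range
	}
	l.ranges[[2]uint64{start, end}] = true
}

func (l *rangeLock) RemoveRange(start, end uint64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.ranges, [2]uint64{start, end})
	l.cond.Broadcast() // let blocked claimers re-check
}

func (l *rangeLock) overlaps(start, end uint64) bool {
	for r := range l.ranges {
		if start <= r[1] && r[0] <= end {
			return true
		}
	}
	return false
}

func main() {
	l := newRangeLock()
	l.AddRange(100, 132)
	// ...fetch the bundle covering blocks 100-132...
	l.RemoveRange(100, 132)
}
```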
diff --git a/core/blockarchiver/client.go b/core/blockarchiver/client.go
index 407b4f956e..34dda2b8d7 100644
--- a/core/blockarchiver/client.go
+++ b/core/blockarchiver/client.go
@@ -119,7 +119,8 @@ func (c *Client) GetBundleBlocksRange(blockNum uint64) (uint64, uint64, error) {
return startSlot, endSlot, nil
}
-func (c *Client) GetBlocksByRange(blockNum uint64) ([]*Block, error) {
+// GetBundleBlocksByBlockNum returns the bundle blocks for the given block number that falls within the bundle range
+func (c *Client) GetBundleBlocksByBlockNum(blockNum uint64) ([]*Block, error) {
payload := preparePayload("eth_getBundledBlockByNumber", []interface{}{Int64ToHex(int64(blockNum))})
body, err := c.postRequest(payload)
if err != nil {
@@ -133,6 +134,7 @@ func (c *Client) GetBlocksByRange(blockNum uint64) ([]*Block, error) {
return getBlocksResp.Result, nil
}
+// postRequest sends a POST request to the block archiver service
func (c *Client) postRequest(payload map[string]interface{}) ([]byte, error) {
// Encode payload to JSON
payloadBytes, err := json.Marshal(payload)
@@ -162,6 +164,7 @@ func (c *Client) postRequest(payload map[string]interface{}) ([]byte, error) {
return body, nil
}
+// preparePayload assembles the JSON-RPC request payload
func preparePayload(method string, params []interface{}) map[string]interface{} {
return map[string]interface{}{
"jsonrpc": "2.0",
diff --git a/core/blockarchiver/converter.go b/core/blockarchiver/converter.go
index 476a65d8b0..b47ee452f1 100644
--- a/core/blockarchiver/converter.go
+++ b/core/blockarchiver/converter.go
@@ -63,7 +63,6 @@ func convertBlock(block *Block) (*GeneralBlock, error) {
ts, err := HexToUint64(block.Timestamp)
if err != nil {
return nil, err
-
}
nonce, err := HexToUint64(block.Nonce)
if err != nil {
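convertBlock relies on a HexToUint64 helper for quantity fields such as the timestamp and nonce. A sketch of what such a helper typically does, assuming it parses 0x-prefixed hex quantities; the package's real implementation isn't shown in this diff:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// hexToUint64 parses a 0x-prefixed hexadecimal quantity into a uint64.
// Hypothetical stand-in for the blockarchiver package's HexToUint64.
func hexToUint64(s string) (uint64, error) {
	return strconv.ParseUint(strings.TrimPrefix(s, "0x"), 16, 64)
}

func main() {
	ts, err := hexToUint64("0x65f1b1a0") // e.g. a block timestamp
	fmt.Println(ts, err)
}
```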
diff --git a/core/blockchain_diff_test.go b/core/blockchain_diff_test.go
deleted file mode 100644
index 8ec14bce43..0000000000
--- a/core/blockchain_diff_test.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Tests that abnormal program termination (i.e. crash) and restart doesn't leave
-// the database in some strange state with gaps in the chain, nor with block data
-// dangling in the future.
-
-package core
-
-import (
- "encoding/hex"
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-var (
- // testKey is a private key to use for funding a tester account.
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- contractCode, _ = hex.DecodeString("608060405260016000806101000a81548160ff02191690831515021790555034801561002a57600080fd5b506101688061003a6000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806389a2d8011461003b578063b0483f4814610059575b600080fd5b610043610075565b60405161005091906100f4565b60405180910390f35b610073600480360381019061006e91906100bc565b61008b565b005b60008060009054906101000a900460ff16905090565b806000806101000a81548160ff02191690831515021790555050565b6000813590506100b68161011b565b92915050565b6000602082840312156100ce57600080fd5b60006100dc848285016100a7565b91505092915050565b6100ee8161010f565b82525050565b600060208201905061010960008301846100e5565b92915050565b60008115159050919050565b6101248161010f565b811461012f57600080fd5b5056fea264697066735822122092f788b569bfc3786e90601b5dbec01cfc3d76094164fd66ca7d599c4239fc5164736f6c63430008000033")
- contractAddr = common.HexToAddress("0xe74a3c7427cda785e0000d42a705b1f3fd371e09")
- contractData1, _ = hex.DecodeString("b0483f480000000000000000000000000000000000000000000000000000000000000000")
- contractData2, _ = hex.DecodeString("b0483f480000000000000000000000000000000000000000000000000000000000000001")
- commonGas = 192138
- // testAddr is the Ethereum address of the tester account.
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
-
- // testBlocks is the test parameters array for specific blocks.
- testBlocks = []testBlockParam{
- {
- // This txs params also used to default block.
- blockNr: 11,
- txs: []testTransactionParam{
- {
- to: &common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- },
- },
- {
- blockNr: 12,
- txs: []testTransactionParam{
- {
- to: &common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- {
- to: &common.Address{0x02},
- value: big.NewInt(2),
- gasPrice: big.NewInt(params.InitialBaseFee + 1),
- data: nil,
- },
- {
- to: nil,
- value: big.NewInt(0),
- gasPrice: big.NewInt(params.InitialBaseFee + 1),
- data: contractCode,
- },
- },
- },
- {
- blockNr: 13,
- txs: []testTransactionParam{
- {
- to: &common.Address{0x01},
- value: big.NewInt(1),
- gasPrice: big.NewInt(params.InitialBaseFee),
- data: nil,
- },
- {
- to: &common.Address{0x02},
- value: big.NewInt(2),
- gasPrice: big.NewInt(params.InitialBaseFee + 1),
- data: nil,
- },
- {
- to: &common.Address{0x03},
- value: big.NewInt(3),
- gasPrice: big.NewInt(params.InitialBaseFee + 2),
- data: nil,
- },
- {
- to: &contractAddr,
- value: big.NewInt(0),
- gasPrice: big.NewInt(params.InitialBaseFee + 2),
- data: contractData1,
- },
- },
- },
- {
- blockNr: 14,
- txs: []testTransactionParam{
- {
- to: &contractAddr,
- value: big.NewInt(0),
- gasPrice: big.NewInt(params.InitialBaseFee + 2),
- data: contractData2,
- },
- },
- },
- {
- blockNr: 15,
- txs: []testTransactionParam{},
- },
- }
-)
-
-type testTransactionParam struct {
- to *common.Address
- value *big.Int
- gasPrice *big.Int
- data []byte
-}
-
-type testBlockParam struct {
- blockNr int
- txs []testTransactionParam
-}
-
-// testBackend is a mock implementation of the live Ethereum message handler. Its
-// purpose is to allow testing the request/reply workflows and wire serialization
-// in the `eth` protocol without actually doing any data processing.
-type testBackend struct {
- db ethdb.Database
- chain *BlockChain
-}
-
-// newTestBackend creates an empty chain and wraps it into a mock backend.
-func newTestBackend(blocks int, light bool) *testBackend {
- return newTestBackendWithGenerator(blocks, light)
-}
-
-// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
-// wraps it into a mock backend.
-func newTestBackendWithGenerator(blocks int, lightProcess bool) *testBackend {
- signer := types.HomesteadSigner{}
- // Create a database pre-initialized with a genesis block
- db := rawdb.NewMemoryDatabase()
- db.SetDiffStore(memorydb.New())
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- chain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil, EnablePersistDiff(860000))
- generator := func(i int, block *BlockGen) {
- // The chain maker doesn't have access to a chain, so the difficulty will be
- // left unset (nil). Set it here to the correct value.
- block.SetCoinbase(testAddr)
-
- for idx, testBlock := range testBlocks {
- // Specific block setting; the index in this generator is offset by 1 from the specified blockNr.
- if i+1 == testBlock.blockNr {
- for _, testTransaction := range testBlock.txs {
- var transaction *types.Transaction
- if testTransaction.to == nil {
- transaction = types.NewContractCreation(block.TxNonce(testAddr),
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- } else {
- transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- }
- tx, err := types.SignTx(transaction, signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain, tx)
- }
- break
- }
-
- // Default block setting.
- if idx == len(testBlocks)-1 {
- // We want to simulate an empty middle block, having the same state as the
- // first one. The last one needs a state change again to force a reorg.
- for _, testTransaction := range testBlocks[0].txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain, tx)
- }
- }
- }
- }
- bs, _ := GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator)
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
-
- return &testBackend{
- db: db,
- chain: chain,
- }
-}
-
-// close tears down the transaction pool and chain behind the mock backend.
-func (b *testBackend) close() {
- b.chain.Stop()
-}
-
-func (b *testBackend) Chain() *BlockChain { return b.chain }
-
-func TestFreezeDiffLayer(t *testing.T) {
- blockNum := 1024
- fullBackend := newTestBackend(blockNum, true)
- defer fullBackend.close()
- for len(fullBackend.chain.diffQueueBuffer) > 0 {
- // Wait for the buffer to drain; sleep briefly instead of busy-spinning.
- time.Sleep(10 * time.Millisecond)
- }
- // Minus one empty block.
- if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 {
- t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
- }
-
- time.Sleep(diffLayerFreezerRecheckInterval + 2*time.Second)
- if fullBackend.chain.diffQueue.Size() != int(fullBackend.chain.triesInMemory) {
- t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum, fullBackend.chain.diffQueue.Size())
- }
-
- block := fullBackend.chain.GetBlockByNumber(uint64(blockNum / 2))
- diffStore := fullBackend.chain.db.DiffStore()
- rawData := rawdb.ReadDiffLayerRLP(diffStore, block.Hash())
- if len(rawData) == 0 {
- t.Error("do not find diff layer in db")
- }
-}
-
-// newTwoForkedBlockchains returns two blockchains generated by different generators.
-// They share a common prefix of parent blocks, whose length is determined by
-// testBlocks; once chain1 inserts a non-default block, the two chains fork.
-func newTwoForkedBlockchains(len1, len2 int) (chain1 *BlockChain, chain2 *BlockChain) {
- signer := types.HomesteadSigner{}
- // Create a database pre-initialized with a genesis block
- db1 := rawdb.NewMemoryDatabase()
- db1.SetDiffStore(memorydb.New())
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- engine1 := ethash.NewFaker()
- chain1, _ = NewBlockChain(db1, nil, gspec, nil, engine1, vm.Config{}, nil, nil, EnablePersistDiff(860000), EnableBlockValidator(params.TestChainConfig, engine1, 0, nil))
- generator1 := func(i int, block *BlockGen) {
- // The chain maker doesn't have access to a chain, so the difficulty will be
- // left unset (nil). Set it here to the correct value.
- block.SetCoinbase(testAddr)
-
- for idx, testBlock := range testBlocks {
- // Specific block setting; the index in this generator is offset by 1 from the specified blockNr.
- if i+1 == testBlock.blockNr {
- for _, testTransaction := range testBlock.txs {
- var transaction *types.Transaction
- if testTransaction.to == nil {
- transaction = types.NewContractCreation(block.TxNonce(testAddr),
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- } else {
- transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- }
- tx, err := types.SignTx(transaction, signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain1, tx)
- }
- break
- }
-
- // Default block setting.
- if idx == len(testBlocks)-1 {
- // We want to simulate an empty middle block, having the same state as the
- // first one. The last one needs a state change again to force a reorg.
- for _, testTransaction := range testBlocks[0].txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain1, tx)
- }
- }
- }
- }
- bs1, _ := GenerateChain(params.TestChainConfig, chain1.Genesis(), ethash.NewFaker(), db1, len1, generator1)
- if _, err := chain1.InsertChain(bs1); err != nil {
- panic(err)
- }
- waitDifflayerCached(chain1, bs1)
-
- // Create a database pre-initialized with a genesis block
- db2 := rawdb.NewMemoryDatabase()
- db2.SetDiffStore(memorydb.New())
- gspec2 := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- engine2 := ethash.NewFaker()
- chain2, _ = NewBlockChain(db2, nil, gspec2, nil, ethash.NewFaker(), vm.Config{}, nil, nil, EnablePersistDiff(860000), EnableBlockValidator(params.TestChainConfig, engine2, 0, nil))
- generator2 := func(i int, block *BlockGen) {
- // The chain maker doesn't have access to a chain, so the difficulty will be
- // left unset (nil). Set it here to the correct value.
- block.SetCoinbase(testAddr)
- // We want to simulate an empty middle block, having the same state as the
- // first one. The last one needs a state change again to force a reorg.
- for _, testTransaction := range testBlocks[0].txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(chain2, tx)
- }
- }
- bs2, _ := GenerateChain(params.TestChainConfig, chain2.Genesis(), ethash.NewFaker(), db2, len2, generator2)
- if _, err := chain2.InsertChain(bs2); err != nil {
- panic(err)
- }
- waitDifflayerCached(chain2, bs2)
-
- return chain1, chain2
-}
-
-func waitDifflayerCached(chain *BlockChain, bs types.Blocks) {
- for _, block := range bs {
- // wait for all difflayers to be cached
- for block.Header().TxHash != types.EmptyRootHash &&
- chain.GetTrustedDiffLayer(block.Hash()) == nil {
- time.Sleep(time.Second)
- }
- }
-}
-
-func testGetRootByDiffHash(t *testing.T, chain1, chain2 *BlockChain, blockNumber uint64, status types.VerifyStatus) {
- block2 := chain2.GetBlockByNumber(blockNumber)
- if block2 == nil {
- t.Fatalf("failed to find block, number: %v", blockNumber)
- }
- expect := VerifyResult{
- Status: status,
- BlockNumber: blockNumber,
- BlockHash: block2.Hash(),
- }
- if status.Code&0xff00 == types.StatusVerified.Code {
- expect.Root = block2.Root()
- }
-
- diffLayer2 := chain2.GetTrustedDiffLayer(block2.Hash())
- if diffLayer2 == nil {
- t.Fatal("failed to find diff layer")
- }
- diffHash2 := types.EmptyRootHash
- if status != types.StatusDiffHashMismatch {
- var err error
- diffHash2, err = CalculateDiffHash(diffLayer2)
- if err != nil {
- t.Fatalf("failed to compute diff hash: %v", err)
- }
- }
-
- if status == types.StatusPartiallyVerified {
- block1 := chain1.GetBlockByNumber(blockNumber)
- if block1 == nil {
- t.Fatalf("failed to find block, number: %v", blockNumber)
- }
- chain1.diffLayerCache.Remove(block1.Hash())
- }
-
- result := chain1.GetVerifyResult(blockNumber, block2.Hash(), diffHash2)
- if result.Status != expect.Status {
- t.Fatalf("failed to verify block, number: %v, expect status: %v, real status: %v", blockNumber, expect.Status, result.Status)
- }
- if result.Root != expect.Root {
- t.Fatalf("failed to verify block, number: %v, expect root: %v, real root: %v", blockNumber, expect.Root, result.Root)
- }
-}
-
-func TestGetRootByDiffHash(t *testing.T) {
- len1 := 23 // length of blockchain1
- len2 := 35 // length of blockchain2
- plen := 11 // length of the shared parent blocks, which is determined by testBlocks.
-
- chain1, chain2 := newTwoForkedBlockchains(len1, len2)
- defer chain1.Stop()
- defer chain2.Stop()
-
- hash1 := chain1.GetBlockByNumber(uint64(plen)).Hash()
- hash2 := chain2.GetBlockByNumber(uint64(plen)).Hash()
- if hash1 != hash2 {
- t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", plen, hash2, hash1)
- }
-
- testGetRootByDiffHash(t, chain1, chain2, 10, types.StatusFullVerified)
- testGetRootByDiffHash(t, chain1, chain2, 2, types.StatusPartiallyVerified)
- testGetRootByDiffHash(t, chain1, chain2, 10, types.StatusDiffHashMismatch)
- testGetRootByDiffHash(t, chain1, chain2, 12, types.StatusImpossibleFork)
- testGetRootByDiffHash(t, chain1, chain2, 20, types.StatusPossibleFork)
- testGetRootByDiffHash(t, chain1, chain2, 24, types.StatusBlockNewer)
- testGetRootByDiffHash(t, chain1, chain2, 35, types.StatusBlockTooNew)
-}
diff --git a/core/blockchain_notries_test.go b/core/blockchain_notries_test.go
deleted file mode 100644
index 57b150701a..0000000000
--- a/core/blockchain_notries_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Tests that abnormal program termination (i.e. crash) and restart doesn't leave
-// the database in some strange state with gaps in the chain, nor with block data
-// dangling in the future.
-
-package core
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-func newMockVerifyPeer() *mockVerifyPeer {
- return &mockVerifyPeer{}
-}
-
-type requestRoot struct {
- blockNumber uint64
- blockHash common.Hash
- diffHash common.Hash
-}
-
-type verifFailedStatus struct {
- status types.VerifyStatus
- blockNumber uint64
-}
-
-// mockVerifyPeer is a mock peer that simulates p2p signals for verification tasks.
-type mockVerifyPeer struct {
- callback func(*requestRoot)
-}
-
-func (peer *mockVerifyPeer) setCallBack(callback func(*requestRoot)) {
- peer.callback = callback
-}
-
-func (peer *mockVerifyPeer) RequestRoot(blockNumber uint64, blockHash common.Hash, diffHash common.Hash) error {
- if peer.callback != nil {
- peer.callback(&requestRoot{blockNumber, blockHash, diffHash})
- }
- return nil
-}
-
-func (peer *mockVerifyPeer) ID() string {
- return "mock_peer"
-}
-
-type mockVerifyPeers struct {
- peers []VerifyPeer
-}
-
-func (peers *mockVerifyPeers) GetVerifyPeers() []VerifyPeer {
- return peers.peers
-}
-
-func newMockRemoteVerifyPeer(peers []VerifyPeer) *mockVerifyPeers {
- return &mockVerifyPeers{peers}
-}
-
-func makeTestBackendWithRemoteValidator(blocks int, mode VerifyMode, failed *verifFailedStatus) (*testBackend, *testBackend, []*types.Block, error) {
- signer := types.HomesteadSigner{}
-
- // Create a database pre-initialized with a genesis block
- db := rawdb.NewMemoryDatabase()
- db.SetDiffStore(memorydb.New())
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- }
- engine := ethash.NewFaker()
-
- db2 := rawdb.NewMemoryDatabase()
- db2.SetDiffStore(memorydb.New())
- gspec2 := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
- }
- engine2 := ethash.NewFaker()
-
- peer := newMockVerifyPeer()
- peers := []VerifyPeer{peer}
-
- verifier, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{},
- nil, nil, EnablePersistDiff(100000), EnableBlockValidator(params.TestChainConfig, engine2, LocalVerify, nil))
- if err != nil {
- return nil, nil, nil, err
- }
-
- fastnode, err := NewBlockChain(db2, nil, gspec2, nil, engine2, vm.Config{},
- nil, nil, EnableBlockValidator(params.TestChainConfig, engine2, mode, newMockRemoteVerifyPeer(peers)))
- if err != nil {
- return nil, nil, nil, err
- }
-
- generator := func(i int, block *BlockGen) {
- // The chain maker doesn't have access to a chain, so the difficulty will be
- // left unset (nil). Set it here to the correct value.
- block.SetCoinbase(testAddr)
-
- for idx, testBlock := range testBlocks {
- // Specific block setting; the index in this generator is offset by 1 from the specified blockNr.
- if i+1 == testBlock.blockNr {
- for _, testTransaction := range testBlock.txs {
- var transaction *types.Transaction
- if testTransaction.to == nil {
- transaction = types.NewContractCreation(block.TxNonce(testAddr),
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- } else {
- transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data)
- }
- tx, err := types.SignTx(transaction, signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(verifier, tx)
- }
- break
- }
-
- // Default block setting.
- if idx == len(testBlocks)-1 {
- // We want to simulate an empty middle block, having the same state as the
- // first one. The last one needs a state change again to force a reorg.
- for _, testTransaction := range testBlocks[0].txs {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to,
- testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTxWithChain(verifier, tx)
- }
- }
- }
- }
- peer.setCallBack(func(req *requestRoot) {
- if fastnode.validator != nil && fastnode.validator.RemoteVerifyManager() != nil {
- resp := verifier.GetVerifyResult(req.blockNumber, req.blockHash, req.diffHash)
- if failed != nil && req.blockNumber == failed.blockNumber {
- resp.Status = failed.status
- }
- fastnode.validator.RemoteVerifyManager().
- HandleRootResponse(
- resp, peer.ID())
- }
- })
-
- bs, _ := GenerateChain(params.TestChainConfig, verifier.Genesis(), ethash.NewFaker(), db, blocks, generator)
- if _, err := verifier.InsertChain(bs); err != nil {
- return nil, nil, nil, err
- }
- waitDifflayerCached(verifier, bs)
-
- return &testBackend{
- db: db,
- chain: verifier,
- },
- &testBackend{
- db: db2,
- chain: fastnode,
- }, bs, nil
-}
-
-func TestFastNode(t *testing.T) {
- // test full mode and succeed
- _, fastnode, blocks, err := makeTestBackendWithRemoteValidator(2048, FullVerify, nil)
- if err != nil {
- t.Fatal(err)
- }
- _, err = fastnode.chain.InsertChain(blocks)
- if err != nil {
- t.Fatal(err)
- }
- // test full mode and failed
- failed := &verifFailedStatus{status: types.StatusDiffHashMismatch, blockNumber: 204}
- _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, FullVerify, failed)
- if err != nil {
- t.Fatal(err)
- }
- _, err = fastnode.chain.InsertChain(blocks)
- if err == nil || fastnode.chain.CurrentBlock().Number.Uint64() != failed.blockNumber+10 {
- t.Fatalf("blocks insert should be failed at height %d", failed.blockNumber+11)
- }
- // test insecure mode and succeed
- _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, InsecureVerify, nil)
- if err != nil {
- t.Fatal(err)
- }
- _, err = fastnode.chain.InsertChain(blocks)
- if err != nil {
- t.Fatal(err)
- }
- // test insecure mode and failed
- failed = &verifFailedStatus{status: types.StatusImpossibleFork, blockNumber: 204}
- _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, InsecureVerify, failed)
- if err != nil {
- t.Fatal(err)
- }
- _, err = fastnode.chain.InsertChain(blocks)
- if err == nil || fastnode.chain.CurrentBlock().Number.Uint64() != failed.blockNumber+10 {
- t.Fatalf("blocks insert should be failed at height %d", failed.blockNumber+11)
- }
-}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
deleted file mode 100644
index 3cfcdafe4a..0000000000
--- a/core/blockchain_repair_test.go
+++ /dev/null
@@ -1,2018 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Tests that abnormal program termination (i.e. crash) and restart doesn't leave
-// the database in some strange state with gaps in the chain, nor with block data
-// dangling in the future.
-
-package core
-
-import (
- "math/big"
- "path"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// Tests a recovery for a short canonical chain where a recent block was already
-// committed to disk and then the process crashed. In this case we expect the full
-// chain to be rolled back to the committed block, but the chain data itself left
-// in the database for replaying.
-func TestShortRepair(t *testing.T) { testShortRepair(t, false) }
-func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) }
-
-func testShortRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 8,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain where the fast sync pivot point was
-// already committed, after which the process crashed. In this case we expect the full
-// chain to be rolled back to the committed block, but the chain data itself left in
-// the database for replaying.
-func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) }
-func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) }
-
-func testShortSnapSyncedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain where the fast sync pivot point was
-// not yet committed, but the process crashed. In this case we expect the chain to
-// detect that it was fast syncing and not delete anything, since we can just pick
-// up directly where we left off.
-func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) }
-func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) }
-
-func testShortSnapSyncingRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where a
-// recent block was already committed to disk and then the process crashed. In this
-// test scenario the side chain is below the committed block. In this case we expect
-// the canonical chain to be rolled back to the committed block, but the chain data
-// itself left in the database for replaying.
-func TestShortOldForkedRepair(t *testing.T) { testShortOldForkedRepair(t, false) }
-func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) }
-
-func testShortOldForkedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 8,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was already committed to disk and then the process
-// crashed. In this test scenario the side chain is below the committed block. In
-// this case we expect the canonical chain to be rolled back to the committed block,
-// but the chain data itself left in the database for replaying.
-func TestShortOldForkedSnapSyncedRepair(t *testing.T) {
- testShortOldForkedSnapSyncedRepair(t, false)
-}
-func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
- testShortOldForkedSnapSyncedRepair(t, true)
-}
-
-func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was not yet committed, but the process crashed. In this
-// test scenario the side chain is below the committed block. In this case we expect
-// the chain to detect that it was fast syncing and not delete anything, since we
-// can just pick up directly where we left off.
-func TestShortOldForkedSnapSyncingRepair(t *testing.T) {
- testShortOldForkedSnapSyncingRepair(t, false)
-}
-func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
- testShortOldForkedSnapSyncingRepair(t, true)
-}
-
-func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where a
-// recent block was already committed to disk and then the process crashed. In this
-// test scenario the side chain reaches above the committed block. In this case we
-// expect the canonical chain to be rolled back to the committed block, but the
-// chain data itself left in the database for replaying.
-func TestShortNewlyForkedRepair(t *testing.T) { testShortNewlyForkedRepair(t, false) }
-func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) }
-
-func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 6,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 8,
- expSidechainBlocks: 6,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was already committed to disk and then the process
-// crashed. In this test scenario the side chain reaches above the committed block.
-// In this case we expect the canonical chain to be rolled back to the committed
-// block, but the chain data itself left in the database for replaying.
-func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) {
- testShortNewlyForkedSnapSyncedRepair(t, false)
-}
-func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) {
- testShortNewlyForkedSnapSyncedRepair(t, true)
-}
-
-func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 6,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 6,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was not yet committed, but the process crashed. In
-// this test scenario the side chain reaches above the committed block. In this
-// case we expect the chain to detect that it was fast syncing and not delete
-// anything, since we can just pick up directly where we left off.
-func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) {
- testShortNewlyForkedSnapSyncingRepair(t, false)
-}
-func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) {
- testShortNewlyForkedSnapSyncingRepair(t, true)
-}
-
-func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 6,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 6,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a longer side chain, where a
-// recent block was already committed to disk and then the process crashed. In this
-// case we expect the canonical chain to be rolled back to the committed block, but
-// the chain data itself left in the database for replaying.
-func TestShortReorgedRepair(t *testing.T) { testShortReorgedRepair(t, false) }
-func TestShortReorgedRepairWithSnapshots(t *testing.T) { testShortReorgedRepair(t, true) }
-
-func testShortReorgedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 8,
- expSidechainBlocks: 10,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a longer side chain, where
-// the fast sync pivot point was already committed to disk and then the process
-// crashed. In this case we expect the canonical chain to be rolled back to the
-// committed block, but the chain data itself left in the database for replaying.
-func TestShortReorgedSnapSyncedRepair(t *testing.T) {
- testShortReorgedSnapSyncedRepair(t, false)
-}
-func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) {
- testShortReorgedSnapSyncedRepair(t, true)
-}
-
-func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 10,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a short canonical chain and a longer side chain, where
-// the fast sync pivot point was not yet committed, but the process crashed. In
-// this case we expect the chain to detect that it was fast syncing and not delete
-// anything, since we can just pick up directly where we left off.
-func TestShortReorgedSnapSyncingRepair(t *testing.T) {
- testShortReorgedSnapSyncingRepair(t, false)
-}
-func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) {
- testShortReorgedSnapSyncingRepair(t, true)
-}
-
-func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 8,
- expSidechainBlocks: 10,
- expFrozen: 0,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where a recent
-// block - newer than the ancient limit - was already committed to disk and then
-// the process crashed. In this case we expect the chain to be rolled back to the
-// committed block, with everything afterwards kept as fast sync data.
-func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) }
-func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
-
-func testLongShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where a recent
-// block - older than the ancient limit - was already committed to disk and then
-// the process crashed. In this case we expect the chain to be rolled back to the
-// committed block, with everything afterwards deleted.
-func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) }
-func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
-
-func testLongDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where the fast
-// sync pivot point - newer than the ancient limit - was already committed, after
-// which the process crashed. In this case we expect the chain to be rolled back
-// to the committed block, with everything afterwards kept as fast sync data.
-func TestLongSnapSyncedShallowRepair(t *testing.T) {
- testLongSnapSyncedShallowRepair(t, false)
-}
-func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
- testLongSnapSyncedShallowRepair(t, true)
-}
-
-func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where the fast
-// sync pivot point - older than the ancient limit - was already committed, after
-// which the process crashed. In this case we expect the chain to be rolled back
-// to the committed block, with everything afterwards deleted.
-func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) }
-func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) }
-
-func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where the fast
-// sync pivot point - newer than the ancient limit - was not yet committed, but the
-// process crashed. In this case we expect the chain to detect that it was fast
-// syncing and not delete anything, since we can just pick up directly where we
-// left off.
-func TestLongSnapSyncingShallowRepair(t *testing.T) {
- testLongSnapSyncingShallowRepair(t, false)
-}
-func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
- testLongSnapSyncingShallowRepair(t, true)
-}
-
-func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks where the fast
-// sync pivot point - older than the ancient limit - was not yet committed, but the
-// process crashed. In this case we expect the chain to detect that it was fast
-// syncing and not delete anything, since we can just pick up directly where we
-// left off.
-func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) }
-func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) }
-
-func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected in leveldb:
- // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
- //
- // Expected head header : C24
- // Expected head fast block: C24
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 24,
- expSidechainBlocks: 0,
- expFrozen: 9,
- expHeadHeader: 24,
- expHeadFastBlock: 24,
- expHeadBlock: 0,
- }, snapshots)
-}
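-
-// Across these crash scenarios the expected head block follows a single rule:
-// roll back to the last committed state, unless the node was still snap
-// syncing (a pivot recorded but not yet committed), in which case nothing is
-// deleted and the head block stays at genesis. A hypothetical illustration,
-// not a helper from the original file:
-func expectedHeadBlock(committed uint64, pivot *uint64) uint64 {
- if pivot != nil && committed < *pivot {
- return 0 // still snap syncing: resume from genesis
- }
- return committed // full-sync style rollback to the committed state
-}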
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - newer than the ancient limit - was already
-// committed to disk and then the process crashed. In this test scenario the side
-// chain is below the committed block. In this case we expect the chain to be
-// rolled back to the committed block, with everything afterwards kept as fast
-// sync data; the side chain completely nuked by the freezer.
-func TestLongOldForkedShallowRepair(t *testing.T) {
- testLongOldForkedShallowRepair(t, false)
-}
-func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) {
- testLongOldForkedShallowRepair(t, true)
-}
-
-func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - older than the ancient limit - was already
-// committed to disk and then the process crashed. In this test scenario the side
-// chain is below the committed block. In this case we expect the canonical chain
-// to be rolled back to the committed block, with everything afterwards deleted;
-// the side chain completely nuked by the freezer.
-func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) }
-func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
-
-func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then the process crashed. In this test scenario
-// the side chain is below the committed block. In this case we expect the chain
-// to be rolled back to the committed block, with everything afterwards kept as
-// fast sync data; the side chain completely nuked by the freezer.
-func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) {
- testLongOldForkedSnapSyncedShallowRepair(t, false)
-}
-func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncedShallowRepair(t, true)
-}
-
-func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then the process crashed. In this test scenario
-// the side chain is below the committed block. In this case we expect the canonical
-// chain to be rolled back to the committed block, with everything afterwards deleted;
-// the side chain completely nuked by the freezer.
-func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) {
- testLongOldForkedSnapSyncedDeepRepair(t, false)
-}
-func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncedDeepRepair(t, true)
-}
-
-func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but the process crashed. In this test scenario the side
-// chain is below the committed block. In this case we expect the chain to detect
-// that it was fast syncing and not delete anything. The side chain is completely
-// nuked by the freezer.
-func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) {
- testLongOldForkedSnapSyncingShallowRepair(t, false)
-}
-func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncingShallowRepair(t, true)
-}
-
-func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but the process crashed. In this test scenario the side
-// chain is below the committed block. In this case we expect the chain to detect
-// that it was fast syncing and not delete anything. The side chain is completely
-// nuked by the freezer.
-func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) {
- testLongOldForkedSnapSyncingDeepRepair(t, false)
-}
-func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncingDeepRepair(t, true)
-}
-
-func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected in leveldb:
- // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
- //
- // Expected head header : C24
- // Expected head fast block: C24
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 24,
- expSidechainBlocks: 0,
- expFrozen: 9,
- expHeadHeader: 24,
- expHeadFastBlock: 24,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - newer than the ancient limit - was already
-// committed to disk and then the process crashed. In this test scenario the side
-// chain is above the committed block. In this case we expect the chain to be
-// rolled back to the committed block, with everything afterwards kept as fast
-// sync data; the side chain completely nuked by the freezer.
-func TestLongNewerForkedShallowRepair(t *testing.T) {
- testLongNewerForkedShallowRepair(t, false)
-}
-func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) {
- testLongNewerForkedShallowRepair(t, true)
-}
-
-func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - older than the ancient limit - was already
-// committed to disk and then the process crashed. In this test scenario the side
-// chain is above the committed block. In this case we expect the canonical chain
-// to be rolled back to the committed block, with everything afterwards deleted;
-// the side chain completely nuked by the freezer.
-func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) }
-func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
-
-func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then the process crashed. In this test scenario
-// the side chain is above the committed block. In this case we expect the chain
-// to be rolled back to the committed block, with everything afterwards kept as fast
-// sync data; the side chain completely nuked by the freezer.
-func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) {
- testLongNewerForkedSnapSyncedShallowRepair(t, false)
-}
-func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncedShallowRepair(t, true)
-}
-
-func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then the process crashed. In this test scenario
-// the side chain is above the committed block. In this case we expect the canonical
-// chain to be rolled back to the committed block, with everything afterwards deleted;
-// the side chain completely nuked by the freezer.
-func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) {
- testLongNewerForkedSnapSyncedDeepRepair(t, false)
-}
-func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncedDeepRepair(t, true)
-}
-
-func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but the process crashed. In this test scenario the side
-// chain is above the committed block. In this case we expect the chain to detect
-// that it was fast syncing and not delete anything. The side chain is completely
-// nuked by the freezer.
-func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) {
- testLongNewerForkedSnapSyncingShallowRepair(t, false)
-}
-func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncingShallowRepair(t, true)
-}
-
-func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but the process crashed. In this test scenario the side
-// chain is above the committed block. In this case we expect the chain to detect
-// that it was fast syncing and not delete anything. The side chain is completely
-// nuked by the freezer.
-func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) {
- testLongNewerForkedSnapSyncingDeepRepair(t, false)
-}
-func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncingDeepRepair(t, true)
-}
-
-func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected in leveldb:
- // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
- //
- // Expected head header : C24
- // Expected head fast block: C24
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 24,
- expSidechainBlocks: 0,
- expFrozen: 9,
- expHeadHeader: 24,
- expHeadFastBlock: 24,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer side
-// chain, where a recent block - newer than the ancient limit - was already committed
-// to disk and then the process crashed. In this case we expect the chain to be
-// rolled back to the committed block, with everything afterwards kept as fast sync
-// data. The side chain completely nuked by the freezer.
-func TestLongReorgedShallowRepair(t *testing.T) { testLongReorgedShallowRepair(t, false) }
-func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) }
-
-func testLongReorgedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer side
-// chain, where a recent block - older than the ancient limit - was already committed
-// to disk and then the process crashed. In this case we expect the canonical chain
-// to be rolled back to the committed block, with everything afterwards deleted. The
-// side chain completely nuked by the freezer.
-func TestLongReorgedDeepRepair(t *testing.T) { testLongReorgedDeepRepair(t, false) }
-func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) }
-
-func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then the process crashed. In this case we
-// expect the chain to be rolled back to the committed block, with everything
-// afterwards kept as fast sync data. The side chain completely nuked by the
-// freezer.
-func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) {
- testLongReorgedSnapSyncedShallowRepair(t, false)
-}
-func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncedShallowRepair(t, true)
-}
-
-func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then the process crashed. In this case we
-// expect the canonical chain to be rolled back to the committed block, with
-// everything afterwards deleted. The side chain completely nuked by the freezer.
-func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) {
- testLongReorgedSnapSyncedDeepRepair(t, false)
-}
-func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncedDeepRepair(t, true)
-}
-
-func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but the process crashed. In this case we expect the
-// chain to detect that it was fast syncing and not delete anything, since we
-// can just pick up directly where we left off.
-func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) {
- testLongReorgedSnapSyncingShallowRepair(t, false)
-}
-func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncingShallowRepair(t, true)
-}
-
-func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
- //
- // Expected head header : C18
- // Expected head fast block: C18
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 18,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 18,
- expHeadFastBlock: 18,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a recovery for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but the process crashed. In this case we expect the
-// chain to detect that it was fast syncing and not delete anything, since we
-// can just pick up directly where we left off.
-func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) {
- testLongReorgedSnapSyncingDeepRepair(t, false)
-}
-func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncingDeepRepair(t, true)
-}
-
-func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected in leveldb:
- // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
- //
- // Expected head header : C24
- // Expected head fast block: C24
- // Expected head block : G
- testRepair(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- expCanonicalBlocks: 24,
- expSidechainBlocks: 0,
- expFrozen: 9,
- expHeadHeader: 24,
- expHeadFastBlock: 24,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- testRepairWithScheme(t, tt, snapshots, scheme)
- }
-}
-
-func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
- // It's hard to follow the test case; uncomment the lines below to visualize the input:
- //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump(true))
-
- // Create a temporary persistent database
- datadir := t.TempDir()
- ancient := path.Join(datadir, "ancient")
-
- db, err := rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to create persistent database: %v", err)
- }
- defer db.Close() // Might double close, should be fine
-
- // Initialize a fresh chain
- var (
- gspec = &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.AllEthashProtocolChanges,
- }
- engine = ethash.NewFullFaker()
- config = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 0, // Disable snapshot by default
- StateScheme: scheme,
- }
- )
- defer engine.Close()
- if snapshots {
- config.SnapshotLimit = 256
- config.SnapshotWait = true
- }
- config.TriesInMemory = 128
- chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create chain: %v", err)
- }
- // If sidechain blocks are needed, make a light chain and import it
- var sideblocks types.Blocks
- if tt.sidechainBlocks > 0 {
- sideblocks, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x01})
- })
- if _, err := chain.InsertChain(sideblocks); err != nil {
- t.Fatalf("Failed to import side chain: %v", err)
- }
- }
- canonblocks, _ := GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x02})
- b.SetDifficulty(big.NewInt(1000000))
- })
- if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- if tt.commitBlock > 0 {
- if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil {
- t.Fatalf("Failed to flush trie state: %v", err)
- }
- if snapshots {
- if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
- t.Fatalf("Failed to flatten snapshots: %v", err)
- }
- }
- }
-
- if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
- t.Fatalf("Failed to import canonical chain tail: %v", err)
- }
- // Force run a freeze cycle
- type freezer interface {
- Freeze(threshold uint64) error
- Ancients() (uint64, error)
- }
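- // Note: the Freeze error is ignored here; a failed freeze cycle would still
- // surface through the Ancients() count assertion at the end of the test.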
- db.(freezer).Freeze(tt.freezeThreshold)
-
- // Set the simulated pivot block
- if tt.pivotBlock != nil {
- rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
- }
- // Pull the plug on the database, simulating a hard crash
- chain.triedb.Close()
- db.Close()
- chain.stopWithoutSaving()
-
- // Start a new blockchain back up and see where the repair leads us
- db, err = rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to reopen persistent database: %v", err)
- }
- defer db.Close()
-
- newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newChain.Stop()
-
- // Iterate over all the remaining blocks and ensure there are no gaps
- verifyNoGaps(t, newChain, true, canonblocks)
- verifyNoGaps(t, newChain, false, sideblocks)
- verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
- verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
-
- if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
- t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
- }
- if head := newChain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock)
- }
- if head := newChain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock {
- t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock)
- }
- if frozen, err := db.(freezer).Ancients(); err != nil {
- t.Errorf("Failed to retrieve ancient count: %v\n", err)
- } else if int(frozen) != tt.expFrozen {
- t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
- }
-}
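-
-// verifyNoGaps and verifyCutoff are shared helpers defined alongside these
-// tests in package core (in the sethead test file removed below, in the
-// original tree). A minimal sketch of the cutoff check, consistent with the
-// call sites above - the name and exact assertions here are assumptions, not
-// the original implementation:
-func verifyCutoffSketch(t *testing.T, chain *BlockChain, kind string, inserted types.Blocks, head int) {
- t.Helper()
- for i, block := range inserted {
- if i < head && chain.GetBlockByHash(block.Hash()) == nil {
- t.Errorf("%s block #%d missing below the cutoff", kind, block.NumberU64())
- }
- if i >= head && chain.GetBlockByHash(block.Hash()) != nil {
- t.Errorf("%s block #%d dangling above the cutoff", kind, block.NumberU64())
- }
- }
-}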
-
-// TestIssue23496 tests the scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893
-// Credits to @zzyalbert for finding the issue.
-//
-// Local chain owns these blocks:
-// G B1 B2 B3 B4
-// B1: state committed
-// B2: snapshot disk layer
-// B3: state committed
-// B4: head block
-//
-// A crash happens without fully persisting the snapshot and in-memory states;
-// the chain rewinds itself to B1 (skipping B3 in order to recover the snapshot).
-// In this case the snapshot layer for B3 is not created, because the state
-// already exists on disk.
-func TestIssue23496(t *testing.T) {
- testIssue23496(t, rawdb.HashScheme)
- testIssue23496(t, rawdb.PathScheme)
-}
-
-func testIssue23496(t *testing.T, scheme string) {
- // It's hard to follow the test case; uncomment the line below to visualize the input:
- //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-
- // Create a temporary persistent database
- datadir := t.TempDir()
- ancient := path.Join(datadir, "ancient")
-
- db, err := rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- })
- if err != nil {
- t.Fatalf("Failed to create persistent database: %v", err)
- }
- defer db.Close() // Might double close, should be fine
-
- // Initialize a fresh chain
- var (
- gspec = &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- engine = ethash.NewFullFaker()
- )
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create chain: %v", err)
- }
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x02})
- b.SetDifficulty(big.NewInt(1000000))
- })
-
- // Insert block B1 and commit the state into disk
- if _, err := chain.InsertChain(blocks[:1]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- chain.triedb.Commit(blocks[0].Root(), false)
-
- // Insert block B2 and commit the snapshot into disk
- if _, err := chain.InsertChain(blocks[1:2]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
- t.Fatalf("Failed to flatten snapshots: %v", err)
- }
-
- // Insert block B3 and commit the state into disk
- if _, err := chain.InsertChain(blocks[2:3]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- chain.triedb.Commit(blocks[2].Root(), false)
-
- // Insert the remaining blocks
- if _, err := chain.InsertChain(blocks[3:]); err != nil {
- t.Fatalf("Failed to import canonical chain tail: %v", err)
- }
-
- // Pull the plug on the database, simulating a hard crash
- chain.triedb.Close()
- db.Close()
- chain.stopWithoutSaving()
-
- // Start a new blockchain back up and see where the repair leads us
- db, err = rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to reopen persistent database: %v", err)
- }
- defer db.Close()
-
- chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer chain.Stop()
-
- if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
- t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
- }
- if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4))
- }
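- // Why the schemes recover differently (an inference from the expected values
- // below): the hash scheme must rewind to B1, the last committed state below
- // the snapshot disk layer at B2, whereas the path scheme can reset its
- // persistent state straight to B2, landing one block higher.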
- expHead := uint64(1)
- if scheme == rawdb.PathScheme {
- expHead = uint64(2)
- }
- if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
- t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
- }
-
- // Reinsert B2-B4
- if _, err := chain.InsertChain(blocks[1:]); err != nil {
- t.Fatalf("Failed to import canonical chain tail: %v", err)
- }
- if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
- t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
- }
- if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4))
- }
- if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(4) {
- t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(4))
- }
- if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
- t.Error("Failed to regenerate the snapshot of known state")
- }
-}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
deleted file mode 100644
index 217610c33a..0000000000
--- a/core/blockchain_sethead_test.go
+++ /dev/null
@@ -1,2191 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Tests that setting the chain head backwards doesn't leave the database in some
-// strange state with gaps in the chain, nor with block data dangling in the future.
-
-package core
-
-import (
- "fmt"
- "math/big"
- "path"
- "strings"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
- "github.com/ethereum/go-ethereum/triedb/hashdb"
- "github.com/ethereum/go-ethereum/triedb/pathdb"
-)
-
-// rewindTest is a test case for chain rollback upon user request.
-type rewindTest struct {
- canonicalBlocks int // Number of blocks to generate for the canonical chain (heavier)
- sidechainBlocks int // Number of blocks to generate for the side chain (lighter)
- freezeThreshold uint64 // Block number until which to move things into the freezer
- commitBlock uint64 // Block number for which to commit the state to disk
- pivotBlock *uint64 // Pivot block number in case of fast sync
-
- setheadBlock uint64 // Block number to set head back to
- expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
- expSidechainBlocks int // Number of sidechain blocks expected to remain in the database (excl. genesis)
- expFrozen int // Number of canonical blocks expected to be in the freezer (incl. genesis)
- expHeadHeader uint64 // Block number of the expected head header
- expHeadFastBlock uint64 // Block number of the expected head fast sync block
- expHeadBlock uint64 // Block number of the expected head full block
-}
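-
-// pivotBlock is populated via the uint64ptr helper used throughout the tests
-// below; its definition (found later in this file in the original tree) is the
-// obvious one:
-//
-// func uint64ptr(n uint64) *uint64 {
-// 	return &n
-// }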
-
-//nolint:unused
-func (tt *rewindTest) dump(crash bool) string {
- buffer := new(strings.Builder)
-
- fmt.Fprint(buffer, "Chain:\n G")
- for i := 0; i < tt.canonicalBlocks; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprint(buffer, " (HEAD)\n")
- if tt.sidechainBlocks > 0 {
- fmt.Fprintf(buffer, " └")
- for i := 0; i < tt.sidechainBlocks; i++ {
- fmt.Fprintf(buffer, "->S%d", i+1)
- }
- fmt.Fprintf(buffer, "\n")
- }
- fmt.Fprintf(buffer, "\n")
-
- if tt.canonicalBlocks > int(tt.freezeThreshold) {
- fmt.Fprint(buffer, "Frozen:\n G")
- for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprintf(buffer, "\n\n")
- } else {
- fmt.Fprintf(buffer, "Frozen: none\n")
- }
- fmt.Fprintf(buffer, "Commit: G")
- if tt.commitBlock > 0 {
- fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
- }
- fmt.Fprint(buffer, "\n")
-
- if tt.pivotBlock == nil {
- fmt.Fprintf(buffer, "Pivot : none\n")
- } else {
- fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
- }
- if crash {
- fmt.Fprintf(buffer, "\nCRASH\n\n")
- } else {
- fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
- }
- fmt.Fprintf(buffer, "------------------------------\n\n")
-
- if tt.expFrozen > 0 {
- fmt.Fprint(buffer, "Expected in freezer:\n G")
- for i := 0; i < tt.expFrozen-1; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprintf(buffer, "\n\n")
- }
- if tt.expFrozen > 0 {
- if tt.expFrozen >= tt.expCanonicalBlocks {
- fmt.Fprintf(buffer, "Expected in leveldb: none\n")
- } else {
- fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1)
- for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprint(buffer, "\n")
- if tt.expSidechainBlocks > tt.expFrozen {
- fmt.Fprintf(buffer, " └")
- for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
- fmt.Fprintf(buffer, "->S%d", i+1)
- }
- fmt.Fprintf(buffer, "\n")
- }
- }
- } else {
- fmt.Fprint(buffer, "Expected in leveldb:\n G")
- for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprint(buffer, "\n")
- if tt.expSidechainBlocks > tt.expFrozen {
- fmt.Fprintf(buffer, " └")
- for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
- fmt.Fprintf(buffer, "->S%d", i+1)
- }
- fmt.Fprintf(buffer, "\n")
- }
- }
- fmt.Fprintf(buffer, "\n")
- fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader)
- fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
- if tt.expHeadBlock == 0 {
- fmt.Fprintf(buffer, "Expected head block : G\n")
- } else {
- fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock)
- }
- return buffer.String()
-}
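-
-// For example, dumping the first scenario below reproduces the ASCII sketch
-// embedded in its comment (values copied from TestShortSetHead; dump(false)
-// prints the SetHead variant, dump(true) the CRASH variant used by the repair
-// tests):
-//
-// tt := &rewindTest{canonicalBlocks: 8, freezeThreshold: 16, commitBlock: 4,
-// 	setheadBlock: 7, expCanonicalBlocks: 7, expHeadHeader: 7,
-// 	expHeadFastBlock: 7, expHeadBlock: 4}
-// fmt.Println(tt.dump(false))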
-
-// Tests a sethead for a short canonical chain where a recent block was already
-// committed to disk and then sethead was called. In this case we expect the full
-// chain to be rolled back to the committed block. Everything above the sethead
-// point should be deleted. In between the committed block and the requested head
-// the data can remain as "fast sync" data to avoid redownloading it.
-func TestShortSetHead(t *testing.T) { testShortSetHead(t, false) }
-func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) }
-
-func testShortSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
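-
-// As in the repair tests, the expected heads here reduce to a simple rule: the
-// header and fast-block heads land on the sethead target, while the full head
-// block additionally respects the last committed state (or genesis if the node
-// was still snap syncing). A hypothetical illustration, not a helper from the
-// original file:
-func expectedSetHeadBlock(sethead, committed uint64, pivot *uint64) uint64 {
- if pivot != nil && committed < *pivot {
- return 0 // was snap syncing: restart from genesis
- }
- if sethead < committed {
- return sethead // the head block can never sit above the sethead target
- }
- return committed // otherwise rolled back to the committed state
-}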
-
-// Tests a sethead for a short canonical chain where the fast sync pivot point was
-// already committed, after which sethead was called. In this case we expect the
-// chain to behave like in full sync mode, rolling back to the committed block.
-// Everything above the sethead point should be deleted. In between the committed
-// block and the requested head the data can remain as "fast sync" data to avoid
-// redownloading it.
-func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) }
-func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) }
-
-func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain where the fast sync pivot point was
-// not yet committed, but sethead was called. In this case we expect the chain to
-// detect that it was fast syncing and delete everything from the new head, since
-// we can just pick up fast syncing from there. The head full block should be set
-// to the genesis.
-func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) }
-func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) }
-
-func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 0,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where a
-// recent block was already committed to disk and then sethead was called. In this
-// test scenario the side chain is below the committed block. In this case we expect
-// the canonical full chain to be rolled back to the committed block. Everything
-// above the sethead point should be deleted. In between the committed block and
-// the requested head the data can remain as "fast sync" data to avoid redownloading
-// it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedSetHead(t *testing.T) { testShortOldForkedSetHead(t, false) }
-func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) }
-
-func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was already committed to disk and then sethead was
-// called. In this test scenario the side chain is below the committed block. In
-// this case we expect the canonical full chain to be rolled back to the committed
-// block. Everything above the sethead point should be deleted. In between the
-// committed block and the requested head the data can remain as "fast sync" data
-// to avoid redownloading it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedSnapSyncedSetHead(t *testing.T) {
- testShortOldForkedSnapSyncedSetHead(t, false)
-}
-func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
- testShortOldForkedSnapSyncedSetHead(t, true)
-}
-
-func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was not yet committed, but sethead was called. In this
-// test scenario the side chain is below the committed block. In this case we expect
-// the chain to detect that it was fast syncing and delete everything from the new
-// head, since we can just pick up fast syncing from there. The head full block
-// should be set to the genesis.
-func TestShortOldForkedSnapSyncingSetHead(t *testing.T) {
- testShortOldForkedSnapSyncingSetHead(t, false)
-}
-func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
- testShortOldForkedSnapSyncingSetHead(t, true)
-}
-
-func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 3,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where a
-// recent block was already committed to disk and then sethead was called. In this
-// test scenario the side chain reaches above the committed block. In this case we
-// expect the canonical full chain to be rolled back to the committed block. All
-// data above the sethead point should be deleted. In between the committed block
-// and the requested head the data can remain as "fast sync" data to avoid having
-// to redownload it. The side chain should be truncated to the head set.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSetHead(t *testing.T) { testShortNewlyForkedSetHead(t, false) }
-func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) }
-
-func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 10,
- sidechainBlocks: 8,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was already committed to disk and then sethead was
-// called. In this case we expect the canonical full chain to be rolled back to
-// the committed block. All data above the sethead point should be deleted. In
-// between the committed block and the requested head the data can remain as
-// "fast sync" data to avoid having to redownload it. The side chain should be
-// truncated to the head set.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) {
- testShortNewlyForkedSnapSyncedSetHead(t, false)
-}
-func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
- testShortNewlyForkedSnapSyncedSetHead(t, true)
-}
-
-func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 10,
- sidechainBlocks: 8,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a shorter side chain, where
-// the fast sync pivot point was not yet committed, but sethead was called. In
-// this test scenario the side chain reaches above the committed block. In this
-// case we expect the chain to detect that it was fast syncing and delete
-// everything from the new head, since we can just pick up fast syncing from
-// there.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) {
- testShortNewlyForkedSnapSyncingSetHead(t, false)
-}
-func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
- testShortNewlyForkedSnapSyncingSetHead(t, true)
-}
-
-func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 10,
- sidechainBlocks: 8,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a longer side chain, where a
-// recent block was already committed to disk and then sethead was called. In this
-// case we expect the canonical full chain to be rolled back to the committed block.
-// All data above the sethead point should be deleted. In between the committed
-// block and the requested head the data can remain as "fast sync" data to avoid
-// having to redownload it. The side chain should be truncated to the head set.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedSetHead(t *testing.T) { testShortReorgedSetHead(t, false) }
-func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) }
-
-func testShortReorgedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a longer side chain, where
-// the fast sync pivot point was already committed to disk and then sethead was
-// called. In this case we expect the canonical full chain to be rolled back to
-// the committed block. All data above the sethead point should be deleted. In
-// between the committed block and the requested head the data can remain as
-// "fast sync" data to avoid having to redownload it. The side chain should be
-// truncated to the head set.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedSnapSyncedSetHead(t *testing.T) {
- testShortReorgedSnapSyncedSetHead(t, false)
-}
-func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) {
- testShortReorgedSnapSyncedSetHead(t, true)
-}
-
-func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a short canonical chain and a longer side chain, where
-// the fast sync pivot point was not yet committed, but sethead was called. In
-// this case we expect the chain to detect that it was fast syncing and delete
-// everything from the new head, since we can just pick up fast syncing from
-// there.
-//
-// The side chain could be left alone if the fork point was before the new head
-// we are deleting to, but it would be exceedingly hard to detect that case and
-// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortReorgedSnapSyncingSetHead(t *testing.T) {
- testShortReorgedSnapSyncingSetHead(t, false)
-}
-func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) {
- testShortReorgedSnapSyncingSetHead(t, true)
-}
-
-func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
- //
- // Frozen: none
- // Commit: G
- // Pivot : C4
- //
- // SetHead(7)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7
- // └->S1->S2->S3->S4->S5->S6->S7
- //
- // Expected head header : C7
- // Expected head fast block: C7
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 8,
- sidechainBlocks: 10,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 7,
- expCanonicalBlocks: 7,
- expSidechainBlocks: 7,
- expFrozen: 0,
- expHeadHeader: 7,
- expHeadFastBlock: 7,
- expHeadBlock: 0,
- }, snapshots)
-}
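-
-// A similar hedged sketch for the side chain's fate across these cases
-// (hypothetical helper, not part of the original suite): without frozen
-// blocks a side chain survives, truncated to the new head if it reaches
-// past it; once blocks are frozen (the "long" cases below), the dangling
-// side chain is deleted outright by the freezer.
-func expectedSidechainBlocks(sidechain, sethead int, frozen bool) int {
- if frozen {
- return 0 // nuked by the freezer, regardless of length
- }
- if sidechain > sethead {
- return sethead // truncated to the head set
- }
- return sidechain // below the new head, left alone
-}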
-
-// Tests a sethead for a long canonical chain with frozen blocks where a recent
-// block - newer than the ancient limit - was already committed to disk and then
-// sethead was called. In this case we expect the full chain to be rolled back
-// to the committed block. Everything above the sethead point should be deleted.
-// In between the committed block and the requested head the data can remain as
-// "fast sync" data to avoid redownloading it.
-func TestLongShallowSetHead(t *testing.T) { testLongShallowSetHead(t, false) }
-func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) }
-
-func testLongShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks where a recent
-// block - older than the ancient limit - was already committed to disk and then
-// sethead was called. In this case we expect the full chain to be rolled back
-// to the committed block. Since the ancient limit was underflowed, everything
-// from the new head onwards needs to be deleted to avoid creating a gap.
-func TestLongDeepSetHead(t *testing.T) { testLongDeepSetHead(t, false) }
-func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) }
-
-func testLongDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
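-
-// The shallow/deep split above is decided by where the new head lands
-// relative to the ancient limit. A hedged sketch of the frozen-count
-// arithmetic for the cases in this file (hypothetical helper; counts
-// include the genesis, and freezeThreshold is the number of most recent
-// blocks kept out of the freezer):
-func frozenAfterSetHead(canonical, freezeThreshold, sethead, commit uint64) uint64 {
- ancientLimit := canonical - freezeThreshold // highest frozen block number
- if sethead > ancientLimit {
- // Shallow: the rewind stays inside leveldb, the freezer is untouched.
- return ancientLimit + 1
- }
- // Deep: the ancient limit was underflowed, so leveldb is emptied and the
- // freezer is truncated to the new head: the committed block if recent
- // state was committed, else the sethead block itself (snap sync can
- // simply resume from there).
- if commit > 0 {
- return commit + 1
- }
- return sethead + 1
-}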
-
-// Tests a sethead for a long canonical chain with frozen blocks where the fast
-// sync pivot point - newer than the ancient limit - was already committed, after
-// which sethead was called. In this case we expect the full chain to be rolled
-// back to the committed block. Everything above the sethead point should be
-// deleted. In between the committed block and the requested head the data can
-// remain as "fast sync" data to avoid redownloading it.
-func TestLongSnapSyncedShallowSetHead(t *testing.T) {
- testLongSnapSyncedShallowSetHead(t, false)
-}
-func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongSnapSyncedShallowSetHead(t, true)
-}
-
-func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks where the fast
-// sync pivot point - older than the ancient limit - was already committed, after
-// which sethead was called. In this case we expect the full chain to be rolled
-// back to the committed block. Since the ancient limit was underflowed,
-// everything from the new head onwards needs to be deleted to avoid creating a gap.
-func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) }
-func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) }
-
-func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks where the fast
-// sync pivot point - newer than the ancient limit - was not yet committed, but
-// sethead was called. In this case we expect the chain to detect that it was fast
-// syncing and delete everything from the new head, since we can just pick up fast
-// syncing from there.
-func TestLongSnapSyncingShallowSetHead(t *testing.T) {
- testLongSnapSyncingShallowSetHead(t, false)
-}
-func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
- testLongSnapSyncingShallowSetHead(t, true)
-}
-
-func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks where the fast
-// sync pivot point - older than the ancient limit - was not yet committed, but
-// sethead was called. In this case we expect the chain to detect that it was fast
-// syncing and delete everything from the new head, since we can just pick up fast
-// syncing from there.
-func TestLongSnapSyncingDeepSetHead(t *testing.T) {
- testLongSnapSyncingDeepSetHead(t, false)
-}
-func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
- testLongSnapSyncingDeepSetHead(t, true)
-}
-
-func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6
- //
- // Expected in leveldb: none
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 0,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 7,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
-// chain, where a recent block - newer than the ancient limit - was already committed
-// to disk and then sethead was called. In this case we expect the canonical full
-// chain to be rolled back to the committed block. Everything above the sethead point
-// should be deleted. In between the committed block and the requested head the data
-// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
-// by the freezer.
-func TestLongOldForkedShallowSetHead(t *testing.T) {
- testLongOldForkedShallowSetHead(t, false)
-}
-func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongOldForkedShallowSetHead(t, true)
-}
-
-func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
-// chain, where a recent block - older than the ancient limit - was already committed
-// to disk and then sethead was called. In this case we expect the canonical full
-// chain to be rolled back to the committed block. Since the ancient limit was
-// underflowed, everything from the new head onwards needs to be deleted to
-// avoid creating a gap. The side chain is nuked by the freezer.
-func TestLongOldForkedDeepSetHead(t *testing.T) { testLongOldForkedDeepSetHead(t, false) }
-func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) }
-
-func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then sethead was called. In this test scenario
-// the side chain is below the committed block. In this case we expect the canonical
-// full chain to be rolled back to the committed block. Everything above the
-// sethead point should be deleted. In between the committed block and the
-// requested head the data can remain as "fast sync" data to avoid redownloading
-// it. The side chain is nuked by the freezer.
-func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) {
- testLongOldForkedSnapSyncedShallowSetHead(t, false)
-}
-func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncedShallowSetHead(t, true)
-}
-
-func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then sethead was called. In this test scenario
-// the side chain is below the committed block. In this case we expect the canonical
-// full chain to be rolled back to the committed block. Since the ancient limit was
-// underflowed, everything from the new head onwards needs to be deleted to
-// avoid creating a gap. The side chain is nuked by the freezer.
-func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) {
- testLongOldForkedSnapSyncedDeepSetHead(t, false)
-}
-func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncedDeepSetHead(t, true)
-}
-
-func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6
- //
- // Expected in leveldb: none
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but sethead was called. In this test scenario the side
-// chain is below the committed block. In this case we expect the chain to detect
-// that it was fast syncing and delete everything from the new head, since we can
-// just pick up fast syncing from there. The side chain is completely nuked by the
-// freezer.
-func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) {
- testLongOldForkedSnapSyncingShallowSetHead(t, false)
-}
-func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncingShallowSetHead(t, true)
-}
-
-func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but sethead was called. In this test scenario the side
-// chain is below the committed block. In this case we expect the chain to detect
-// that it was fast syncing and delete everything from the new head, since we can
-// just pick up fast syncing from there. The side chain is completely nuked by the
-// freezer.
-func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) {
- testLongOldForkedSnapSyncingDeepSetHead(t, false)
-}
-func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
- testLongOldForkedSnapSyncingDeepSetHead(t, true)
-}
-
-func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6
- //
- // Expected in leveldb: none
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 3,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 7,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - newer than the ancient limit - was already
-// committed to disk and then sethead was called. In this test scenario the side
-// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongShallowSetHead.
-func TestLongNewerForkedShallowSetHead(t *testing.T) {
- testLongNewerForkedShallowSetHead(t, false)
-}
-func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedShallowSetHead(t, true)
-}
-
-func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where a recent block - older than the ancient limit - was already
-// committed to disk and then sethead was called. In this test scenario the side
-// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongDeepSetHead.
-func TestLongNewerForkedDeepSetHead(t *testing.T) {
- testLongNewerForkedDeepSetHead(t, false)
-}
-func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedDeepSetHead(t, true)
-}
-
-func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then sethead was called. In this test scenario
-// the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead.
-func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) {
- testLongNewerForkedSnapSyncedShallowSetHead(t, false)
-}
-func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncedShallowSetHead(t, true)
-}
-
-func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then sethead was called. In this test scenario
-// the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead.
-func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) {
- testLongNewerForkedSnapSyncedDeepSetHead(t, false)
-}
-func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncedDeepSetHead(t, true)
-}
-
-func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but sethead was called. In this test scenario the side
-// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongSnapSyncingShallowSetHead.
-func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) {
- testLongNewerForkedSnapSyncingShallowSetHead(t, false)
-}
-func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncingShallowSetHead(t, true)
-}
-
-func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a shorter
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but sethead was called. In this test scenario the side
-// chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead.
-func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) {
- testLongNewerForkedSnapSyncingDeepSetHead(t, false)
-}
-func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
- testLongNewerForkedSnapSyncingDeepSetHead(t, true)
-}
-
-func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6
- //
- // Expected in leveldb: none
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 12,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 7,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer side
-// chain, where a recent block - newer than the ancient limit - was already committed
-// to disk and then sethead was called. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongShallowSetHead.
-func TestLongReorgedShallowSetHead(t *testing.T) { testLongReorgedShallowSetHead(t, false) }
-func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) }
-
-func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer side
-// chain, where a recent block - older than the ancient limit - was already committed
-// to disk and then sethead was called. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongDeepSetHead.
-func TestLongReorgedDeepSetHead(t *testing.T) { testLongReorgedDeepSetHead(t, false) }
-func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) }
-
-func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : none
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: nil,
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was already committed to disk and then sethead was called. In this case the
-// freezer will delete the sidechain since it's dangling, reverting to
-// TestLongSnapSyncedShallowSetHead.
-func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) {
- testLongReorgedSnapSyncedShallowSetHead(t, false)
-}
-func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncedShallowSetHead(t, true)
-}
-
-func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was already committed to disk and then sethead was called. In this case the
-// freezer will delete the sidechain since it's dangling, reverting to
-// TestLongSnapSyncedDeepSetHead.
-func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) {
- testLongReorgedSnapSyncedDeepSetHead(t, false)
-}
-func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncedDeepSetHead(t, true)
-}
-
-func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G, C4
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4
- //
- // Expected in leveldb: none
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 4,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 4,
- expSidechainBlocks: 0,
- expFrozen: 5,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - newer than the ancient limit -
-// was not yet committed, but sethead was called. In this case we expect the
-// chain to detect that it was fast syncing and delete everything from the new
-// head, since we can just pick up fast syncing from there. The side chain is
-// completely nuked by the freezer.
-func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) {
- testLongReorgedSnapSyncingShallowSetHead(t, false)
-}
-func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncingShallowSetHead(t, true)
-}
-
-func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2
- //
- // Expected in leveldb:
- // C2)->C3->C4->C5->C6
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 18,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 3,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-// Tests a sethead for a long canonical chain with frozen blocks and a longer
-// side chain, where the fast sync pivot point - older than the ancient limit -
-// was not yet committed, but sethead was called. In this case we expect the
-// chain to detect that it was fast syncing and delete everything from the new
-// head, since we can just pick up fast syncing from there. The side chain is
-// completely nuked by the freezer.
-func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) {
- testLongReorgedSnapSyncingDeepSetHead(t, false)
-}
-func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) {
- testLongReorgedSnapSyncingDeepSetHead(t, true)
-}
-
-func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
- // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
- //
- // Frozen:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Commit: G
- // Pivot : C4
- //
- // SetHead(6)
- //
- // ------------------------------
- //
- // Expected in freezer:
- // G->C1->C2->C3->C4->C5->C6
- //
- // Expected in leveldb: none
- //
- // Expected head header : C6
- // Expected head fast block: C6
- // Expected head block : G
- testSetHead(t, &rewindTest{
- canonicalBlocks: 24,
- sidechainBlocks: 26,
- freezeThreshold: 16,
- commitBlock: 0,
- pivotBlock: uint64ptr(4),
- setheadBlock: 6,
- expCanonicalBlocks: 6,
- expSidechainBlocks: 0,
- expFrozen: 7,
- expHeadHeader: 6,
- expHeadFastBlock: 6,
- expHeadBlock: 0,
- }, snapshots)
-}
-
-func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- testSetHeadWithScheme(t, tt, snapshots, scheme)
- }
-}
-
-func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
- // The test cases are hard to follow; uncomment the lines below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump(false))
-
- // Create a temporary persistent database
- datadir := t.TempDir()
- ancient := path.Join(datadir, "ancient")
-
- db, err := rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to create persistent database: %v", err)
- }
- defer db.Close()
-
- // Initialize a fresh chain
- var (
- gspec = &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.AllEthashProtocolChanges,
- }
- engine = ethash.NewFullFaker()
- config = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 0, // Disable snapshot
- StateScheme: scheme,
- }
- )
- if snapshots {
- config.SnapshotLimit = 256
- config.SnapshotWait = true
- }
- config.TriesInMemory = 128
- chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create chain: %v", err)
- }
- defer chain.Stop()
-
- // If sidechain blocks are needed, make a light chain and import it
- var sideblocks types.Blocks
- if tt.sidechainBlocks > 0 {
- sideblocks, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x01})
- })
- if _, err := chain.InsertChain(sideblocks); err != nil {
- t.Fatalf("Failed to import side chain: %v", err)
- }
- }
- canonblocks, _ := GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0x02})
- b.SetDifficulty(big.NewInt(1000000))
- })
- if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- if tt.commitBlock > 0 {
- chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false)
- if snapshots {
- if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
- t.Fatalf("Failed to flatten snapshots: %v", err)
- }
- }
- }
- if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
- t.Fatalf("Failed to import canonical chain tail: %v", err)
- }
- // Reopen the trie database without persisting in-memory dirty nodes.
- chain.triedb.Close()
- dbconfig := &triedb.Config{}
- if scheme == rawdb.PathScheme {
- dbconfig.PathDB = pathdb.Defaults
- } else {
- dbconfig.HashDB = hashdb.Defaults
- }
- chain.triedb = triedb.NewDatabase(chain.db, dbconfig)
- chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)
-
- // Force run a freeze cycle
- type freezer interface {
- Freeze(threshold uint64) error
- Ancients() (uint64, error)
- }
- db.(freezer).Freeze(tt.freezeThreshold)
-
- // Set the simulated pivot block
- if tt.pivotBlock != nil {
- rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
- }
- // Set the head of the chain back to the requested number
- chain.SetHead(tt.setheadBlock)
-
- // Iterate over all the remaining blocks and ensure there are no gaps
- verifyNoGaps(t, chain, true, canonblocks)
- verifyNoGaps(t, chain, false, sideblocks)
- verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
- verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
-
- if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
- t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
- }
- if head := chain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock)
- }
- if head := chain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock {
- t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock)
- }
- if frozen, err := db.(freezer).Ancients(); err != nil {
- t.Errorf("Failed to retrieve ancient count: %v\n", err)
- } else if int(frozen) != tt.expFrozen {
- t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
- }
-}
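-
-// The harness above reaches freezer functionality through an ad-hoc
-// interface assertion on the database handle rather than a concrete type.
-// A minimal sketch of that optional-capability pattern (ancienter and
-// ancientCount are illustrative names, not part of the ethdb API):
-type ancienter interface {
- Ancients() (uint64, error)
-}
-
-func ancientCount(db interface{}) (uint64, bool) {
- f, ok := db.(ancienter) // capability check: does the backend have a freezer?
- if !ok {
- return 0, false // plain key-value backend, no freezer attached
- }
- n, err := f.Ancients()
- return n, err == nil
-}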
-
-// verifyNoGaps checks that there are no gaps after the initial set of blocks in
-// the database and fails the test if any are found.
-func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
- t.Helper()
-
- var end uint64
- for i := uint64(0); i <= uint64(len(inserted)); i++ {
- header := chain.GetHeaderByNumber(i)
- if header == nil && end == 0 {
- end = i
- }
- if header != nil && end > 0 {
- if canonical {
- t.Errorf("Canonical header gap between #%d-#%d", end, i-1)
- } else {
- t.Errorf("Sidechain header gap between #%d-#%d", end, i-1)
- }
- end = 0 // Reset for further gap detection
- }
- }
- end = 0
- for i := uint64(0); i <= uint64(len(inserted)); i++ {
- block := chain.GetBlockByNumber(i)
- if block == nil && end == 0 {
- end = i
- }
- if block != nil && end > 0 {
- if canonical {
- t.Errorf("Canonical block gap between #%d-#%d", end, i-1)
- } else {
- t.Errorf("Sidechain block gap between #%d-#%d", end, i-1)
- }
- end = 0 // Reset for further gap detection
- }
- }
- end = 0
- for i := uint64(1); i <= uint64(len(inserted)); i++ {
- receipts := chain.GetReceiptsByHash(inserted[i-1].Hash())
- if receipts == nil && end == 0 {
- end = i
- }
- if receipts != nil && end > 0 {
- if canonical {
- t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1)
- } else {
- t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1)
- }
- end = 0 // Reset for further gap detection
- }
- }
-}
-
-// verifyCutoff checks that no chain data is available in the chain after the
-// specified limit, but that it is all available before it.
-func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
- t.Helper()
-
- for i := 1; i <= len(inserted); i++ {
- if i <= head {
- if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil {
- if canonical {
- t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil {
- if canonical {
- t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil {
- if canonical {
- t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- } else {
- if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil {
- if canonical {
- t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil {
- if canonical {
- t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil {
- if canonical {
- t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- } else {
- t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
- }
- }
- }
- }
-}
-
-// uint64ptr is a small helper to allow one-line constant pointer creation.
-func uint64ptr(n uint64) *uint64 {
- return &n
-}
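-
-// Go cannot take the address of an untyped constant (&4 does not compile),
-// so the helper above is the conventional way to populate optional *uint64
-// fields such as pivotBlock, with nil meaning "no pivot was ever reached".
-// Illustrative use only:
-//
-//	withPivot := &rewindTest{pivotBlock: uint64ptr(4)}
-//	noPivot := &rewindTest{pivotBlock: nil}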
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
deleted file mode 100644
index b2dbe5cb2a..0000000000
--- a/core/blockchain_snapshot_test.go
+++ /dev/null
@@ -1,720 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Tests that abnormal program termination (i.e. a crash) and restart can
-// recover the snapshot properly if snapshots are enabled.
-
-package core
-
-import (
- "bytes"
- "fmt"
- "math/big"
- "os"
- "path"
- "strings"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// snapshotTestBasic wraps the common testing fields in the snapshot tests.
-type snapshotTestBasic struct {
- scheme string // Disk scheme used for storing trie nodes
- chainBlocks int // Number of blocks to generate for the canonical chain
- snapshotBlock uint64 // Block number of the relevant snapshot disk layer
- commitBlock uint64 // Block number for which to commit the state to disk
-
- expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
- expHeadHeader uint64 // Block number of the expected head header
- expHeadFastBlock uint64 // Block number of the expected head fast sync block
- expHeadBlock uint64 // Block number of the expected head full block
- expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer
-
- // shared fields, set at runtime
- datadir string
- ancient string
- db ethdb.Database
- genDb ethdb.Database
- engine consensus.Engine
- gspec *Genesis
-}
-
-func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
- // Create a temporary persistent database
- datadir := t.TempDir()
- ancient := path.Join(datadir, "ancient")
-
- db, err := rawdb.Open(rawdb.OpenOptions{
- Directory: datadir,
- AncientsDirectory: ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to create persistent database: %v", err)
- }
- // Initialize a fresh chain
- var (
- gspec = &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.AllEthashProtocolChanges,
- }
- engine = ethash.NewFullFaker()
- )
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to create chain: %v", err)
- }
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, func(i int, b *BlockGen) {})
-
- // Insert the blocks with configured settings.
- var breakpoints []uint64
- if basic.commitBlock > basic.snapshotBlock {
- breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock)
- } else {
- breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock)
- }
- var startPoint uint64
- for _, point := range breakpoints {
- if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
- t.Fatalf("Failed to import canonical chain start: %v", err)
- }
- startPoint = point
-
- if basic.commitBlock > 0 && basic.commitBlock == point {
- chain.TrieDB().Commit(blocks[point-1].Root(), false)
- }
- if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
- // Flush the entire snapshot tree to disk; the relevant
- // (a) snapshot root and (b) snapshot generator will be
- // persisted atomically.
- chain.snaps.Cap(blocks[point-1].Root(), 0)
- diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
- if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
- t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
- }
- }
- }
- if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
- t.Fatalf("Failed to import canonical chain tail: %v", err)
- }
-
- // Set runtime fields
- basic.datadir = datadir
- basic.ancient = ancient
- basic.db = db
- basic.genDb = genDb
- basic.engine = engine
- basic.gspec = gspec
- return chain, blocks
-}
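The breakpoint handling above is the core of prepare: the chain is imported in slices so that the state commit and the snapshot cap fire at exact block heights, visited in ascending order. A standalone sketch of that segmentation logic (function and parameter names here are illustrative, not the real BlockChain API):

package main

import "fmt"

// insertWithBreakpoints imports a chain in slices, pausing at the two
// configured heights so side effects (state commit, snapshot cap) can be
// triggered exactly there, mirroring the loop in prepare().
func insertWithBreakpoints(total int, commit, snapshot uint64, apply func(lo, hi uint64)) {
	breakpoints := []uint64{commit, snapshot}
	if commit > snapshot {
		breakpoints[0], breakpoints[1] = snapshot, commit // visit in ascending order
	}
	var start uint64
	for _, point := range breakpoints {
		apply(start, point) // e.g. chain.InsertChain(blocks[start:point])
		start = point
	}
	apply(start, uint64(total)) // import the remaining tail
}

func main() {
	insertWithBreakpoints(8, 2, 4, func(lo, hi uint64) {
		fmt.Printf("insert blocks [%d:%d)\n", lo, hi)
	})
}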
-
-func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) {
- // Iterate over all the remaining blocks and ensure there are no gaps
- verifyNoGaps(t, chain, true, blocks)
- verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks)
-
- if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
- t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
- }
- if head := chain.CurrentSnapBlock(); head.Number.Uint64() != basic.expHeadFastBlock {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, basic.expHeadFastBlock)
- }
- if head := chain.CurrentBlock(); head.Number.Uint64() != basic.expHeadBlock {
- t.Errorf("Head block mismatch: have %d, want %d", head.Number, basic.expHeadBlock)
- }
-
- // Check the disk layer, ensure it matches the expected block root
- block := chain.GetBlockByNumber(basic.expSnapshotBottom)
- if block == nil {
- t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
- } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
- t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
- }
-
- // Check the snapshot, ensure it's integrated
- if err := chain.snaps.Verify(block.Root()); err != nil {
- t.Errorf("The disk layer is not integrated %v", err)
- }
-}
-
-//nolint:unused
-func (basic *snapshotTestBasic) dump() string {
- buffer := new(strings.Builder)
-
- fmt.Fprint(buffer, "Chain:\n G")
- for i := 0; i < basic.chainBlocks; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprint(buffer, " (HEAD)\n\n")
-
- fmt.Fprintf(buffer, "Commit: G")
- if basic.commitBlock > 0 {
- fmt.Fprintf(buffer, ", C%d", basic.commitBlock)
- }
- fmt.Fprint(buffer, "\n")
-
- fmt.Fprintf(buffer, "Snapshot: G")
- if basic.snapshotBlock > 0 {
- fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock)
- }
- fmt.Fprint(buffer, "\n")
-
- //if crash {
- // fmt.Fprintf(buffer, "\nCRASH\n\n")
- //} else {
- // fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead)
- //}
- fmt.Fprintf(buffer, "------------------------------\n\n")
-
- fmt.Fprint(buffer, "Expected in leveldb:\n G")
- for i := 0; i < basic.expCanonicalBlocks; i++ {
- fmt.Fprintf(buffer, "->C%d", i+1)
- }
- fmt.Fprintf(buffer, "\n\n")
- fmt.Fprintf(buffer, "Expected head header : C%d\n", basic.expHeadHeader)
- fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock)
- if basic.expHeadBlock == 0 {
- fmt.Fprintf(buffer, "Expected head block : G\n")
- } else {
- fmt.Fprintf(buffer, "Expected head block : C%d\n", basic.expHeadBlock)
- }
- if basic.expSnapshotBottom == 0 {
- fmt.Fprintf(buffer, "Expected snapshot disk : G\n")
- } else {
- fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", basic.expSnapshotBottom)
- }
- return buffer.String()
-}
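For reference, this is roughly what dump() prints for the TestRestartWithNewSnapshot configuration further below (chainBlocks=8, no commit or snapshot block); column alignment is approximate since the diff collapses whitespace:

	Chain:
	 G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)

	Commit: G
	Snapshot: G
	------------------------------

	Expected in leveldb:
	 G->C1->C2->C3->C4->C5->C6->C7->C8

	Expected head header : C8
	Expected head fast block: C8
	Expected head block : C8
	Expected snapshot disk : G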
-
-func (basic *snapshotTestBasic) teardown() {
- basic.db.Close()
- basic.genDb.Close()
- os.RemoveAll(basic.datadir)
- os.RemoveAll(basic.ancient)
-}
-
-// snapshotTest is a test case type for normal snapshot recovery.
-// It can be used for testing a normal Geth restart.
-type snapshotTest struct {
- snapshotTestBasic
-}
-
-func (snaptest *snapshotTest) test(t *testing.T) {
- // The test case is hard to follow; uncomment below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Restart the chain normally
- chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newchain.Stop()
-
- snaptest.verify(t, newchain, blocks)
-}
-
-// crashSnapshotTest is a test case type for irregular snapshot recovery.
-// It can be used for testing a Geth restart after a crash.
-type crashSnapshotTest struct {
- snapshotTestBasic
-}
-
-func (snaptest *crashSnapshotTest) test(t *testing.T) {
- // The test case is hard to follow; uncomment below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Pull the plug on the database, simulating a hard crash
- db := chain.db
- db.Close()
- chain.stopWithoutSaving()
- chain.triedb.Close()
-
- // Start a new blockchain back up and see where the repair leads us
- newdb, err := rawdb.Open(rawdb.OpenOptions{
- Directory: snaptest.datadir,
- AncientsDirectory: snaptest.ancient,
- Ephemeral: true,
- })
- if err != nil {
- t.Fatalf("Failed to reopen persistent database: %v", err)
- }
- defer newdb.Close()
-
- // The interesting thing is: instead of restarting the blockchain once
- // after the crash, we restart twice here: once after the crash and once
- // after the normal stop. This ensures the broken snapshot is detected
- // in both cases.
- newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- newchain.Stop()
-
- newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newchain.Stop()
-
- snaptest.verify(t, newchain, blocks)
-}
-
-// gappedSnapshotTest is a test type used to test this scenario:
-// - have a complete snapshot
-// - restart without enabling the snapshot
-// - insert a few blocks
-// - restart with the snapshot enabled again
-type gappedSnapshotTest struct {
- snapshotTestBasic
- gapped int // Number of blocks to insert without enabling snapshot
-}
-
-func (snaptest *gappedSnapshotTest) test(t *testing.T) {
- // The test case is hard to follow; uncomment below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Insert blocks without enabling snapshot if gapping is required.
- chain.Stop()
- gappedBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {})
-
- // Insert a few more blocks without enabling snapshot
- var cacheConfig = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- TriesInMemory: 128,
- SnapshotLimit: 0,
- StateScheme: snaptest.scheme,
- }
- newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- newchain.InsertChain(gappedBlocks)
- newchain.Stop()
-
- // Restart the chain with the snapshot enabled
- newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newchain.Stop()
-
- snaptest.verify(t, newchain, blocks)
-}
-
-// setHeadSnapshotTest is the test type used to test this scenario:
-// - have a complete snapshot
-// - set the head to a lower point
-// - restart
-type setHeadSnapshotTest struct {
- snapshotTestBasic
- setHead uint64 // Block number to set head back to
-}
-
-func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
- // The test case is hard to follow; uncomment below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Rewind the chain if setHead operation is required.
- chain.SetHead(snaptest.setHead)
- chain.Stop()
-
- newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newchain.Stop()
-
- snaptest.verify(t, newchain, blocks)
-}
-
-// wipeCrashSnapshotTest is the test type used to test this scenario:
-// - have a complete snapshot
-// - restart, insert more blocks without enabling the snapshot
-// - restart again with the snapshot enabled
-// - crash
-type wipeCrashSnapshotTest struct {
- snapshotTestBasic
- newBlocks int
-}
-
-func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
- // The test case is hard to follow; uncomment below to visualize the input:
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Firstly, stop the chain properly, with all snapshot journal
- // and state committed.
- chain.Stop()
-
- config := &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 0,
- TriesInMemory: 128,
- StateScheme: snaptest.scheme,
- }
- newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- newBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {})
- newchain.InsertChain(newBlocks)
- newchain.Stop()
-
- // Restart the chain; the wiper should start working
- config = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- SnapshotLimit: 256,
- SnapshotWait: false, // Don't wait for the rebuild
- TriesInMemory: 128,
- StateScheme: snaptest.scheme,
- }
- tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
-
- // Simulate the blockchain crash.
- tmp.triedb.Close()
- tmp.stopWithoutSaving()
-
- newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- snaptest.verify(t, newchain, blocks)
- newchain.Stop()
-}
-
-// Tests a Geth restart with a valid snapshot. Before the shutdown, the snapshot
-// journal will be persisted correctly. In this case no snapshot recovery is
-// required.
-func TestRestartWithNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G
- // Snapshot: G
- //
- // SetHead(0)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C8
- // Expected snapshot disk : G
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- test := &snapshotTest{
- snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 8,
- expSnapshotBottom: 0, // Initial disk layer built from genesis
- },
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth crashed and restarts with a broken snapshot. In this case the
-// chain head should be rewound to a point with available state, and the new
-// head must be lower than the disk layer. Since there is no committed point,
-// the chain should be rewound to genesis and the disk layer should be left
-// for recovery.
-func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G
- // Snapshot: G, C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- // Expected snapshot disk : C4
- // TODO
- //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- for _, scheme := range []string{rawdb.HashScheme} {
- test := &crashSnapshotTest{
- snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 0,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth crashed and restarts with a broken snapshot. In this case the
-// chain head should be rewound to a point with available state, and the new
-// head must be lower than the disk layer. Since there is only a low committed
-// point, the chain should be rewound to that committed point and the disk
-// layer should be left for recovery.
-func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G, C2
- // Snapshot: G, C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : C2
- // Expected snapshot disk : C4
- // TODO
- //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- for _, scheme := range []string{rawdb.HashScheme} {
- test := &crashSnapshotTest{
- snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 2,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 2,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth crashed and restarts with a broken snapshot. In this case
-// the chain head should be rewound to a point with available state, and the
-// new head must be lower than the disk layer. Since the only committed point
-// is above the disk layer, the chain should be rewound to genesis and the
-// disk layer should be left for recovery.
-func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G, C6
- // Snapshot: G, C4
- //
- // CRASH
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8
- //
- // Expected head header : C8
- // Expected head fast block: C8
- // Expected head block : G
- // Expected snapshot disk : C4
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- expHead := uint64(0)
- if scheme == rawdb.PathScheme {
- expHead = uint64(4)
- }
- test := &crashSnapshotTest{
- snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 6,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: expHead,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth was running with the snapshot enabled, then restarts without
-// enabling the snapshot, and after that re-enables the snapshot again. In this
-// case the snapshot should be rebuilt from the latest chain head.
-func TestGappedNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G
- // Snapshot: G
- //
- // SetHead(0)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
- //
- // Expected head header : C10
- // Expected head fast block: C10
- // Expected head block : C10
- // Expected snapshot disk : C10
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- test := &gappedSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 10,
- expHeadHeader: 10,
- expHeadFastBlock: 10,
- expHeadBlock: 10,
- expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
- },
- gapped: 2,
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth was running with the snapshot enabled and SetHead is applied.
-// In this case the head is rewound to the target (with state available). After
-// that the chain is restarted and the original disk layer is kept.
-func TestSetHeadWithNewSnapshot(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G
- // Snapshot: G
- //
- // SetHead(4)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4
- //
- // Expected head header : C4
- // Expected head fast block: C4
- // Expected head block : C4
- // Expected snapshot disk : G
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- test := &setHeadSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 4,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- expSnapshotBottom: 0, // The initial disk layer is built from the genesis
- },
- setHead: 4,
- }
- test.test(t)
- test.teardown()
- }
-}
-
-// Tests that Geth was running with a complete snapshot and then imports a few
-// more new blocks on top without enabling the snapshot. A crash happens after
-// the subsequent restart. Check that everything is ok after the final restart.
-func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
- // Chain:
- // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
- //
- // Commit: G
- // Snapshot: G
- //
- // SetHead(0)
- //
- // ------------------------------
- //
- // Expected in leveldb:
- // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
- //
- // Expected head header : C10
- // Expected head fast block: C10
- // Expected head block : C8
- // Expected snapshot disk : C10
- for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
- test := &wipeCrashSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- scheme: scheme,
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 0,
- expCanonicalBlocks: 10,
- expHeadHeader: 10,
- expHeadFastBlock: 10,
- expHeadBlock: 10,
- expSnapshotBottom: 10,
- },
- newBlocks: 2,
- }
- test.test(t)
- test.teardown()
- }
-}
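Taken together, every scenario in the deleted file follows the same matrix pattern: one test body executed once per trie state scheme. A minimal standalone sketch of that loop (scheme names inlined here; the real tests range over rawdb.HashScheme and rawdb.PathScheme):

package main

import "fmt"

func main() {
	// Each snapshot scenario runs once per scheme, exactly like the
	// for-loops wrapping the test structs above.
	for _, scheme := range []string{"hash", "path"} {
		fmt.Printf("running snapshot scenario with scheme=%s\n", scheme)
	}
}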
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
deleted file mode 100644
index 3917117b91..0000000000
--- a/core/blockchain_test.go
+++ /dev/null
@@ -1,4566 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "crypto/ecdsa"
- "errors"
- "fmt"
- "math/big"
- "math/rand"
- "os"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
- "github.com/stretchr/testify/require"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/tracers/logger"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/holiman/uint256"
-)
-
-// So we can deterministically seed different blockchains
-var (
- canonicalSeed = 1
- forkSeed1 = 2
- forkSeed2 = 3
-
- TestTriesInMemory = 128
-)
-
-// newCanonical creates a chain database, and injects a deterministic canonical
-// chain. Depending on the full flag, it creates either a full block chain or a
-// header only chain. The database and genesis specification for block generation
-// are also returned in case more test blocks are needed later.
-func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) {
- var (
- genesis = &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.AllEthashProtocolChanges,
- }
- )
-
- // Initialize a fresh chain with only a genesis block
- var ops []BlockChainOption
- if pipeline {
- ops = append(ops, EnablePipelineCommit)
- }
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...)
- // Create and inject the requested chain
- if n == 0 {
- return rawdb.NewMemoryDatabase(), genesis, blockchain, nil
- }
- if full {
- // Full block-chain requested
- genDb, blocks := makeBlockChainWithGenesis(genesis, n, engine, canonicalSeed)
- _, err := blockchain.InsertChain(blocks)
- return genDb, genesis, blockchain, err
- }
- // Header-only chain requested
- genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed)
- _, err := blockchain.InsertHeaderChain(headers)
- return genDb, genesis, blockchain, err
-}
-
-func newGwei(n int64) *big.Int {
- return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei))
-}
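A quick sanity check of the conversion this helper performs, with the go-ethereum constant params.GWei = 1e9 wei inlined so the sketch is self-contained:

package main

import (
	"fmt"
	"math/big"
)

// newGwei mirrors the deleted helper: n gwei expressed in wei,
// where 1 gwei = 1e9 wei (params.GWei in go-ethereum).
func newGwei(n int64) *big.Int {
	return new(big.Int).Mul(big.NewInt(n), big.NewInt(1_000_000_000))
}

func main() {
	fmt.Println(newGwei(3)) // 3000000000 wei
}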
-
-// Tests that blocks with invalid state roots are rejected on a fork of length N starting from block i
-func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) {
- // Copy old chain up to #i into a new db
- db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
- if err != nil {
- t.Fatal("could not make new canonical in testFork", err)
- }
- defer blockchain2.Stop()
-
- // Assert the chains have the same header/block at #i
- hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash()
- hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash()
- if hash1 != hash2 {
- t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
- }
- // Extend the newly created chain
- blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1)
- for idx, block := range blockChainB {
- block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)})
- }
- previousBlock := blockchain.CurrentBlock()
- // Sanity check that the forked chain with tampered state roots cannot be imported into the original
- if _, err := blockchain.InsertChain(blockChainB); err == nil {
- t.Fatalf("failed to report insert error")
- }
-
- time.Sleep(2 * rewindBadBlockInterval)
- latestBlock := blockchain.CurrentBlock()
- if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() {
- t.Fatalf("rewind do not take effect")
- }
- db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
- if err != nil {
- t.Fatal("could not make new canonical in testFork", err)
- }
- defer blockchain3.Stop()
-
- blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2)
-
- if _, err := blockchain.InsertChain(blockChainC); err != nil {
- t.Fatalf("failed to insert forking chain: %v", err)
- }
-}
-
-// Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) {
- // Copy old chain up to #i into a new db
- genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline)
- if err != nil {
- t.Fatal("could not make new canonical in testFork", err)
- }
- defer blockchain2.Stop()
-
- // Assert the chains have the same header/block at #i
- var hash1, hash2 common.Hash
- if full {
- hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
- hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
- } else {
- hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
- hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
- }
- if hash1 != hash2 {
- t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
- }
- // Extend the newly created chain
- var (
- blockChainB []*types.Block
- headerChainB []*types.Header
- )
- if full {
- blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
- if _, err := blockchain2.InsertChain(blockChainB); err != nil {
- t.Fatalf("failed to insert forking chain: %v", err)
- }
- } else {
- headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
- if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
- t.Fatalf("failed to insert forking chain: %v", err)
- }
- }
- // Sanity check that the forked chain can be imported into the original
- var tdPre, tdPost *big.Int
-
- if full {
- cur := blockchain.CurrentBlock()
- tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
- if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil {
- t.Fatalf("failed to import forked block chain: %v", err)
- }
- last := blockChainB[len(blockChainB)-1]
- tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
- } else {
- cur := blockchain.CurrentHeader()
- tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
- if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
- t.Fatalf("failed to import forked header chain: %v", err)
- }
- last := headerChainB[len(headerChainB)-1]
- tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
- }
- // Compare the total difficulties of the chains
- comparator(tdPre, tdPost)
-}
-
-// testBlockChainImport tries to process a chain of blocks, writing them into
-// the database if successful.
-func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error {
- for _, block := range chain {
- // Try and process the block
- err := blockchain.engine.VerifyHeader(blockchain, block.Header())
- if err == nil {
- err = blockchain.validator.ValidateBody(block)
- }
- if err != nil {
- if err == ErrKnownBlock {
- continue
- }
- return err
- }
- statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
- if err != nil {
- return err
- }
- statedb.SetExpectedStateRoot(block.Root())
- if pipelineCommit {
- statedb.EnablePipeCommit()
- }
- statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
- if err != nil {
- blockchain.reportBlock(block, receipts, err)
- return err
- }
- err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
- if err != nil {
- blockchain.reportBlock(block, receipts, err)
- return err
- }
-
- blockchain.chainmu.MustLock()
- rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
- rawdb.WriteBlock(blockchain.db, block)
- statedb.Finalise(false)
- statedb.AccountsIntermediateRoot()
- statedb.Commit(block.NumberU64(), nil)
- blockchain.chainmu.Unlock()
- }
- return nil
-}
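To summarize the stages that testBlockChainImport walks through by hand, in order (a sketch; the stage names paraphrase the calls in the deleted code rather than a real API):

package main

import "fmt"

func main() {
	// Manual import pipeline, in the order the test executes it.
	for _, stage := range []string{
		"engine.VerifyHeader",     // consensus checks on the header
		"validator.ValidateBody",  // uncles and tx list vs. the header
		"processor.Process",       // execute txs on the parent state
		"validator.ValidateState", // receipts, gas, state root vs. the header
		"write TD/block + commit", // persist only once everything checks out
	} {
		fmt.Println(stage)
	}
}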
-
-// testHeaderChainImport tries to process a chain of headers, writing them into
-// the database if successful.
-func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
- for _, header := range chain {
- // Try and validate the header
- if err := blockchain.engine.VerifyHeader(blockchain, header); err != nil {
- return err
- }
- // Manually insert the header into the database, but don't reorganise (allows subsequent testing)
- blockchain.chainmu.MustLock()
- rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
- rawdb.WriteHeader(blockchain.db, header)
- blockchain.chainmu.Unlock()
- }
- return nil
-}
-
-func TestBlockImportVerification(t *testing.T) {
- length := 5
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
- // Start fork from current height
- processor, _ = EnablePipelineCommit(processor)
- testInvalidStateRootBlockImport(t, processor, length, 10, true)
-}
-
-func TestLastBlock(t *testing.T) {
- testLastBlock(t, rawdb.HashScheme)
- testLastBlock(t, rawdb.PathScheme)
-}
-
-func testLastBlock(t *testing.T, scheme string) {
- genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- defer blockchain.Stop()
-
- blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 1, ethash.NewFullFaker(), genDb, 0)
- if _, err := blockchain.InsertChain(blocks); err != nil {
- t.Fatalf("Failed to insert block: %v", err)
- }
- if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
- t.Fatalf("Write/Get HeadBlockHash failed")
- }
-}
-
-// Tests inserting blocks/headers after the fork choice rule has changed.
-// The chain is reorged to whatever chain is specified.
-func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) {
- // Copy old chain up to #i into a new db
- genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false)
- if err != nil {
- t.Fatal("could not make new canonical in testFork", err)
- }
- defer blockchain2.Stop()
-
- // Assert the chains have the same header/block at #i
- var hash1, hash2 common.Hash
- if full {
- hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
- hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
- } else {
- hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
- hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
- }
- if hash1 != hash2 {
- t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
- }
-
- // Extend the newly created chain
- if full {
- blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
- if _, err := blockchain2.InsertChain(blockChainB); err != nil {
- t.Fatalf("failed to insert forking chain: %v", err)
- }
- if blockchain2.CurrentBlock().Number.Uint64() != blockChainB[len(blockChainB)-1].NumberU64() {
- t.Fatalf("failed to reorg to the given chain")
- }
- if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
- t.Fatalf("failed to reorg to the given chain")
- }
- } else {
- headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
- if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
- t.Fatalf("failed to insert forking chain: %v", err)
- }
- if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
- t.Fatalf("failed to reorg to the given chain")
- }
- if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() {
- t.Fatalf("failed to reorg to the given chain")
- }
- }
-}
-
-// Tests that given a starting canonical chain of a given size, it can be extended
-// with various length chains.
-func TestExtendCanonicalHeaders(t *testing.T) {
- testExtendCanonical(t, false, rawdb.HashScheme, false)
- testExtendCanonical(t, false, rawdb.PathScheme, false)
-}
-
-func TestExtendCanonicalBlocks(t *testing.T) {
- testExtendCanonical(t, true, rawdb.HashScheme, false)
- testExtendCanonical(t, true, rawdb.PathScheme, false)
- testExtendCanonical(t, true, rawdb.HashScheme, true)
-}
-
-func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) {
- length := 5
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- // Define the difficulty comparator
- better := func(td1, td2 *big.Int) {
- if td2.Cmp(td1) <= 0 {
- t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
- }
- }
- // Start fork from current height
- testFork(t, processor, length, 1, full, better, scheme, pipeline)
- testFork(t, processor, length, 2, full, better, scheme, pipeline)
- testFork(t, processor, length, 5, full, better, scheme, pipeline)
- testFork(t, processor, length, 10, full, better, scheme, pipeline)
-}
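The comparator closures passed to testFork encode the expected total-difficulty relation between the pre-fork and post-fork heads. A standalone sketch of the three variants used across these tests (returning bool here instead of calling t.Errorf):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// td1 is the TD before importing the fork, td2 after.
	better := func(td1, td2 *big.Int) bool { return td2.Cmp(td1) > 0 }  // fork extends the chain
	worse := func(td1, td2 *big.Int) bool { return td2.Cmp(td1) < 0 }   // fork is shorter
	equal := func(td1, td2 *big.Int) bool { return td2.Cmp(td1) == 0 }  // fork is equal length

	pre, post := big.NewInt(100), big.NewInt(120)
	fmt.Println(better(pre, post), worse(pre, post), equal(pre, post)) // true false false
}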
-
-// Tests that given a starting canonical chain of a given size, it can be extended
-// with various length chains.
-func TestExtendCanonicalHeadersAfterMerge(t *testing.T) {
- testExtendCanonicalAfterMerge(t, false, rawdb.HashScheme)
- testExtendCanonicalAfterMerge(t, false, rawdb.PathScheme)
-}
-func TestExtendCanonicalBlocksAfterMerge(t *testing.T) {
- testExtendCanonicalAfterMerge(t, true, rawdb.HashScheme)
- testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme)
-}
-
-func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {
- length := 5
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- testInsertAfterMerge(t, processor, length, 1, full, scheme)
- testInsertAfterMerge(t, processor, length, 10, full, scheme)
-}
-
-// Tests that given a starting canonical chain of a given size, shorter forks
-// created on it do not take canonical ownership.
-func TestShorterForkHeaders(t *testing.T) {
- testShorterFork(t, false, rawdb.HashScheme, false)
- testShorterFork(t, false, rawdb.PathScheme, false)
-}
-func TestShorterForkBlocks(t *testing.T) {
- testShorterFork(t, true, rawdb.HashScheme, false)
- testShorterFork(t, true, rawdb.PathScheme, false)
- testShorterFork(t, true, rawdb.HashScheme, true)
-}
-
-func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- // Define the difficulty comparator
- worse := func(td1, td2 *big.Int) {
- if td2.Cmp(td1) >= 0 {
- t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
- }
- }
- // Sum of numbers must be less than `length` for this to be a shorter fork
- testFork(t, processor, 0, 3, full, worse, scheme, pipeline)
- testFork(t, processor, 0, 7, full, worse, scheme, pipeline)
- testFork(t, processor, 1, 1, full, worse, scheme, pipeline)
- testFork(t, processor, 1, 7, full, worse, scheme, pipeline)
- testFork(t, processor, 5, 3, full, worse, scheme, pipeline)
- testFork(t, processor, 5, 4, full, worse, scheme, pipeline)
-}
-
-// Tests that given a starting canonical chain of a given size, shorter forks
-// created on it do not take canonical ownership.
-func TestShorterForkHeadersAfterMerge(t *testing.T) {
- testShorterForkAfterMerge(t, false, rawdb.HashScheme)
- testShorterForkAfterMerge(t, false, rawdb.PathScheme)
-}
-func TestShorterForkBlocksAfterMerge(t *testing.T) {
- testShorterForkAfterMerge(t, true, rawdb.HashScheme)
- testShorterForkAfterMerge(t, true, rawdb.PathScheme)
-}
-
-func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- testInsertAfterMerge(t, processor, 0, 3, full, scheme)
- testInsertAfterMerge(t, processor, 0, 7, full, scheme)
- testInsertAfterMerge(t, processor, 1, 1, full, scheme)
- testInsertAfterMerge(t, processor, 1, 7, full, scheme)
- testInsertAfterMerge(t, processor, 5, 3, full, scheme)
- testInsertAfterMerge(t, processor, 5, 4, full, scheme)
-}
-
-// Tests that given a starting canonical chain of a given size, longer forks
-// created on it do take canonical ownership.
-func TestLongerForkHeaders(t *testing.T) {
- testLongerFork(t, false, rawdb.HashScheme, false)
- testLongerFork(t, false, rawdb.PathScheme, false)
-}
-func TestLongerForkBlocks(t *testing.T) {
- testLongerFork(t, true, rawdb.HashScheme, false)
- testLongerFork(t, true, rawdb.PathScheme, false)
- testLongerFork(t, true, rawdb.HashScheme, true)
-}
-
-func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- testInsertAfterMerge(t, processor, 0, 11, full, scheme)
- testInsertAfterMerge(t, processor, 0, 15, full, scheme)
- testInsertAfterMerge(t, processor, 1, 10, full, scheme)
- testInsertAfterMerge(t, processor, 1, 12, full, scheme)
- testInsertAfterMerge(t, processor, 5, 6, full, scheme)
- testInsertAfterMerge(t, processor, 5, 8, full, scheme)
-}
-
-// Tests that given a starting canonical chain of a given size, longer forks
-// created on it do take canonical ownership.
-func TestLongerForkHeadersAfterMerge(t *testing.T) {
- testLongerForkAfterMerge(t, false, rawdb.HashScheme)
- testLongerForkAfterMerge(t, false, rawdb.PathScheme)
-}
-func TestLongerForkBlocksAfterMerge(t *testing.T) {
- testLongerForkAfterMerge(t, true, rawdb.HashScheme)
- testLongerForkAfterMerge(t, true, rawdb.PathScheme)
-}
-
-func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- testInsertAfterMerge(t, processor, 0, 11, full, scheme)
- testInsertAfterMerge(t, processor, 0, 15, full, scheme)
- testInsertAfterMerge(t, processor, 1, 10, full, scheme)
- testInsertAfterMerge(t, processor, 1, 12, full, scheme)
- testInsertAfterMerge(t, processor, 5, 6, full, scheme)
- testInsertAfterMerge(t, processor, 5, 8, full, scheme)
-}
-
-// Tests that given a starting canonical chain of a given size, equal-length
-// forks created on it do take canonical ownership.
-func TestEqualForkHeaders(t *testing.T) {
- testEqualFork(t, false, rawdb.HashScheme, false)
- testEqualFork(t, false, rawdb.PathScheme, false)
-}
-func TestEqualForkBlocks(t *testing.T) {
- testEqualFork(t, true, rawdb.HashScheme, false)
- testEqualFork(t, true, rawdb.PathScheme, false)
- testEqualFork(t, true, rawdb.HashScheme, true)
-}
-
-func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- // Define the difficulty comparator
- equal := func(td1, td2 *big.Int) {
- if td2.Cmp(td1) != 0 {
- t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
- }
- }
- // Sum of numbers must be equal to `length` for this to be an equal fork
- testFork(t, processor, 0, 10, full, equal, scheme, pipeline)
- testFork(t, processor, 1, 9, full, equal, scheme, pipeline)
- testFork(t, processor, 2, 8, full, equal, scheme, pipeline)
- testFork(t, processor, 5, 5, full, equal, scheme, pipeline)
- testFork(t, processor, 6, 4, full, equal, scheme, pipeline)
- testFork(t, processor, 9, 1, full, equal, scheme, pipeline)
-}
-
-// Tests that given a starting canonical chain of a given size, equal-length
-// forks created on it do take canonical ownership.
-func TestEqualForkHeadersAfterMerge(t *testing.T) {
- testEqualForkAfterMerge(t, false, rawdb.HashScheme)
- testEqualForkAfterMerge(t, false, rawdb.PathScheme)
-}
-func TestEqualForkBlocksAfterMerge(t *testing.T) {
- testEqualForkAfterMerge(t, true, rawdb.HashScheme)
- testEqualForkAfterMerge(t, true, rawdb.PathScheme)
-}
-
-func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {
- length := 10
-
- // Make first chain starting from genesis
- _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer processor.Stop()
-
- testInsertAfterMerge(t, processor, 0, 10, full, scheme)
- testInsertAfterMerge(t, processor, 1, 9, full, scheme)
- testInsertAfterMerge(t, processor, 2, 8, full, scheme)
- testInsertAfterMerge(t, processor, 5, 5, full, scheme)
- testInsertAfterMerge(t, processor, 6, 4, full, scheme)
- testInsertAfterMerge(t, processor, 9, 1, full, scheme)
-}
-
-// Tests that chains missing links do not get accepted by the processor.
-func TestBrokenHeaderChain(t *testing.T) {
- testBrokenChain(t, false, rawdb.HashScheme, false)
- testBrokenChain(t, false, rawdb.PathScheme, false)
-}
-func TestBrokenBlockChain(t *testing.T) {
- testBrokenChain(t, true, rawdb.HashScheme, false)
- testBrokenChain(t, true, rawdb.PathScheme, false)
- testBrokenChain(t, true, rawdb.HashScheme, true)
-}
-
-func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {
- // Make chain starting from genesis
- genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to make new canonical chain: %v", err)
- }
- defer blockchain.Stop()
-
- // Create a forked chain, and try to insert with a missing link
- if full {
- chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
- if err := testBlockChainImport(chain, pipeline, blockchain); err == nil {
- t.Errorf("broken block chain not reported")
- }
- } else {
- chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
- if err := testHeaderChainImport(chain, blockchain); err == nil {
- t.Errorf("broken header chain not reported")
- }
- }
-}
-
-// Tests that reorganising a long difficult chain after a short easy one
-// overwrites the canonical numbers and links in the database.
-func TestReorgLongHeaders(t *testing.T) {
- testReorgLong(t, false, rawdb.HashScheme, false)
- testReorgLong(t, false, rawdb.PathScheme, false)
-}
-func TestReorgLongBlocks(t *testing.T) {
- testReorgLong(t, true, rawdb.HashScheme, false)
- testReorgLong(t, true, rawdb.PathScheme, false)
- testReorgLong(t, true, rawdb.HashScheme, true)
-}
-
-func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) {
- testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
-}
-
-// Tests that reorganising a short difficult chain after a long easy one
-// overwrites the canonical numbers and links in the database.
-func TestReorgShortHeaders(t *testing.T) {
- testReorgShort(t, false, rawdb.HashScheme, false)
- testReorgShort(t, false, rawdb.PathScheme, false)
-}
-func TestReorgShortBlocks(t *testing.T) {
- testReorgShort(t, true, rawdb.HashScheme, false)
- testReorgShort(t, true, rawdb.PathScheme, false)
- testReorgShort(t, true, rawdb.HashScheme, true)
-}
-
-func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
- // Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
- // we need a fairly long chain of blocks with different difficulties for a short
- // one to become heavier than a long one. The 96 is an empirical value.
- easy := make([]int64, 96)
- for i := 0; i < len(easy); i++ {
- easy[i] = 60
- }
- diff := make([]int64, len(easy)-1)
- for i := 0; i < len(diff); i++ {
- diff[i] = -9
- }
- testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
-}
-
-func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) {
- // Create a pristine chain and database
- genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- defer blockchain.Stop()
-
- // Insert an easy and a difficult chain afterwards
- easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) {
- b.OffsetTime(first[i])
- })
- diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) {
- b.OffsetTime(second[i])
- })
- if full {
- if _, err := blockchain.InsertChain(easyBlocks); err != nil {
- t.Fatalf("failed to insert easy chain: %v", err)
- }
- if _, err := blockchain.InsertChain(diffBlocks); err != nil {
- t.Fatalf("failed to insert difficult chain: %v", err)
- }
- } else {
- easyHeaders := make([]*types.Header, len(easyBlocks))
- for i, block := range easyBlocks {
- easyHeaders[i] = block.Header()
- }
- diffHeaders := make([]*types.Header, len(diffBlocks))
- for i, block := range diffBlocks {
- diffHeaders[i] = block.Header()
- }
- if _, err := blockchain.InsertHeaderChain(easyHeaders); err != nil {
- t.Fatalf("failed to insert easy chain: %v", err)
- }
- if _, err := blockchain.InsertHeaderChain(diffHeaders); err != nil {
- t.Fatalf("failed to insert difficult chain: %v", err)
- }
- }
- // Check that the chain is valid number- and link-wise
- if full {
- prev := blockchain.CurrentBlock()
- for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().Number.Uint64() - 1); block.NumberU64() != 0; prev, block = block.Header(), blockchain.GetBlockByNumber(block.NumberU64()-1) {
- if prev.ParentHash != block.Hash() {
- t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash, block.Hash())
- }
- }
- } else {
- prev := blockchain.CurrentHeader()
- for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
- if prev.ParentHash != header.Hash() {
- t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
- }
- }
- }
- // Make sure the chain total difficulty is the correct one
- want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
- if full {
- cur := blockchain.CurrentBlock()
- if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
- t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
- }
- } else {
- cur := blockchain.CurrentHeader()
- if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
- t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
- }
- }
-}
-
-// Tests that the insertion functions detect banned hashes.
-func TestBadHeaderHashes(t *testing.T) {
- testBadHashes(t, false, rawdb.HashScheme, false)
- testBadHashes(t, false, rawdb.PathScheme, false)
-}
-
-func TestBadBlockHashes(t *testing.T) {
- testBadHashes(t, true, rawdb.HashScheme, false)
- testBadHashes(t, true, rawdb.HashScheme, true)
- testBadHashes(t, true, rawdb.PathScheme, false)
-}
-
-func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
- // Create a pristine chain and database
- genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- defer blockchain.Stop()
-
- // Create a chain, ban a hash and try to import
- if full {
- blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 3, ethash.NewFaker(), genDb, 10)
-
- BadHashes[blocks[2].Header().Hash()] = true
- defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()
-
- _, err = blockchain.InsertChain(blocks)
- } else {
- headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 3, ethash.NewFaker(), genDb, 10)
-
- BadHashes[headers[2].Hash()] = true
- defer func() { delete(BadHashes, headers[2].Hash()) }()
-
- _, err = blockchain.InsertHeaderChain(headers)
- }
- if !errors.Is(err, ErrBannedHash) {
- t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash)
- }
-}
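The ban mechanism exercised here is a package-level set consulted during insertion; the deleted tests add an entry and defer its removal so other tests are unaffected. A self-contained sketch of that pattern (names illustrative; the real map is core.BadHashes keyed by common.Hash):

package main

import "fmt"

// badHashes stands in for the package-level ban list consulted on insert.
var badHashes = map[string]bool{}

// insert rejects any banned hash, mirroring the ErrBannedHash check.
func insert(hash string) error {
	if badHashes[hash] {
		return fmt.Errorf("banned hash: %s", hash)
	}
	return nil
}

func main() {
	badHashes["0xdead"] = true
	defer delete(badHashes, "0xdead") // tests always clean up the ban
	fmt.Println(insert("0xdead"))     // banned hash: 0xdead
}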
-
-// Tests that bad hashes are detected on boot, and the chain rolled back to a
-// good state prior to the bad hash.
-func TestReorgBadHeaderHashes(t *testing.T) {
- testReorgBadHashes(t, false, rawdb.HashScheme, false)
- testReorgBadHashes(t, false, rawdb.PathScheme, false)
-}
-func TestReorgBadBlockHashes(t *testing.T) {
- testReorgBadHashes(t, true, rawdb.HashScheme, false)
- testReorgBadHashes(t, true, rawdb.HashScheme, true)
- testReorgBadHashes(t, true, rawdb.PathScheme, false)
-}
-
-func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
- // Create a pristine chain and database
- genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- // Create a chain, import and ban afterwards
- headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10)
- blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 4, ethash.NewFaker(), genDb, 10)
-
- if full {
- if _, err = blockchain.InsertChain(blocks); err != nil {
- t.Errorf("failed to import blocks: %v", err)
- }
- if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
- t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
- }
- BadHashes[blocks[3].Header().Hash()] = true
- defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
- } else {
- if _, err = blockchain.InsertHeaderChain(headers); err != nil {
- t.Errorf("failed to import headers: %v", err)
- }
- if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
- t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
- }
- BadHashes[headers[3].Hash()] = true
- defer func() { delete(BadHashes, headers[3].Hash()) }()
- }
- blockchain.Stop()
-
- // Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create new chain manager: %v", err)
- }
- if full {
- if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
- t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
- }
- if blocks[2].Header().GasLimit != ncm.GasLimit() {
- t.Errorf("last block gasLimit mismatch: have: %d, want %d", ncm.GasLimit(), blocks[2].Header().GasLimit)
- }
- } else {
- if ncm.CurrentHeader().Hash() != headers[2].Hash() {
- t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
- }
- }
- ncm.Stop()
-}
-
-// Tests chain insertions in the face of one entity containing an invalid nonce.
-func TestHeadersInsertNonceError(t *testing.T) {
- testInsertNonceError(t, false, rawdb.HashScheme, false)
- testInsertNonceError(t, false, rawdb.PathScheme, false)
-}
-func TestBlocksInsertNonceError(t *testing.T) {
- testInsertNonceError(t, true, rawdb.HashScheme, false)
- testInsertNonceError(t, true, rawdb.HashScheme, true)
- testInsertNonceError(t, true, rawdb.PathScheme, false)
-}
-
-func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) {
- doTest := func(i int) {
- // Create a pristine chain and database
- genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- defer blockchain.Stop()
-
- // Create and insert a chain with a failing nonce
- var (
- failAt int
- failRes int
- failNum uint64
- )
- if full {
- blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), i, ethash.NewFaker(), genDb, 0)
-
- failAt = rand.Int() % len(blocks)
- failNum = blocks[failAt].NumberU64()
-
- blockchain.engine = ethash.NewFakeFailer(failNum)
- failRes, err = blockchain.InsertChain(blocks)
- } else {
- headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), i, ethash.NewFaker(), genDb, 0)
-
- failAt = rand.Int() % len(headers)
- failNum = headers[failAt].Number.Uint64()
-
- blockchain.engine = ethash.NewFakeFailer(failNum)
- blockchain.hc.engine = blockchain.engine
- failRes, err = blockchain.InsertHeaderChain(headers)
- }
- // Check that the returned error indicates the failure
- if failRes != failAt {
- t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
- }
- // Check that no blocks after the failing block have been inserted
- for j := 0; j < i-failAt; j++ {
- if full {
- if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
- t.Errorf("test %d: invalid block in chain: %v", i, block)
- }
- } else {
- if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
- t.Errorf("test %d: invalid header in chain: %v", i, header)
- }
- }
- }
- }
- for i := 1; i < 25 && !t.Failed(); i++ {
- doTest(i)
- }
-}
-
-// Tests that fast importing a block chain produces the same chain data as the
-// classical full block processing.
-func TestFastVsFullChains(t *testing.T) {
- testFastVsFullChains(t, rawdb.HashScheme)
- testFastVsFullChains(t, rawdb.PathScheme)
-}
-
-func testFastVsFullChains(t *testing.T, scheme string) {
- // Configure and generate a sample block chain
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{address: {Balance: funds}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- signer = types.LatestSigner(gspec.Config)
- )
- _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, func(i int, block *BlockGen) {
- block.SetCoinbase(common.Address{0x00})
-
- // If the block number is a multiple of 3, send a few bonus transactions to the miner
- if i%3 == 2 {
- for j := 0; j < i%4+1; j++ {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
- if err != nil {
- panic(err)
- }
- block.AddTx(tx)
- }
- }
- // If the block number is a multiple of 5, add an uncle to the block
- if i%5 == 4 {
- block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))})
- }
- })
- // Import the chain as an archive node for the comparison baseline
- archiveDb := rawdb.NewMemoryDatabase()
- archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer archive.Stop()
-
- if n, err := archive.InsertChain(blocks); err != nil {
- t.Fatalf("failed to process block %d: %v", n, err)
- }
- // Fast import the chain as a non-archive node to test
- fastDb := rawdb.NewMemoryDatabase()
- fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer fast.Stop()
-
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- if n, err := fast.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
- if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
- // Freezer style fast import the chain.
- ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- defer ancientDb.Close()
-
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer ancient.Stop()
-
- if n, err := ancient.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
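- // An ancient limit of len(blocks)/2 moves the first half of the chain (and its receipts) directly into the freezer.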
- if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
-
- // Iterate over all chain data components, and cross reference
- for i := 0; i < len(blocks); i++ {
- num, hash, time := blocks[i].NumberU64(), blocks[i].Hash(), blocks[i].Time()
-
- if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
- t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
- }
- if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
- t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
- }
- if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
- t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
- }
- if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() {
- t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader)
- }
- if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
- t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
- } else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) {
- t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
- } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
- t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock.Uncles(), arblock.Uncles())
- }
-
- // Check receipts.
- freceipts := rawdb.ReadReceipts(fastDb, hash, num, time, fast.Config())
- anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, time, fast.Config())
- areceipts := rawdb.ReadReceipts(archiveDb, hash, num, time, fast.Config())
- if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) || types.DeriveSha(anreceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
- t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
- }
-
- // Check that hash-to-number mappings are present in all databases.
- if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
- t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
- }
- if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
- t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
- }
- if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
- t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
- }
- }
-
- // Check that the canonical chains are the same between the databases
- for i := 0; i < len(blocks)+1; i++ {
- if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
- t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash)
- }
- if anhash, arhash := rawdb.ReadCanonicalHash(ancientDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); anhash != arhash {
- t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash)
- }
- }
-}
-
-// Tests that various import methods move the chain head pointers to the correct
-// positions.
-func TestLightVsFastVsFullChainHeads(t *testing.T) {
- testLightVsFastVsFullChainHeads(t, rawdb.HashScheme)
- testLightVsFastVsFullChainHeads(t, rawdb.PathScheme)
-}
-
-func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
- // Configure and generate a sample block chain
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{address: {Balance: funds}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- )
- height := uint64(1024)
- _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
-
- // makeDb creates a db instance for testing.
- makeDb := func() ethdb.Database {
- db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- return db
- }
- // Configure a subchain to roll back
- remove := blocks[height/2].NumberU64()
-
- // Create a small assertion helper to check the three heads
- assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
- t.Helper()
-
- if num := chain.CurrentBlock().Number.Uint64(); num != block {
- t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
- }
- if num := chain.CurrentSnapBlock().Number.Uint64(); num != fast {
- t.Errorf("%s head snap-block mismatch: have #%v, want #%v", kind, num, fast)
- }
- if num := chain.CurrentHeader().Number.Uint64(); num != header {
- t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
- }
- }
- // Import the chain as an archive node and ensure all pointers are updated
- archiveDb := makeDb()
- defer archiveDb.Close()
-
- archiveCaching := *defaultCacheConfig
- archiveCaching.TrieDirtyDisabled = true
- archiveCaching.StateScheme = scheme
-
- archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if n, err := archive.InsertChain(blocks); err != nil {
- t.Fatalf("failed to process block %d: %v", n, err)
- }
- defer archive.Stop()
-
- assert(t, "archive", archive, height, height, height)
- archive.SetHead(remove - 1)
- assert(t, "archive", archive, height/2, height/2, height/2)
-
- // Import the chain as a non-archive node and ensure all pointers are updated
- fastDb := makeDb()
- defer fastDb.Close()
- fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer fast.Stop()
-
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- if n, err := fast.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
- if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
- assert(t, "fast", fast, height, height, 0)
- fast.SetHead(remove - 1)
- assert(t, "fast", fast, height/2, height/2, 0)
-
- // Import the chain as an ancient-first node and ensure all pointers are updated
- ancientDb := makeDb()
- defer ancientDb.Close()
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer ancient.Stop()
-
- if n, err := ancient.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
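- // An ancient limit of 3*len(blocks)/4 freezes the first three quarters of the chain immediately.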
- if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
- assert(t, "ancient", ancient, height, height, 0)
- ancient.SetHead(remove - 1)
- assert(t, "ancient", ancient, 0, 0, 0)
-
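- // Rewinding below the ancient limit must truncate the freezer down to just the genesis block.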
- if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
- t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
- }
- // Import the chain as a light node and ensure all pointers are updated
- lightDb := makeDb()
- defer lightDb.Close()
- light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if n, err := light.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
- defer light.Stop()
-
- assert(t, "light", light, height, 0, 0)
- light.SetHead(remove - 1)
- assert(t, "light", light, height/2, 0, 0)
-}
-
-// Tests that chain reorganisations handle transaction removals and reinsertions.
-func TestChainTxReorgs(t *testing.T) {
- testChainTxReorgs(t, rawdb.HashScheme)
- testChainTxReorgs(t, rawdb.PathScheme)
-}
-
-func testChainTxReorgs(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = crypto.PubkeyToAddress(key2.PublicKey)
- addr3 = crypto.PubkeyToAddress(key3.PublicKey)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- GasLimit: 3141592,
- Alloc: types.GenesisAlloc{
- addr1: {Balance: big.NewInt(1000000000000000)},
- addr2: {Balance: big.NewInt(1000000000000000)},
- addr3: {Balance: big.NewInt(1000000000000000)},
- },
- }
- signer = types.LatestSigner(gspec.Config)
- )
-
- // Create two transactions shared between the chains:
- // - postponed: transaction included at a later block in the forked chain
- // - swapped: transaction included at the same block number in the forked chain
- postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
- swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
-
- // Create two transactions that will be dropped by the forked chain:
- // - pastDrop: transaction dropped retroactively from a past block
- // - freshDrop: transaction dropped exactly at the block where the reorg is detected
- var pastDrop, freshDrop *types.Transaction
-
- // Create three transactions that will be added in the forked chain:
- // - pastAdd: transaction added before the reorganization is detected
- // - freshAdd: transaction added at the exact block the reorg is detected
- // - futureAdd: transaction added after the reorg has already finished
- var pastAdd, freshAdd, futureAdd *types.Transaction
-
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {
- switch i {
- case 0:
- pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)
-
- gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point
- gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork
-
- case 2:
- freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)
-
- gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
- gen.AddTx(swapped) // This transaction will be swapped out at the exact height
-
- gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
- }
- })
- // Import the chain. This runs all block validation rules.
- db := rawdb.NewMemoryDatabase()
- blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- if i, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert original chain[%d]: %v", i, err)
- }
- defer blockchain.Stop()
-
- // overwrite the old chain
- _, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, gen *BlockGen) {
- switch i {
- case 0:
- pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
- gen.AddTx(pastAdd) // This transaction needs to be injected during reorg
-
- case 2:
- gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
- gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain
-
- freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
- gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time
-
- case 3:
- futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
- gen.AddTx(futureAdd) // This transaction will be added after a full reorg
- }
- })
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
-
- // removed tx
- for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
- if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
- t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
- }
- if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil {
- t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
- }
- }
- // added tx
- for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
- if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
- t.Errorf("add %d: expected tx to be found", i)
- }
- if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
- t.Errorf("add %d: expected receipt to be found", i)
- }
- }
- // shared tx
- for i, tx := range (types.Transactions{postponed, swapped}) {
- if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
- t.Errorf("share %d: expected tx to be found", i)
- }
- if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
- t.Errorf("share %d: expected receipt to be found", i)
- }
- }
-}
-
-func TestLogReorgs(t *testing.T) {
- testLogReorgs(t, rawdb.HashScheme)
- testLogReorgs(t, rawdb.PathScheme)
-}
-
-func testLogReorgs(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
-
- // this code generates a log
- code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- signer = types.LatestSigner(gspec.Config)
- )
-
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- rmLogsCh := make(chan RemovedLogsEvent)
- blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
- if i == 1 {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, code), signer, key1)
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- }
- })
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
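- // Overwrite the chain with a longer, log-free one; the reorg must fire a RemovedLogsEvent for the contract-creation log.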
- _, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
- done := make(chan struct{})
- go func() {
- ev := <-rmLogsCh
- if len(ev.Logs) == 0 {
- t.Error("expected logs")
- }
- close(done)
- }()
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- timeout := time.NewTimer(1 * time.Second)
- defer timeout.Stop()
- select {
- case <-done:
- case <-timeout.C:
- t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
- }
-}
-
-// This EVM code generates a log when the contract is created.
-var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
-
-// This test checks that log events and RemovedLogsEvent are sent
-// when the chain reorganizes.
-func TestLogRebirth(t *testing.T) {
- testLogRebirth(t, rawdb.HashScheme)
- testLogRebirth(t, rawdb.PathScheme)
-}
-
-func testLogRebirth(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- signer = types.LatestSigner(gspec.Config)
- engine = ethash.NewFaker()
- blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- )
- defer blockchain.Stop()
-
- // The event channels.
- newLogCh := make(chan []*types.Log, 10)
- rmLogsCh := make(chan RemovedLogsEvent, 10)
- blockchain.SubscribeLogsEvent(newLogCh)
- blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
-
- // This chain contains 10 logs.
- genDb, chain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
- if i < 2 {
- for ii := 0; ii < 5; ii++ {
- tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
- Nonce: gen.TxNonce(addr1),
- GasPrice: gen.header.BaseFee,
- Gas: uint64(1000001),
- Data: logCode,
- })
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- }
- }
- })
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 10, 0)
-
- // Generate a heavier reorg chain that also contains logs. Inserting the
- // chain removes the ten original logs and emits the fork's ten logs.
- _, forkChain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
- if i == 2 {
- // The last (head) block is not part of the reorg chain; leave it empty
- return
- }
- for ii := 0; ii < 5; ii++ {
- tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
- Nonce: gen.TxNonce(addr1),
- GasPrice: gen.header.BaseFee,
- Gas: uint64(1000000),
- Data: logCode,
- })
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- }
- gen.OffsetTime(-9) // higher block difficulty
- })
- if _, err := blockchain.InsertChain(forkChain); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
-
- // This chain segment is rooted in the original chain, but doesn't contain any logs.
- // When inserting it, the canonical chain switches away from forkChain and re-emits
- // the log event for the old chain, as well as a RemovedLogsEvent for forkChain.
- newBlocks, _ := GenerateChain(gspec.Config, chain[len(chain)-1], engine, genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := blockchain.InsertChain(newBlocks); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
-}
-
-// This test is a variation of TestLogRebirth. It verifies that log events are emitted
-// when a side chain containing log events overtakes the canonical chain.
-func TestSideLogRebirth(t *testing.T) {
- testSideLogRebirth(t, rawdb.HashScheme)
- testSideLogRebirth(t, rawdb.PathScheme)
-}
-
-func testSideLogRebirth(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- signer = types.LatestSigner(gspec.Config)
- blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- )
- defer blockchain.Stop()
-
- newLogCh := make(chan []*types.Log, 10)
- rmLogsCh := make(chan RemovedLogsEvent, 10)
- blockchain.SubscribeLogsEvent(newLogCh)
- blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
-
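- // Build the initial, heavier canonical chain; it contains no logs.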
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
- if i == 1 {
- gen.OffsetTime(-9) // higher block difficulty
- }
- })
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
-
- // Generate side chain with lower difficulty
- genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
- if i == 1 {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- }
- })
- if _, err := blockchain.InsertChain(sideChain); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
-
- // Generate a new block based on the side chain.
- newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := blockchain.InsertChain(newBlocks); err != nil {
- t.Fatalf("failed to insert forked chain: %v", err)
- }
- checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
-}
-
-func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
- t.Helper()
- var (
- countNew int
- countRm int
- prev int
- )
- // Drain events.
- for len(logsCh) > 0 {
- x := <-logsCh
- countNew += len(x)
- for _, log := range x {
- // We expect added logs to be in ascending order: 0:0, 0:1, 1:0 ...
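- // Fold block number and tx index into one sortable key (assumes fewer than 100 txs per block).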
- have := 100*int(log.BlockNumber) + int(log.TxIndex)
- if have < prev {
- t.Fatalf("Expected new logs to arrive in ascending order (%d < %d)", have, prev)
- }
- prev = have
- }
- }
- prev = 0
- for len(rmLogsCh) > 0 {
- x := <-rmLogsCh
- countRm += len(x.Logs)
- for _, log := range x.Logs {
- // We expect removed logs to be in ascending order: 0:0, 0:1, 1:0 ...
- have := 100*int(log.BlockNumber) + int(log.TxIndex)
- if have < prev {
- t.Fatalf("Expected removed logs to arrive in ascending order (%d < %d)", have, prev)
- }
- prev = have
- }
- }
-
- if countNew != wantNew {
- t.Fatalf("wrong number of log events: got %d, want %d", countNew, wantNew)
- }
- if countRm != wantRemoved {
- t.Fatalf("wrong number of removed log events: got %d, want %d", countRm, wantRemoved)
- }
-}
-
-func TestReorgSideEvent(t *testing.T) {
- testReorgSideEvent(t, rawdb.HashScheme)
- testReorgSideEvent(t, rawdb.PathScheme)
-}
-
-func testReorgSideEvent(t *testing.T, scheme string) {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
- }
- signer = types.LatestSigner(gspec.Config)
- )
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
- if _, err := blockchain.InsertChain(chain); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
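- // Build a longer, heavier replacement chain that will overtake the original three blocks.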
- _, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
- if i == 2 {
- gen.OffsetTime(-9)
- }
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
- })
- chainSideCh := make(chan ChainSideEvent, 64)
- blockchain.SubscribeChainSideEvent(chainSideCh)
- if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
- t.Fatalf("failed to insert chain: %v", err)
- }
-
- // The first two blocks of the replacement chain are momentarily considered
- // side blocks, because up to that point the original chain is still the
- // heavier one; the reorg then demotes the original three blocks as well.
- expectedSideHashes := map[common.Hash]bool{
- replacementBlocks[0].Hash(): true,
- replacementBlocks[1].Hash(): true,
- chain[0].Hash(): true,
- chain[1].Hash(): true,
- chain[2].Hash(): true,
- }
-
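- // Count the side-chain events as they arrive.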
- i := 0
-
- const timeoutDura = 10 * time.Second
- timeout := time.NewTimer(timeoutDura)
-done:
- for {
- select {
- case ev := <-chainSideCh:
- block := ev.Block
- if _, ok := expectedSideHashes[block.Hash()]; !ok {
- t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
- }
- i++
-
- if i == len(expectedSideHashes) {
- timeout.Stop()
-
- break done
- }
- timeout.Reset(timeoutDura)
-
- case <-timeout.C:
- t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
- }
- }
-
- // make sure no more events are fired
- select {
- case e := <-chainSideCh:
- t.Errorf("unexpected event fired: %v", e)
- case <-time.After(250 * time.Millisecond):
- }
-}
-
-// Tests if the canonical block can be fetched from the database during chain insertion.
-func TestCanonicalBlockRetrieval(t *testing.T) {
- testCanonicalBlockRetrieval(t, rawdb.HashScheme)
- testCanonicalBlockRetrieval(t, rawdb.PathScheme)
-}
-
-func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
- _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
- if err != nil {
- t.Fatalf("failed to create pristine chain: %v", err)
- }
- defer blockchain.Stop()
-
- _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 10, func(i int, gen *BlockGen) {})
-
- var pend sync.WaitGroup
- pend.Add(len(chain))
-
- for i := range chain {
- go func(block *types.Block) {
- defer pend.Done()
-
- // Try to retrieve the block by its canonical hash and verify that its data can be read back.
- for {
- ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
- if ch == (common.Hash{}) {
- continue // busy wait for canonical hash to be written
- }
- if ch != block.Hash() {
- t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
- return
- }
- fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
- if fb == nil {
- t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
- return
- }
- if fb.Hash() != block.Hash() {
- t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
- return
- }
- return
- }
- }(chain[i])
-
- if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
- t.Fatalf("failed to insert block %d: %v", i, err)
- }
- }
- pend.Wait()
-}
-
-func TestEIP155Transition(t *testing.T) {
- testEIP155Transition(t, rawdb.HashScheme)
- testEIP155Transition(t, rawdb.PathScheme)
-}
-
-func testEIP155Transition(t *testing.T, scheme string) {
- // Configure and generate a sample block chain
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000)
- deleteAddr = common.Address{1}
- gspec = &Genesis{
- Config: &params.ChainConfig{
- ChainID: big.NewInt(1),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(2),
- HomesteadBlock: new(big.Int),
- },
- Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
- }
- )
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
- var (
- tx *types.Transaction
- err error
- basicTx = func(signer types.Signer) (*types.Transaction, error) {
- return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
- }
- )
- switch i {
- case 0:
- tx, err = basicTx(types.HomesteadSigner{})
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
- case 2:
- tx, err = basicTx(types.HomesteadSigner{})
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
-
- tx, err = basicTx(types.LatestSigner(gspec.Config))
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
- case 3:
- tx, err = basicTx(types.HomesteadSigner{})
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
-
- tx, err = basicTx(types.LatestSigner(gspec.Config))
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
- }
- })
-
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- if _, err := blockchain.InsertChain(blocks); err != nil {
- t.Fatal(err)
- }
- block := blockchain.GetBlockByNumber(1)
- if block.Transactions()[0].Protected() {
- t.Error("Expected block[0].txs[0] to not be replay protected")
- }
-
- block = blockchain.GetBlockByNumber(3)
- if block.Transactions()[0].Protected() {
- t.Error("Expected block[3].txs[0] to not be replay protected")
- }
- if !block.Transactions()[1].Protected() {
- t.Error("Expected block[3].txs[1] to be replay protected")
- }
- if _, err := blockchain.InsertChain(blocks[4:]); err != nil {
- t.Fatal(err)
- }
-
- // generate an invalid chain id transaction
- config := &params.ChainConfig{
- ChainID: big.NewInt(2),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(2),
- HomesteadBlock: new(big.Int),
- }
- blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 4, func(i int, block *BlockGen) {
- var (
- tx *types.Transaction
- err error
- basicTx = func(signer types.Signer) (*types.Transaction, error) {
- return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
- }
- )
- if i == 0 {
- tx, err = basicTx(types.LatestSigner(config))
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
- }
- })
- _, err := blockchain.InsertChain(blocks)
- if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) {
- t.Errorf("have %v, want %v", have, want)
- }
-}
-
-func TestEIP161AccountRemoval(t *testing.T) {
- testEIP161AccountRemoval(t, rawdb.HashScheme)
- testEIP161AccountRemoval(t, rawdb.PathScheme)
-}
-
-func testEIP161AccountRemoval(t *testing.T, scheme string) {
- // Configure and generate a sample block chain
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000)
- theAddr = common.Address{1}
- gspec = &Genesis{
- Config: &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: new(big.Int),
- EIP155Block: new(big.Int),
- EIP150Block: new(big.Int),
- EIP158Block: big.NewInt(2),
- },
- Alloc: types.GenesisAlloc{address: {Balance: funds}},
- }
- )
- _, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
- var (
- tx *types.Transaction
- err error
- signer = types.LatestSigner(gspec.Config)
- )
- // Each of the three blocks carries the same plain value transfer.
- tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
- if err != nil {
- t.Fatal(err)
- }
- block.AddTx(tx)
- })
- // account must exist pre eip 161
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
- t.Fatal(err)
- }
- if st, _ := blockchain.State(); !st.Exist(theAddr) {
- t.Error("expected account to exist")
- }
-
- // account needs to be deleted post eip 161
- if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil {
- t.Fatal(err)
- }
- if st, _ := blockchain.State(); st.Exist(theAddr) {
- t.Error("account should not exist")
- }
-
- // account mustn't be created post eip 161
- if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil {
- t.Fatal(err)
- }
- if st, _ := blockchain.State(); st.Exist(theAddr) {
- t.Error("account should not exist")
- }
-}
-
-// This is a regression test (i.e. as weird as it is, don't delete it ever), which
-// tests that under weird reorg conditions the blockchain and its internal header-
-// chain return the same latest block/header.
-//
-// https://github.com/ethereum/go-ethereum/pull/15941
-func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
- testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme)
- testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme)
-}
-
-func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) {
- // Generate a canonical chain to act as the main dataset
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
-
- // Generate a bunch of fork blocks, each side forking from the canonical chain
- forks := make([]*types.Block, len(blocks))
- for i := 0; i < len(forks); i++ {
- parent := genesis.ToBlock()
- if i > 0 {
- parent = blocks[i-1]
- }
- fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
- forks[i] = fork[0]
- }
- // Import the canonical and fork chain side by side, verifying the current block
- // and current header consistency
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- for i := 0; i < len(blocks); i++ {
- if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", i, err)
- }
- if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
- t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
- }
- if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
- t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
- }
- if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
- t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
- }
- }
-}
-
-// Tests that importing small side forks doesn't leave junk in the trie database
-// cache (which would eventually cause memory issues).
-func TestTrieForkGC(t *testing.T) {
- // Generate a canonical chain to act as the main dataset
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
-
- // Generate a bunch of fork blocks, each side forking from the canonical chain
- forks := make([]*types.Block, len(blocks))
- for i := 0; i < len(forks); i++ {
- parent := genesis.ToBlock()
- if i > 0 {
- parent = blocks[i-1]
- }
- fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
- forks[i] = fork[0]
- }
- // Import the canonical and fork chain side by side, forcing the trie cache to cache both
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- for i := 0; i < len(blocks); i++ {
- if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", i, err)
- }
- if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
- t.Fatalf("fork %d: failed to insert into chain: %v", i, err)
- }
- }
- // Dereference all the recent tries and ensure no past trie is left in memory
- for i := 0; i < TriesInMemory; i++ {
- chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
- chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
- }
- if _, nodes, _, _ := chain.TrieDB().Size(); nodes > 0 { // all memory is returned in the nodes return for hashdb
- t.Fatalf("stale tries still alive after garbase collection")
- }
-}
-
-// Tests that doing large reorgs works even if the state associated with the
-// forking point is not available any more.
-func TestLargeReorgTrieGC(t *testing.T) {
- testLargeReorgTrieGC(t, rawdb.HashScheme)
- testLargeReorgTrieGC(t, rawdb.PathScheme)
-}
-
-func testLargeReorgTrieGC(t *testing.T, scheme string) {
- // Generate the original common chain segment and the two competing forks
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
- original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
- competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
-
- // Import the shared chain and the original canonical one
- db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- defer db.Close()
-
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if _, err := chain.InsertChain(shared); err != nil {
- t.Fatalf("failed to insert shared chain: %v", err)
- }
- if _, err := chain.InsertChain(original); err != nil {
- t.Fatalf("failed to insert original chain: %v", err)
- }
- // Ensure that the state associated with the forking point is pruned away
- if chain.HasState(shared[len(shared)-1].Root()) {
- t.Fatalf("common-but-old ancestor still cache")
- }
- // Import the competitor chain without exceeding the canonical's TD and ensure
- // we have not processed any of the blocks (protection against malicious blocks)
- if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
- t.Fatalf("failed to insert competitor chain: %v", err)
- }
- for i, block := range competitor[:len(competitor)-2] {
- if chain.HasState(block.Root()) {
- t.Fatalf("competitor %d: low TD chain became processed", i)
- }
- }
- // Import the head of the competitor chain, triggering the reorg and ensure we
- // successfully reprocess all the stashed away blocks.
- if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
- t.Fatalf("failed to finalize competitor chain: %v", err)
- }
- // In path-based trie database implementation, it will keep 128 diff + 1 disk
- // layers, totally 129 latest states available. In hash-based it's 128.
- states := TestTriesInMemory
- if scheme == rawdb.PathScheme {
- states = states + 1
- }
- for i, block := range competitor[:len(competitor)-states] {
- if chain.HasState(block.Root()) {
- t.Fatalf("competitor %d: unexpected competing chain state", i)
- }
- }
- for i, block := range competitor[len(competitor)-states:] {
- if !chain.HasState(block.Root()) {
- t.Fatalf("competitor %d: competing chain state missing", i)
- }
- }
-}
-
-func TestBlockchainRecovery(t *testing.T) {
- testBlockchainRecovery(t, rawdb.HashScheme)
- testBlockchainRecovery(t, rawdb.PathScheme)
-}
-
-func testBlockchainRecovery(t *testing.T, scheme string) {
- // Configure and generate a sample block chain
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}}
- )
- height := uint64(1024)
- _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
-
- // Import the chain as an ancient-first node and ensure all pointers are updated
- ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- defer ancientDb.Close()
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
-
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- if n, err := ancient.InsertHeaderChain(headers); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
- }
- if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
- rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
- ancient.Stop()
-
- // Destroy head fast block manually
- midBlock := blocks[len(blocks)/2]
- rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
-
- // Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer ancient.Stop()
- if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
- t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
- }
- if num := ancient.CurrentSnapBlock().Number.Uint64(); num != midBlock.NumberU64() {
- t.Errorf("head snap-block mismatch: have #%v, want #%v", num, midBlock.NumberU64())
- }
- if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() {
- t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64())
- }
-}
-
-// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
-func TestInsertReceiptChainRollback(t *testing.T) {
- testInsertReceiptChainRollback(t, rawdb.HashScheme)
- testInsertReceiptChainRollback(t, rawdb.PathScheme)
-}
-
-func testInsertReceiptChainRollback(t *testing.T, scheme string) {
- // Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
- tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains(scheme)
- if err != nil {
- t.Fatal(err)
- }
- defer tmpChain.Stop()
- // Get the side chain receipts.
- if _, err := tmpChain.InsertChain(sideblocks); err != nil {
- t.Fatal("processing side chain failed:", err)
- }
- t.Log("sidechain head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
- sidechainReceipts := make([]types.Receipts, len(sideblocks))
- for i, block := range sideblocks {
- sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
- }
- // Get the canon chain receipts.
- if _, err := tmpChain.InsertChain(canonblocks); err != nil {
- t.Fatal("processing canon chain failed:", err)
- }
- t.Log("canon head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
- canonReceipts := make([]types.Receipts, len(canonblocks))
- for i, block := range canonblocks {
- canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
- }
-
- // Set up a BlockChain that uses the ancient store.
- ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- defer ancientDb.Close()
-
- ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer ancientChain.Stop()
-
- // Import the canonical header chain.
- canonHeaders := make([]*types.Header, len(canonblocks))
- for i, block := range canonblocks {
- canonHeaders[i] = block.Header()
- }
- if _, err = ancientChain.InsertHeaderChain(canonHeaders); err != nil {
- t.Fatal("can't import canon headers:", err)
- }
-
- // Try to insert blocks/receipts of the side chain.
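- // This must fail and roll back, since the side blocks do not match the canonical headers imported above.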
- _, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
- if err == nil {
- t.Fatal("expected error from InsertReceiptChain.")
- }
- if ancientChain.CurrentSnapBlock().Number.Uint64() != 0 {
- t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentSnapBlock().Number)
- }
- if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
- t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
- }
-
- // Insert blocks/receipts of the canonical chain.
- _, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
- if err != nil {
- t.Fatalf("can't import canon chain receipts: %v", err)
- }
- if ancientChain.CurrentSnapBlock().Number.Uint64() != canonblocks[len(canonblocks)-1].NumberU64() {
- t.Fatalf("failed to insert ancient recept chain after rollback")
- }
- if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
- t.Fatalf("wrong ancients count %d", frozen)
- }
-}
-
-// Tests that importing a very large side fork, which is larger than the canon
-// chain, works even though the difficulty per block is kept low: the fork does
-// not overtake the 'canon' chain until it has passed canon by about 200 blocks.
-//
-// Details at:
-// - https://github.com/ethereum/go-ethereum/issues/18977
-// - https://github.com/ethereum/go-ethereum/pull/18988
-func TestLowDiffLongChain(t *testing.T) {
- testLowDiffLongChain(t, rawdb.HashScheme)
- testLowDiffLongChain(t, rawdb.PathScheme)
-}
-
-func testLowDiffLongChain(t *testing.T, scheme string) {
- // Generate a canonical chain to act as the main dataset
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- // We must use a pretty long chain to ensure that the fork doesn't overtake us
- // until after at least 128 blocks post tip
- genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- b.OffsetTime(-9)
- })
-
- // Import the canonical chain
- diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- defer diskdb.Close()
-
- chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- // Generate fork chain, starting from an early block
- parent := blocks[10]
- fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{2})
- })
-
- // And now import the fork
- if i, err := chain.InsertChain(fork); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", i, err)
- }
- head := chain.CurrentBlock()
- if got := fork[len(fork)-1].Hash(); got != head.Hash() {
- t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
- }
- // Sanity check that all the canonical numbers are present
- header := chain.CurrentHeader()
- for number := head.Number.Uint64(); number > 0; number-- {
- if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
- t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
- }
- header = chain.GetHeader(header.ParentHash, number-1)
- }
-}
-
-// Tests that importing a sidechain (S), where
-// - S is sidechain, containing blocks [Sn...Sm]
-// - C is canon chain, containing blocks [G..Cn..Cm]
-// - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock
-// - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain
-//
-// The mergePoint can take these values:
-// -1: the transition never happens
-// 0: the transition happens at genesis
-// 1: the transition happens after some chain segments
-func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) {
- // Generate a canonical chain to act as the main dataset
- chainConfig := *params.TestChainConfig
- var (
- merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
- engine = beacon.New(ethash.NewFaker())
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key.PublicKey)
- nonce = uint64(0)
-
- gspec = &Genesis{
- Config: &chainConfig,
- Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- signer = types.LatestSigner(gspec.Config)
- mergeBlock = math.MaxInt32
- )
- // Generate and import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- // Activate the transition since genesis if required
- if mergePoint == 0 {
- mergeBlock = 0
- merger.ReachTTD()
- merger.FinalizePoS()
-
- // Set the terminal total difficulty in the config
- gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
- }
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
- if err != nil {
- t.Fatalf("failed to create tx: %v", err)
- }
- gen.AddTx(tx)
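- // Seal blocks at or above the merge point as PoS blocks.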
- if int(gen.header.Number.Uint64()) >= mergeBlock {
- gen.SetPoS()
- }
- nonce++
- })
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
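- // Only the most recent TestTriesInMemory states are kept in memory; older states have been pruned away.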
- lastPrunedIndex := len(blocks) - TestTriesInMemory - 1
- lastPrunedBlock := blocks[lastPrunedIndex-1]
- firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory]
-
- // Verify pruning of lastPrunedBlock
- if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
- t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
- }
- // Verify firstNonPrunedBlock is not pruned
- if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
- t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
- }
-
- // Activate the transition in the middle of the chain
- if mergePoint == 1 {
- merger.ReachTTD()
- merger.FinalizePoS()
- // Set the terminal total difficulty in the config
- ttd := big.NewInt(int64(len(blocks)))
- ttd.Mul(ttd, params.GenesisDifficulty)
- gspec.Config.TerminalTotalDifficulty = ttd
- mergeBlock = len(blocks)
- }
-
- // Generate the sidechain
- // The first block should be a known canonical block and the block after it an
- // already-pruned one, i.e. canon(pruned), side, side...
-
- // Generate fork chain, make it longer than canon
- parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
- parent := blocks[parentIndex]
- fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{2})
- if int(b.header.Number.Uint64()) >= mergeBlock {
- b.SetPoS()
- }
- })
- // Prepend the parent(s)
- var sidechain []*types.Block
- for i := numCanonBlocksInSidechain; i > 0; i-- {
- sidechain = append(sidechain, blocks[parentIndex+1-i])
- }
- sidechain = append(sidechain, fork...)
- n, err := chain.InsertChain(sidechain)
- if err != nil {
- t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n)
- }
- head := chain.CurrentBlock()
- if got := fork[len(fork)-1].Hash(); got != head.Hash() {
- t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
- }
-}
-
-// Tests that importing a sidechain (S), where
-// - S is sidechain, containing blocks [Sn...Sm]
-// - C is canon chain, containing blocks [G..Cn..Cm]
-// - The common ancestor Cc is pruned
-// - The first block in S: Sn, is == Cn
-//
-// That is: the sidechain for import contains some blocks already present in canon chain.
-// So the blocks are:
-//
- // [ Cn, Cn+1, Cc, Sn+3 ... Sm]
- //   ^    ^    ^  pruned
-func TestPrunedImportSide(t *testing.T) {
- //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
- //glogger.Verbosity(3)
- //log.Root().SetHandler(log.Handler(glogger))
- testSideImport(t, 3, 3, -1)
- testSideImport(t, 3, -3, -1)
- testSideImport(t, 10, 0, -1)
- testSideImport(t, 1, 10, -1)
- testSideImport(t, 1, -10, -1)
-}
-
-func TestPrunedImportSideWithMerging(t *testing.T) {
- //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
- //glogger.Verbosity(3)
- //log.Root().SetHandler(log.Handler(glogger))
- testSideImport(t, 3, 3, 0)
- testSideImport(t, 3, -3, 0)
- testSideImport(t, 10, 0, 0)
- testSideImport(t, 1, 10, 0)
- testSideImport(t, 1, -10, 0)
-
- testSideImport(t, 3, 3, 1)
- testSideImport(t, 3, -3, 1)
- testSideImport(t, 10, 0, 1)
- testSideImport(t, 1, 10, 1)
- testSideImport(t, 1, -10, 1)
-}
-
-func TestInsertKnownHeaders(t *testing.T) {
- testInsertKnownChainData(t, "headers", rawdb.HashScheme)
- testInsertKnownChainData(t, "headers", rawdb.PathScheme)
-}
-func TestInsertKnownReceiptChain(t *testing.T) {
- testInsertKnownChainData(t, "receipts", rawdb.HashScheme)
- testInsertKnownChainData(t, "receipts", rawdb.PathScheme)
-}
-func TestInsertKnownBlocks(t *testing.T) {
- testInsertKnownChainData(t, "blocks", rawdb.HashScheme)
- testInsertKnownChainData(t, "blocks", rawdb.PathScheme)
-}
-
-func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
-
- // A longer chain but total difficulty is lower.
- blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
-
- // A shorter chain but total difficulty is higher.
- blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- b.OffsetTime(-9) // A higher difficulty
- })
- // Import the shared chain and the original canonical one
- chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- defer chaindb.Close()
-
- chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- var (
- inserter func(blocks []*types.Block, receipts []types.Receipts) error
- asserter func(t *testing.T, block *types.Block)
- )
- if typ == "headers" {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- headers := make([]*types.Header, 0, len(blocks))
- for _, block := range blocks {
- headers = append(headers, block.Header())
- }
- _, err := chain.InsertHeaderChain(headers)
- return err
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentHeader().Hash() != block.Hash() {
- t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
- }
- }
- } else if typ == "receipts" {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- headers := make([]*types.Header, 0, len(blocks))
- for _, block := range blocks {
- headers = append(headers, block.Header())
- }
- _, err := chain.InsertHeaderChain(headers)
- if err != nil {
- return err
- }
- _, err = chain.InsertReceiptChain(blocks, receipts, 0)
- return err
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentSnapBlock().Hash() != block.Hash() {
- t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
- }
- }
- } else {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- _, err := chain.InsertChain(blocks)
- return err
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentBlock().Hash() != block.Hash() {
- t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
- }
- }
- }
-
- if err := inserter(blocks, receipts); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
-
- // Reimport the chain data. All of the imported
- // chain data is now regarded as "known" data.
- if err := inserter(blocks, receipts); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks[len(blocks)-1])
-
- // Import a long canonical chain with some known data as prefix.
- rollback := blocks[len(blocks)/2].NumberU64()
-
- chain.SetHead(rollback - 1)
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks2[len(blocks2)-1])
-
- // Import a shorter but heavier (higher total difficulty) chain with some known data as prefix.
- if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks3[len(blocks3)-1])
-
- // Import a longer but lower total difficulty chain with some known data as prefix.
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- // The head shouldn't change.
- asserter(t, blocks3[len(blocks3)-1])
-
- // Roll back the heavier chain and re-insert the longer chain again
- chain.SetHead(rollback - 1)
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks2[len(blocks2)-1])
-}
-
-func TestInsertKnownHeadersWithMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "headers", 0)
-}
-func TestInsertKnownReceiptChainWithMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "receipts", 0)
-}
-func TestInsertKnownBlocksWithMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "blocks", 0)
-}
-func TestInsertKnownHeadersAfterMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "headers", 1)
-}
-func TestInsertKnownReceiptChainAfterMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "receipts", 1)
-}
-func TestInsertKnownBlocksAfterMerging(t *testing.T) {
- testInsertKnownChainDataWithMerging(t, "blocks", 1)
-}
-
- // mergeHeight can take one of these values:
- // 0: the merge transition is applied from genesis
- // 1: the merge transition is applied after the first segment
-func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) {
- // Copy the TestChainConfig so we can modify it during tests
- chainConfig := *params.TestChainConfig
- var (
- genesis = &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: &chainConfig,
- }
- engine = beacon.New(ethash.NewFaker())
- mergeBlock = uint64(math.MaxUint64)
- )
- // Apply merging since genesis
- if mergeHeight == 0 {
- genesis.Config.TerminalTotalDifficulty = big.NewInt(0)
- mergeBlock = uint64(0)
- }
-
- genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32,
- func(i int, b *BlockGen) {
- if b.header.Number.Uint64() >= mergeBlock {
- b.SetPoS()
- }
- b.SetCoinbase(common.Address{1})
- })
-
- // Apply merging after the first segment
- if mergeHeight == 1 {
- // TTD is genesis diff + blocks
- ttd := big.NewInt(1 + int64(len(blocks)))
- ttd.Mul(ttd, params.GenesisDifficulty)
- genesis.Config.TerminalTotalDifficulty = ttd
- mergeBlock = uint64(len(blocks))
- }
- // Longer chain and shorter chain
- blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- if b.header.Number.Uint64() >= mergeBlock {
- b.SetPoS()
- }
- })
- blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed
- if b.header.Number.Uint64() >= mergeBlock {
- b.SetPoS()
- }
- })
- // Import the shared chain and the original canonical one
- chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create temp freezer db: %v", err)
- }
- defer chaindb.Close()
-
- chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- var (
- inserter func(blocks []*types.Block, receipts []types.Receipts) error
- asserter func(t *testing.T, block *types.Block)
- )
- if typ == "headers" {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- headers := make([]*types.Header, 0, len(blocks))
- for _, block := range blocks {
- headers = append(headers, block.Header())
- }
- i, err := chain.InsertHeaderChain(headers)
- if err != nil {
- return fmt.Errorf("index %d, number %d: %w", i, headers[i].Number, err)
- }
- return err
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentHeader().Hash() != block.Hash() {
- t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
- }
- }
- } else if typ == "receipts" {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- headers := make([]*types.Header, 0, len(blocks))
- for _, block := range blocks {
- headers = append(headers, block.Header())
- }
- i, err := chain.InsertHeaderChain(headers)
- if err != nil {
- return fmt.Errorf("index %d: %w", i, err)
- }
- _, err = chain.InsertReceiptChain(blocks, receipts, 0)
- return err
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentSnapBlock().Hash() != block.Hash() {
- t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
- }
- }
- } else {
- inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
- i, err := chain.InsertChain(blocks)
- if err != nil {
- return fmt.Errorf("index %d: %w", i, err)
- }
- return nil
- }
- asserter = func(t *testing.T, block *types.Block) {
- if chain.CurrentBlock().Hash() != block.Hash() {
- t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
- }
- }
- }
- if err := inserter(blocks, receipts); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
-
- // Reimport the chain data. All of the imported
- // chain data is now regarded as "known" data.
- if err := inserter(blocks, receipts); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks[len(blocks)-1])
-
- // Import a long canonical chain with some known data as prefix.
- rollback := blocks[len(blocks)/2].NumberU64()
- chain.SetHead(rollback - 1)
- if err := inserter(blocks, receipts); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks[len(blocks)-1])
-
- // Import a longer chain with some known data as prefix.
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks2[len(blocks2)-1])
-
- // Import a shorter chain with some known data as prefix.
- // A reorg is expected since the fork choice rule has already changed.
- if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- // The head should now be the tip of blocks3, since the reorg applied.
- asserter(t, blocks3[len(blocks3)-1])
-
- // Reimport the longer chain again; the reorg is still expected
- chain.SetHead(rollback - 1)
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
- t.Fatalf("failed to insert chain data: %v", err)
- }
- asserter(t, blocks2[len(blocks2)-1])
-}
-
-// getLongAndShortChains returns two chains: A is longer, B is heavier.
-func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) {
- // Generate a canonical chain to act as the main dataset
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- // Generate the canonical chain (the import happens in the callers)
- genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- })
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
- }
- // Generate fork chain, make it shorter than canon, with common ancestor pretty early
- parentIndex := 3
- parent := longChain[parentIndex]
- heavyChainExt, _ := GenerateChain(genesis.Config, parent, engine, genDb, 75, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{2})
- b.OffsetTime(-9)
- })
- var heavyChain []*types.Block
- heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
- heavyChain = append(heavyChain, heavyChainExt...)
-
- // Verify that the test is sane
- var (
- longerTd = new(big.Int)
- shorterTd = new(big.Int)
- )
- for index, b := range longChain {
- longerTd.Add(longerTd, b.Difficulty())
- if index <= parentIndex {
- shorterTd.Add(shorterTd, b.Difficulty())
- }
- }
- for _, b := range heavyChain {
- shorterTd.Add(shorterTd, b.Difficulty())
- }
- if shorterTd.Cmp(longerTd) <= 0 {
- return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain td (%v) must be larger than canon td (%v)", shorterTd, longerTd)
- }
- longerNum := longChain[len(longChain)-1].NumberU64()
- shorterNum := heavyChain[len(heavyChain)-1].NumberU64()
- if shorterNum >= longerNum {
- return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain num (%v) must be lower than canon num (%v)", shorterNum, longerNum)
- }
- return chain, longChain, heavyChain, genesis, nil
-}
-
-// TestReorgToShorterRemovesCanonMapping tests that if we
-// 1. Have a chain [0 ... N .. X]
-// 2. Reorg to shorter but heavier chain [0 ... N ... Y]
-// 3. Then there should be no canon mapping for the block at height X
-// 4. The forked block should still be retrievable by hash
-func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
- testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme)
- testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme)
-}
-
-func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) {
- chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme)
- if err != nil {
- t.Fatal(err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(canonblocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- canonNum := chain.CurrentBlock().Number.Uint64()
- canonHash := chain.CurrentBlock().Hash()
- _, err = chain.InsertChain(sideblocks)
- if err != nil {
- t.Errorf("Got error, %v", err)
- }
- head := chain.CurrentBlock()
- if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
- t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
- }
- // We have now inserted a sidechain.
- if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
- t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
- }
- if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
- t.Errorf("expected header to be gone: %v", headerByNum.Number)
- }
- if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
- t.Errorf("expected block to be present: %x", blockByHash.Hash())
- }
- if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
- t.Errorf("expected header to be present: %x", headerByHash.Hash())
- }
-}
-
-// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
-// as TestReorgToShorterRemovesCanonMapping, but applied on headerchain
-// imports -- that is, for fast sync
-func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
- testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme)
- testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme)
-}
-
-func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) {
- chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme)
- if err != nil {
- t.Fatal(err)
- }
- defer chain.Stop()
-
- // Convert into headers
- canonHeaders := make([]*types.Header, len(canonblocks))
- for i, block := range canonblocks {
- canonHeaders[i] = block.Header()
- }
- if n, err := chain.InsertHeaderChain(canonHeaders); err != nil {
- t.Fatalf("header %d: failed to insert into chain: %v", n, err)
- }
- canonNum := chain.CurrentHeader().Number.Uint64()
- canonHash := chain.CurrentBlock().Hash()
- sideHeaders := make([]*types.Header, len(sideblocks))
- for i, block := range sideblocks {
- sideHeaders[i] = block.Header()
- }
- if n, err := chain.InsertHeaderChain(sideHeaders); err != nil {
- t.Fatalf("header %d: failed to insert into chain: %v", n, err)
- }
- head := chain.CurrentHeader()
- if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
- t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
- }
- // We have now inserted a sidechain.
- if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
- t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
- }
- if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
- t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
- }
- if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
- t.Errorf("expected block to be present: %x", blockByHash.Hash())
- }
- if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
- t.Errorf("expected header to be present: %x", headerByHash.Hash())
- }
-}
-
-// Benchmarks large blocks with value transfers to non-existing accounts
-func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
- var (
- signer = types.HomesteadSigner{}
- testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
- bankFunds = big.NewInt(100000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- testBankAddress: {Balance: bankFunds},
- common.HexToAddress("0xc0de"): {
- Code: []byte{0x60, 0x01, 0x50},
- Balance: big.NewInt(0),
- }, // push 1, pop
- },
- GasLimit: 100e6, // 100 M
- }
- )
- // Generate the original common chain segment and the two competing forks
- engine := ethash.NewFaker()
-
- blockGenerator := func(i int, block *BlockGen) {
- block.SetCoinbase(common.Address{1})
- for txi := 0; txi < numTxs; txi++ {
- uniq := uint64(i*numTxs + txi)
- recipient := recipientFn(uniq)
- tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
- if err != nil {
- b.Error(err)
- }
- block.AddTx(tx)
- }
- }
-
- _, shared, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, blockGenerator)
- b.StopTimer()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- // Import the shared chain and the original canonical one
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- b.Fatalf("failed to create tester chain: %v", err)
- }
- b.StartTimer()
- if _, err := chain.InsertChain(shared); err != nil {
- b.Fatalf("failed to insert shared chain: %v", err)
- }
- b.StopTimer()
- block := chain.GetBlockByHash(chain.CurrentBlock().Hash())
- if got := block.Transactions().Len(); got != numTxs*numBlocks {
- b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
- }
- }
-}
-
-func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) {
- var (
- numTxs = 1000
- numBlocks = 1
- )
- recipientFn := func(nonce uint64) common.Address {
- return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce))
- }
- dataFn := func(nonce uint64) []byte {
- return nil
- }
- benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
-}
-
-func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
- var (
- numTxs = 1000
- numBlocks = 1
- )
- b.StopTimer()
- b.ResetTimer()
-
- recipientFn := func(nonce uint64) common.Address {
- return common.BigToAddress(new(big.Int).SetUint64(1337))
- }
- dataFn := func(nonce uint64) []byte {
- return nil
- }
- benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
-}
-
-func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
- var (
- numTxs = 1000
- numBlocks = 1
- )
- b.StopTimer()
- b.ResetTimer()
-
- recipientFn := func(nonce uint64) common.Address {
- return common.BigToAddress(new(big.Int).SetUint64(0xc0de))
- }
- dataFn := func(nonce uint64) []byte {
- return nil
- }
- benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
-}
-
- // Tests importing some old blocks, where all blocks are before the
- // pruning point.
- // This internally leads to a sidechain import, since the blocks trigger an
- // ErrPrunedAncestor error.
- // This may e.g. happen if
- // 1. The downloader rolls back a batch of inserted blocks and exits
- // 2. The downloader starts to sync again
- // 3. The blocks fetched are all known and canonical blocks
-func TestSideImportPrunedBlocks(t *testing.T) {
- testSideImportPrunedBlocks(t, rawdb.HashScheme)
- testSideImportPrunedBlocks(t, rawdb.PathScheme)
-}
-
-func testSideImportPrunedBlocks(t *testing.T, scheme string) {
- // Generate a canonical chain to act as the main dataset
- engine := ethash.NewFaker()
- genesis := &Genesis{
- Config: params.TestChainConfig,
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- // Generate and import the canonical chain
- _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil)
-
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- // The path-based trie database keeps 128 diff layers plus 1 disk layer,
- // so 129 recent states are available. The hash-based scheme keeps 128.
- states := TestTriesInMemory
- if scheme == rawdb.PathScheme {
- states = TestTriesInMemory + 1
- }
- lastPrunedIndex := len(blocks) - states - 1
- lastPrunedBlock := blocks[lastPrunedIndex]
-
- // Verify pruning of lastPrunedBlock
- if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
- t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
- }
- firstNonPrunedBlock := blocks[len(blocks)-states]
- // Verify firstNonPrunedBlock is not pruned
- if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
- t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
- }
- // Now re-import some old blocks
- blockToReimport := blocks[5:8]
- _, err = chain.InsertChain(blockToReimport)
- if err != nil {
- t.Errorf("Got error, %v", err)
- }
-}
-
-// TestDeleteCreateRevert tests a weird state transition corner case that we hit
- // while changing the internals of statedb. The workflow is that a contract is
- // self-destructed, then in a follow-up transaction (but the same block) it is
- // created again and the transaction reverted.
- //
- // The original statedb implementation flushed dirty objects to the tries after
- // each transaction, so this worked OK. The rework accumulated writes in memory
- // first, but the journal wiped the entire state object on create-revert.
-func TestDeleteCreateRevert(t *testing.T) {
- testDeleteCreateRevert(t, rawdb.HashScheme)
- testDeleteCreateRevert(t, rawdb.PathScheme)
-}
-
-func testDeleteCreateRevert(t *testing.T, scheme string) {
- var (
- aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
- bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(100000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address 0xAAAA selfdestructs if called
- aa: {
- // Code needs to just selfdestruct
- Code: []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
- Nonce: 1,
- Balance: big.NewInt(0),
- },
- // The address 0xBBBB sends 1 wei to 0xAAAA, then reverts
- bb: {
- Code: []byte{
- byte(vm.PC), // [0]
- byte(vm.DUP1), // [0,0]
- byte(vm.DUP1), // [0,0,0]
- byte(vm.DUP1), // [0,0,0,0]
- byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value)
- byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa]
- byte(vm.GAS),
- byte(vm.CALL),
- byte(vm.REVERT),
- },
- Balance: big.NewInt(1),
- },
- },
- }
- )
-
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- // One transaction to AAAA
- tx, _ := types.SignTx(types.NewTransaction(0, aa,
- big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- // One transaction to BBBB
- tx, _ = types.SignTx(types.NewTransaction(1, bb,
- big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-}
-
-// TestDeleteRecreateSlots tests a state-transition that contains both deletion
-// and recreation of contract state.
-// Contract A exists, has slots 1 and 2 set
-// Tx 1: Selfdestruct A
-// Tx 2: Re-create A, set slots 3 and 4
-// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
-// and then the new slots exist
-func TestDeleteRecreateSlots(t *testing.T) {
- testDeleteRecreateSlots(t, rawdb.HashScheme)
- testDeleteRecreateSlots(t, rawdb.PathScheme)
-}
-
-func testDeleteRecreateSlots(t *testing.T, scheme string) {
- var (
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
- aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA
- aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
- )
- // Populate two slots
- aaStorage[common.HexToHash("01")] = common.HexToHash("01")
- aaStorage[common.HexToHash("02")] = common.HexToHash("02")
-
- // The bb-code needs to CREATE2 the aa contract. It consists of
- // both initcode and deployment code.
- // The initcode:
- // 1. Sets slots 3=3 and 4=4
- // 2. Returns aaCode
-
- initCode := []byte{
- byte(vm.PUSH1), 0x3, // value
- byte(vm.PUSH1), 0x3, // location
- byte(vm.SSTORE), // Set slot[3] = 3
- byte(vm.PUSH1), 0x4, // value
- byte(vm.PUSH1), 0x4, // location
- byte(vm.SSTORE), // Set slot[4] = 4
- // Slots are set, now return the code
- byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
- byte(vm.PUSH1), 0x0, // memory start on stack
- byte(vm.MSTORE),
- // Code is now in memory.
- byte(vm.PUSH1), 0x2, // size
- byte(vm.PUSH1), byte(32 - 2), // offset
- byte(vm.RETURN),
- }
- if l := len(initCode); l > 32 {
- t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
- }
- bbCode := []byte{
- // Push initcode onto stack
- byte(vm.PUSH1) + byte(len(initCode)-1)}
- bbCode = append(bbCode, initCode...)
- bbCode = append(bbCode, []byte{
- byte(vm.PUSH1), 0x0, // memory start on stack
- byte(vm.MSTORE),
- byte(vm.PUSH1), 0x00, // salt
- byte(vm.PUSH1), byte(len(initCode)), // size
- byte(vm.PUSH1), byte(32 - len(initCode)), // offset
- byte(vm.PUSH1), 0x00, // endowment
- byte(vm.CREATE2),
- }...)
-
- initHash := crypto.Keccak256Hash(initCode)
- aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
- t.Logf("Destination address: %x\n", aa)
-
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address 0xAAAA selfdestructs if called
- aa: {
- // Code needs to just selfdestruct
- Code: aaCode,
- Nonce: 1,
- Balance: big.NewInt(0),
- Storage: aaStorage,
- },
- // The contract BB recreates AA
- bb: {
- Code: bbCode,
- Balance: big.NewInt(1),
- },
- },
- }
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- // One transaction to AA, to kill it
- tx, _ := types.SignTx(types.NewTransaction(0, aa,
- big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- // One transaction to BB, to recreate AA
- tx, _ = types.SignTx(types.NewTransaction(1, bb,
- big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
- Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- statedb, _ := chain.State()
-
- // If all is correct, then slot 1 and 2 are zero
- if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
- t.Errorf("got %x exp %x", got, exp)
- }
- if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
- t.Errorf("got %x exp %x", got, exp)
- }
- // Also, 3 and 4 should be set
- if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp {
- t.Fatalf("got %x exp %x", got, exp)
- }
- if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp {
- t.Fatalf("got %x exp %x", got, exp)
- }
-}
-
- // TestDeleteRecreateAccount tests a state transition that contains deletion of a
- // contract with storage, and a recreation of the same contract via a
- // regular value transfer.
- // The expected outcome is that _all_ slots are cleared from A.
-func TestDeleteRecreateAccount(t *testing.T) {
- testDeleteRecreateAccount(t, rawdb.HashScheme)
- testDeleteRecreateAccount(t, rawdb.PathScheme)
-}
-
-func testDeleteRecreateAccount(t *testing.T, scheme string) {
- var (
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
-
- aa = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
- aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA
- aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
- )
- // Populate two slots
- aaStorage[common.HexToHash("01")] = common.HexToHash("01")
- aaStorage[common.HexToHash("02")] = common.HexToHash("02")
-
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address 0xAAAA selfdestructs if called
- aa: {
- // Code needs to just selfdestruct
- Code: aaCode,
- Nonce: 1,
- Balance: big.NewInt(0),
- Storage: aaStorage,
- },
- },
- }
-
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- // One transaction to AA, to kill it
- tx, _ := types.SignTx(types.NewTransaction(0, aa,
- big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- // One transaction to AA, to recreate it (but without storage)
- tx, _ = types.SignTx(types.NewTransaction(1, aa,
- big.NewInt(1), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
- Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- statedb, _ := chain.State()
-
- // If all is correct, then both slots are zero
- if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
- t.Errorf("got %x exp %x", got, exp)
- }
- if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
- t.Errorf("got %x exp %x", got, exp)
- }
-}
-
- // TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state transitions that contain both deletion
- // and recreation of contract state.
-// Contract A exists, has slots 1 and 2 set
-// Tx 1: Selfdestruct A
-// Tx 2: Re-create A, set slots 3 and 4
-// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
-// and then the new slots exist
-func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
- testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme)
- testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme)
-}
-
-func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
- var (
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
- aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA
- aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
- )
- // Populate two slots
- aaStorage[common.HexToHash("01")] = common.HexToHash("01")
- aaStorage[common.HexToHash("02")] = common.HexToHash("02")
-
- // The bb-code needs to CREATE2 the aa contract. It consists of
- // both initcode and deployment code.
- // The initcode:
- // 1. Sets slots 3=blocknum+1 and 4=4
- // 2. Returns aaCode
-
- initCode := []byte{
- byte(vm.PUSH1), 0x1, //
- byte(vm.NUMBER), // value = number + 1
- byte(vm.ADD), //
- byte(vm.PUSH1), 0x3, // location
- byte(vm.SSTORE), // Set slot[3] = number + 1
- byte(vm.PUSH1), 0x4, // value
- byte(vm.PUSH1), 0x4, // location
- byte(vm.SSTORE), // Set slot[4] = 4
- // Slots are set, now return the code
- byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
- byte(vm.PUSH1), 0x0, // memory start on stack
- byte(vm.MSTORE),
- // Code is now in memory.
- byte(vm.PUSH1), 0x2, // size
- byte(vm.PUSH1), byte(32 - 2), // offset
- byte(vm.RETURN),
- }
- if l := len(initCode); l > 32 {
- t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
- }
- bbCode := []byte{
- // Push initcode onto stack
- byte(vm.PUSH1) + byte(len(initCode)-1)}
- bbCode = append(bbCode, initCode...)
- bbCode = append(bbCode, []byte{
- byte(vm.PUSH1), 0x0, // memory start on stack
- byte(vm.MSTORE),
- byte(vm.PUSH1), 0x00, // salt
- byte(vm.PUSH1), byte(len(initCode)), // size
- byte(vm.PUSH1), byte(32 - len(initCode)), // offset
- byte(vm.PUSH1), 0x00, // endowment
- byte(vm.CREATE2),
- }...)
-
- initHash := crypto.Keccak256Hash(initCode)
- aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
- t.Logf("Destination address: %x\n", aa)
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address 0xAAAA selfdestructs if called
- aa: {
- // Code needs to just selfdestruct
- Code: aaCode,
- Nonce: 1,
- Balance: big.NewInt(0),
- Storage: aaStorage,
- },
- // The contract BB recreates AA
- bb: {
- Code: bbCode,
- Balance: big.NewInt(1),
- },
- },
- }
- var nonce uint64
-
- type expectation struct {
- exist bool
- blocknum int
- values map[int]int
- }
- var current = &expectation{
- exist: true, // exists in genesis
- blocknum: 0,
- values: map[int]int{1: 1, 2: 2},
- }
- var expectations []*expectation
- var newDestruct = func(e *expectation, b *BlockGen) *types.Transaction {
- tx, _ := types.SignTx(types.NewTransaction(nonce, aa,
- big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- nonce++
- if e.exist {
- e.exist = false
- e.values = nil
- }
- //t.Logf("block %d; adding destruct\n", e.blocknum)
- return tx
- }
- var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction {
- tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
- big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- nonce++
- if !e.exist {
- e.exist = true
- e.values = map[int]int{3: e.blocknum + 1, 4: 4}
- }
- //t.Logf("block %d; adding resurrect\n", e.blocknum)
- return tx
- }
-
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 150, func(i int, b *BlockGen) {
- var exp = new(expectation)
- exp.blocknum = i + 1
- exp.values = make(map[int]int)
- for k, v := range current.values {
- exp.values[k] = v
- }
- exp.exist = current.exist
-
- b.SetCoinbase(common.Address{1})
- if i%2 == 0 {
- b.AddTx(newDestruct(exp, b))
- }
- if i%3 == 0 {
- b.AddTx(newResurrect(exp, b))
- }
- if i%5 == 0 {
- b.AddTx(newDestruct(exp, b))
- }
- if i%7 == 0 {
- b.AddTx(newResurrect(exp, b))
- }
- expectations = append(expectations, exp)
- current = exp
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
- //Debug: true,
- //Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- var asHash = func(num int) common.Hash {
- return common.BytesToHash([]byte{byte(num)})
- }
- for i, block := range blocks {
- blockNum := i + 1
- if n, err := chain.InsertChain([]*types.Block{block}); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- statedb, _ := chain.State()
- // If all is correct, then slot 1 and 2 are zero
- if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
- t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
- }
- if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
- t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
- }
- exp := expectations[i]
- if exp.exist {
- if !statedb.Exist(aa) {
- t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa)
- }
- for slot, val := range exp.values {
- if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue {
- t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue)
- }
- }
- } else {
- if statedb.Exist(aa) {
- t.Fatalf("block %d, expected %v to not exist, it did", blockNum, aa)
- }
- }
- }
-}
-
-// TestInitThenFailCreateContract tests a pretty notorious case that happened
-// on mainnet over blocks 7338108, 7338110 and 7338115.
-// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
-// with 0.001 ether (thus created but no code)
-// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
-// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
-// deployment fails due to OOG during initcode execution
-// - Block 7338115: another tx checks the balance of
-// e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
-// zero.
-//
- // The problem is that the snapshotter maintains a destructset, and adds items
- // to the destructset in case something is created "onto" an existing item.
- // We need to either roll back the snapDestructs, or not place it into snapDestructs
- // in the first place.
-
-func TestInitThenFailCreateContract(t *testing.T) {
- testInitThenFailCreateContract(t, rawdb.HashScheme)
- testInitThenFailCreateContract(t, rawdb.PathScheme)
-}
-
-func testInitThenFailCreateContract(t *testing.T, scheme string) {
- var (
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
- )
-
- // The bb-code needs to CREATE2 the aa contract. It consists of
- // both initcode and deployment code.
- // The initcode:
- // 1. If blocknum <= 1, errors out (e.g. hits an invalid opcode)
- // 2. Otherwise, returns a snippet of code
- initCode := []byte{
- byte(vm.PUSH1), 0x1, // y (1)
- byte(vm.NUMBER), // x (number)
- byte(vm.GT), // x > y?
- byte(vm.PUSH1), byte(0x8),
- byte(vm.JUMPI), // jump to label if number > 1
- byte(0xFE), // illegal opcode
- byte(vm.JUMPDEST),
- byte(vm.PUSH1), 0x2, // size
- byte(vm.PUSH1), 0x0, // offset
- byte(vm.RETURN), // return 2 bytes of zero-code
- }
- if l := len(initCode); l > 32 {
- t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
- }
- bbCode := []byte{
- // Push initcode onto stack
- byte(vm.PUSH1) + byte(len(initCode)-1)}
- bbCode = append(bbCode, initCode...)
- bbCode = append(bbCode, []byte{
- byte(vm.PUSH1), 0x0, // memory start on stack
- byte(vm.MSTORE),
- byte(vm.PUSH1), 0x00, // salt
- byte(vm.PUSH1), byte(len(initCode)), // size
- byte(vm.PUSH1), byte(32 - len(initCode)), // offset
- byte(vm.PUSH1), 0x00, // endowment
- byte(vm.CREATE2),
- }...)
-
- initHash := crypto.Keccak256Hash(initCode)
- aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
- t.Logf("Destination address: %x\n", aa)
-
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address aa has some funds
- aa: {Balance: big.NewInt(100000)},
- // The contract BB tries to create code onto AA
- bb: {
- Code: bbCode,
- Balance: big.NewInt(1),
- },
- },
- }
- nonce := uint64(0)
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
- // One transaction to BB
- tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
- big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
- b.AddTx(tx)
- nonce++
- })
-
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
- //Debug: true,
- //Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- statedb, _ := chain.State()
- if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 {
- t.Fatalf("Genesis err, got %v exp %v", got, exp)
- }
- // First block tries to create, but fails
- {
- block := blocks[0]
- if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
- }
- statedb, _ = chain.State()
- if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 {
- t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
- }
- }
- // Import the rest of the blocks
- for _, block := range blocks[1:] {
- if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
- }
- }
-}
-
-// TestEIP2718Transition* tests that an EIP-2718 transaction will be accepted
-// after the fork block has passed. This is verified by sending an EIP-2930
-// access list transaction, which specifies a single slot access, and then
-// checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated
-// correctly.
-
-// TestEIP2718TransitionWithTestChainConfig tests EIP-2718 with TestChainConfig.
-func TestEIP2718TransitionWithTestChainConfig(t *testing.T) {
- testEIP2718TransitionWithConfig(t, rawdb.HashScheme, params.TestChainConfig)
- testEIP2718TransitionWithConfig(t, rawdb.PathScheme, params.TestChainConfig)
-}
-
-func preShanghaiConfig() *params.ChainConfig {
- config := *params.ParliaTestChainConfig
- config.ShanghaiTime = nil
- config.KeplerTime = nil
- config.FeynmanTime = nil
- config.FeynmanFixTime = nil
- config.CancunTime = nil
- return &config
-}
-
-// TestEIP2718TransitionWithParliaConfig tests EIP-2718 with Parlia Config.
-func TestEIP2718TransitionWithParliaConfig(t *testing.T) {
- testEIP2718TransitionWithConfig(t, rawdb.HashScheme, preShanghaiConfig())
- testEIP2718TransitionWithConfig(t, rawdb.PathScheme, preShanghaiConfig())
-}
-
- // testEIP2718TransitionWithConfig tests EIP-2718 with the given ChainConfig.
-func testEIP2718TransitionWithConfig(t *testing.T, scheme string, config *params.ChainConfig) {
- var (
- aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000000000)
- gspec = &Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- // The address 0xAAAA sloads 0x00 and 0x01
- aa: {
- Code: []byte{
- byte(vm.PC),
- byte(vm.PC),
- byte(vm.SLOAD),
- byte(vm.SLOAD),
- },
- Nonce: 0,
- Balance: big.NewInt(0),
- },
- },
- }
- )
- // Generate blocks
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
-
- // One transaction to 0xAAAA
- signer := types.LatestSigner(gspec.Config)
- tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 0,
- To: &aa,
- Gas: 30000,
- GasPrice: b.header.BaseFee,
- AccessList: types.AccessList{{
- Address: aa,
- StorageKeys: []common.Hash{{0}},
- }},
- })
- b.AddTx(tx)
- })
-
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- block := chain.GetBlockByNumber(1)
-
- // Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list
- expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
- vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
- if block.GasUsed() != expected {
- t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed())
- }
-}
-
-// TestEIP1559Transition tests the following:
-//
-// 1. A transaction whose gasFeeCap is greater than the baseFee is valid.
-// 2. Gas accounting for access lists on EIP-1559 transactions is correct.
-// 3. Only the transaction's tip will be received by the coinbase.
-// 4. The transaction sender pays for both the tip and baseFee.
-// 5. The coinbase receives only the partially realized tip when
-// gasFeeCap - gasTipCap < baseFee.
- // 6. Legacy transactions behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
-func TestEIP1559Transition(t *testing.T) {
- testEIP1559Transition(t, rawdb.HashScheme)
- testEIP1559Transition(t, rawdb.PathScheme)
-}
-
-func testEIP1559Transition(t *testing.T, scheme string) {
- var (
- aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
- engine = ethash.NewFaker()
-
- // A sender who makes transactions, has some funds
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = crypto.PubkeyToAddress(key2.PublicKey)
- funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
- config = *params.AllEthashProtocolChanges
- gspec = &Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{
- addr1: {Balance: funds},
- addr2: {Balance: funds},
- // The address 0xAAAA sloads 0x00 and 0x01
- aa: {
- Code: []byte{
- byte(vm.PC),
- byte(vm.PC),
- byte(vm.SLOAD),
- byte(vm.SLOAD),
- },
- Nonce: 0,
- Balance: big.NewInt(0),
- },
- },
- }
- )
-
- gspec.Config.BerlinBlock = common.Big0
- gspec.Config.LondonBlock = common.Big0
- signer := types.LatestSigner(gspec.Config)
-
- genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{1})
-
- // One transaction to 0xAAAA
- accesses := types.AccessList{types.AccessTuple{
- Address: aa,
- StorageKeys: []common.Hash{{0}},
- }}
-
- txdata := &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 0,
- To: &aa,
- Gas: 30000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- AccessList: accesses,
- Data: []byte{},
- }
- tx := types.NewTx(txdata)
- tx, _ = types.SignTx(tx, signer, key1)
-
- b.AddTx(tx)
- })
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- block := chain.GetBlockByNumber(1)
-
- // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage.
- expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
- vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
- if block.GasUsed() != expectedGas {
- t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
- }
-
- state, _ := chain.State()
-
- // 3: Ensure that miner received only the tx's tip.
- actual := state.GetBalance(block.Coinbase()).ToBig()
- expected := new(big.Int).Add(
- new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()),
- ethash.ConstantinopleBlockReward.ToBig(),
- )
- if actual.Cmp(expected) != 0 {
- t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
- }
-
- // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
- actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig())
- expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
- if actual.Cmp(expected) != 0 {
- t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
- }
-
- blocks, _ = GenerateChain(gspec.Config, block, engine, genDb, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{2})
-
- txdata := &types.LegacyTx{
- Nonce: 0,
- To: &aa,
- Gas: 30000,
- GasPrice: newGwei(5),
- }
- tx := types.NewTx(txdata)
- tx, _ = types.SignTx(tx, signer, key2)
-
- b.AddTx(tx)
- })
-
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- block = chain.GetBlockByNumber(2)
- state, _ = chain.State()
- effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64()
-
- // 6+5: Ensure that miner received only the tx's effective tip.
- actual = state.GetBalance(block.Coinbase()).ToBig()
- expected = new(big.Int).Add(
- new(big.Int).SetUint64(block.GasUsed()*effectiveTip),
- ethash.ConstantinopleBlockReward.ToBig(),
- )
- if actual.Cmp(expected) != 0 {
- t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
- }
-
- // 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee).
- actual = new(big.Int).Sub(funds, state.GetBalance(addr2).ToBig())
- expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64()))
- if actual.Cmp(expected) != 0 {
- t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
- }
-}
-
- // Tests the scenario where the chain is reset to a different head whose state is missing.
- // It expects the state to be recovered and all relevant chain markers to be set correctly.
-func TestSetCanonical(t *testing.T) {
- testSetCanonical(t, rawdb.HashScheme)
- testSetCanonical(t, rawdb.PathScheme)
-}
-
-func testSetCanonical(t *testing.T, scheme string) {
- //log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-
- var (
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(100000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{address: {Balance: funds}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- signer = types.LatestSigner(gspec.Config)
- engine = ethash.NewFaker()
- )
- // Generate and import the canonical chain
- _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
- if err != nil {
- panic(err)
- }
- gen.AddTx(tx)
- })
- diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
- defer diskdb.Close()
-
- chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
-
- if n, err := chain.InsertChain(canon); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- // Generate the side chain and import them
- _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
- tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
- if err != nil {
- panic(err)
- }
- gen.AddTx(tx)
- })
- for _, block := range side {
- err := chain.InsertBlockWithoutSetHead(block)
- if err != nil {
- t.Fatalf("Failed to insert into chain: %v", err)
- }
- }
- for _, block := range side {
- got := chain.GetBlockByHash(block.Hash())
- if got == nil {
- t.Fatalf("Lost the inserted block")
- }
- }
-
- // Set the chain head to the side chain, ensure all the relevant markers are updated.
- verify := func(head *types.Block) {
- if chain.CurrentBlock().Hash() != head.Hash() {
- t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
- }
- if chain.CurrentSnapBlock().Hash() != head.Hash() {
- t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
- }
- if chain.CurrentHeader().Hash() != head.Hash() {
- t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
- }
- if !chain.HasState(head.Root()) {
- t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
- }
- }
- chain.SetCanonical(side[len(side)-1])
- verify(side[len(side)-1])
-
- // Reset the chain head to original chain
- chain.SetCanonical(canon[TriesInMemory-1])
- verify(canon[TriesInMemory-1])
-}
-
-// TestCanonicalHashMarker tests that all the canonical hash markers are updated
-// or deleted correctly when a reorg occurs.
-func TestCanonicalHashMarker(t *testing.T) {
- testCanonicalHashMarker(t, rawdb.HashScheme)
- testCanonicalHashMarker(t, rawdb.PathScheme)
-}
-
-func testCanonicalHashMarker(t *testing.T, scheme string) {
- var cases = []struct {
- forkA int
- forkB int
- }{
- // ForkA: 10 blocks
- // ForkB: 1 block
- //
- // reorged:
- // markers [2, 10] should be deleted
- // markers [1] should be updated
- {10, 1},
-
- // ForkA: 10 blocks
- // ForkB: 2 blocks
- //
- // reorged:
- // markers [3, 10] should be deleted
- // markers [1, 2] should be updated
- {10, 2},
-
- // ForkA: 10 blocks
- // ForkB: 10 blocks
- //
- // reorged:
- // markers [1, 10] should be updated
- {10, 10},
-
- // ForkA: 10 blocks
- // ForkB: 11 blocks
- //
- // reorged:
- // markers [1, 11] should be updated
- {10, 11},
- }
- for _, c := range cases {
- var (
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- engine = ethash.NewFaker()
- )
- _, forkA, _ := GenerateChainWithGenesis(gspec, engine, c.forkA, func(i int, gen *BlockGen) {})
- _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})
-
- // Initialize test chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- // Insert forkA and forkB; the canonical chain should still be forkA
- if n, err := chain.InsertChain(forkA); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
- if n, err := chain.InsertChain(forkB); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- verify := func(head *types.Block) {
- if chain.CurrentBlock().Hash() != head.Hash() {
- t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
- }
- if chain.CurrentSnapBlock().Hash() != head.Hash() {
- t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
- }
- if chain.CurrentHeader().Hash() != head.Hash() {
- t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
- }
- if !chain.HasState(head.Root()) {
- t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
- }
- }
-
- // Switch canonical chain to forkB if necessary
- if len(forkA) < len(forkB) {
- verify(forkB[len(forkB)-1])
- } else {
- verify(forkA[len(forkA)-1])
- chain.SetCanonical(forkB[len(forkB)-1])
- verify(forkB[len(forkB)-1])
- }
-
- // Ensure all hash markers are updated correctly
- for i := 0; i < len(forkB); i++ {
- block := forkB[i]
- hash := chain.GetCanonicalHash(block.NumberU64())
- if hash != block.Hash() {
- t.Fatalf("Unexpected canonical hash %d", block.NumberU64())
- }
- }
- if c.forkA > c.forkB {
- for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ {
- hash := chain.GetCanonicalHash(i)
- if hash != (common.Hash{}) {
- t.Fatalf("Unexpected canonical hash %d", i)
- }
- }
- }
- chain.Stop()
- }
-}
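
The markers verified above are the number-to-hash index kept in rawdb; a
shortening reorg must also delete the stale tail, which is what the final
GetCanonicalHash == common.Hash{} loop checks. A minimal sketch using the real
rawdb helpers, with a made-up block hash:

    db := rawdb.NewMemoryDatabase()
    hash := common.Hash{0xde, 0xad}       // hypothetical block hash
    rawdb.WriteCanonicalHash(db, hash, 5) // mark hash as canonical at height 5
    _ = rawdb.ReadCanonicalHash(db, 5)    // == hash
    rawdb.DeleteCanonicalHash(db, 5)      // what a shortening reorg does per stale height
    _ = rawdb.ReadCanonicalHash(db, 5)    // == common.Hash{} again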
-
-func TestCreateThenDeletePreByzantium(t *testing.T) {
- // We use the Ropsten chain config instead of the test chain config; this is
- // deliberate: we want pre-Byzantium rules, under which there are intermediate
- // state roots between transactions.
- testCreateThenDelete(t, &params.ChainConfig{
- ChainID: big.NewInt(3),
- HomesteadBlock: big.NewInt(0),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(10),
- EIP158Block: big.NewInt(10),
- ByzantiumBlock: big.NewInt(1_700_000),
- })
-}
-func TestCreateThenDeletePostByzantium(t *testing.T) {
- testCreateThenDelete(t, params.TestChainConfig)
-}
-
-// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening
-// within the same block.
-func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
- var (
- engine = ethash.NewFaker()
- // A sender who makes transactions and has some funds
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- destAddress = crypto.CreateAddress(address, 0)
- funds = big.NewInt(1000000000000000)
- )
-
- // The runtime code is 0x60ffff: PUSH1 0xFF, SELFDESTRUCT, i.e. SELFDESTRUCT(0xFF)
- code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...)
- initCode := []byte{
- // SSTORE 1:1
- byte(vm.PUSH1), 0x1,
- byte(vm.PUSH1), 0x1,
- byte(vm.SSTORE),
- // Get the runtime-code on the stack
- byte(vm.PUSH32)}
- initCode = append(initCode, code...)
- initCode = append(initCode, []byte{
- byte(vm.PUSH1), 0x0, // offset
- byte(vm.MSTORE),
- byte(vm.PUSH1), 0x3, // size
- byte(vm.PUSH1), 0x0, // offset
- byte(vm.RETURN), // return 3 bytes: the runtime code
- }...)
- gspec := &Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- },
- }
- nonce := uint64(0)
- signer := types.HomesteadSigner{}
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
- fee := big.NewInt(1)
- if b.header.BaseFee != nil {
- fee = b.header.BaseFee
- }
- b.SetCoinbase(common.Address{1})
- tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 100000,
- Data: initCode,
- })
- nonce++
- b.AddTx(tx)
- tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 100000,
- To: &destAddress,
- })
- b.AddTx(tx)
- nonce++
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
- //Debug: true,
- //Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
- // Import the blocks
- for _, block := range blocks {
- if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
- }
- }
-}
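
A step-by-step reading of the init code assembled above (opcode semantics only,
nothing beyond what the bytes encode):

    // PUSH1 0x01, PUSH1 0x01, SSTORE   -> state[1] = 1, so the account has storage
    // PUSH32 0x60ffff00..00            -> runtime code, left-aligned in one 32-byte word
    // PUSH1 0x00, MSTORE               -> mem[0:32] = that word
    // PUSH1 0x03, PUSH1 0x00, RETURN   -> deploy mem[0:3] = 0x60ffff
    //
    // The deployed runtime 0x60ffff is PUSH1 0xff; SELFDESTRUCT, so the second
    // transaction in the block destroys the contract the first one just created.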
-
-func TestDeleteThenCreate(t *testing.T) {
- var (
- engine = ethash.NewFaker()
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- factoryAddr = crypto.CreateAddress(address, 0)
- funds = big.NewInt(1000000000000000)
- )
- /*
- contract Factory {
- function deploy(bytes memory code) public {
- address addr;
- assembly {
- addr := create2(0, add(code, 0x20), mload(code), 0)
- if iszero(extcodesize(addr)) {
- revert(0, 0)
- }
- }
- }
- }
- */
- factoryBIN := common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033")
-
- /*
- contract C {
- uint256 value;
- constructor() {
- value = 100;
- }
- function destruct() public payable {
- selfdestruct(payable(msg.sender));
- }
- receive() payable external {}
- }
- */
- contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033")
- contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI))
-
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- },
- }
- nonce := uint64(0)
- signer := types.HomesteadSigner{}
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
- fee := big.NewInt(1)
- if b.header.BaseFee != nil {
- fee = b.header.BaseFee
- }
- b.SetCoinbase(common.Address{1})
-
- // Block 1
- if i == 0 {
- tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 500000,
- Data: factoryBIN,
- })
- nonce++
- b.AddTx(tx)
-
- data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000")
- tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 500000,
- To: &factoryAddr,
- Data: data,
- })
- b.AddTx(tx)
- nonce++
- } else {
- // Block 2
- tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 500000,
- To: &contractAddr,
- Data: common.Hex2Bytes("2b68b9c6"), // destruct
- })
- nonce++
- b.AddTx(tx)
-
- data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000")
- tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 500000,
- To: &factoryAddr, // re-creation
- Data: data,
- })
- b.AddTx(tx)
- nonce++
- }
- })
- // Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- for _, block := range blocks {
- if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
- }
- }
-}
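
Why the re-creation lands on the same address: crypto.CreateAddress2 implements
the EIP-1014 CREATE2 rule, and every input to it is identical in both blocks:

    // addr = keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode))[12:]
    salt := [32]byte{}
    addr := crypto.CreateAddress2(factoryAddr, salt, crypto.Keccak256(contractABI))
    // addr == contractAddr: same factory, same zero salt, same init code.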
-
-// TestTransientStorageReset ensures the transient storage is wiped correctly
-// between transactions.
-func TestTransientStorageReset(t *testing.T) {
- var (
- engine = ethash.NewFaker()
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- destAddress = crypto.CreateAddress(address, 0)
- funds = big.NewInt(1000000000000000)
- vmConfig = vm.Config{
- ExtraEips: []int{1153}, // Enable transient storage EIP
- }
- )
- code := append([]byte{
- // TLoad value with location 1
- byte(vm.PUSH1), 0x1,
- byte(vm.TLOAD),
-
- // PUSH location
- byte(vm.PUSH1), 0x1,
-
- // SStore location:value
- byte(vm.SSTORE),
- }, make([]byte, 32-6)...)
- initCode := []byte{
- // TSTORE 1:1
- byte(vm.PUSH1), 0x1,
- byte(vm.PUSH1), 0x1,
- byte(vm.TSTORE),
-
- // Get the runtime-code on the stack
- byte(vm.PUSH32)}
- initCode = append(initCode, code...)
- initCode = append(initCode, []byte{
- byte(vm.PUSH1), 0x0, // offset
- byte(vm.MSTORE),
- byte(vm.PUSH1), 0x6, // size
- byte(vm.PUSH1), 0x0, // offset
- byte(vm.RETURN), // return 6 bytes: the runtime code
- }...)
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- },
- }
- nonce := uint64(0)
- signer := types.HomesteadSigner{}
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- fee := big.NewInt(1)
- if b.header.BaseFee != nil {
- fee = b.header.BaseFee
- }
- b.SetCoinbase(common.Address{1})
- tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 100000,
- Data: initCode,
- })
- nonce++
- b.AddTxWithVMConfig(tx, vmConfig)
-
- tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
- Nonce: nonce,
- GasPrice: new(big.Int).Set(fee),
- Gas: 100000,
- To: &destAddress,
- })
- b.AddTxWithVMConfig(tx, vmConfig)
- nonce++
- })
-
- // Initialize the blockchain with 1153 enabled.
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
- // Import the blocks
- if _, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("failed to insert into chain: %v", err)
- }
- // Check the storage
- state, err := chain.StateAt(chain.CurrentHeader().Root)
- if err != nil {
- t.Fatalf("Failed to load state %v", err)
- }
- loc := common.BytesToHash([]byte{1})
- slot := state.GetState(destAddress, loc)
- if slot != (common.Hash{}) {
- t.Fatalf("Unexpected dirty storage slot")
- }
-}
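
The timeline the test encodes, per EIP-1153 semantics (transient storage lives
only for the duration of a single transaction):

    // tx1: the constructor runs TSTORE(1, 1); the value dies when the tx ends.
    // tx2: the runtime runs SSTORE(1, TLOAD(1)); TLOAD now reads 0, so the
    //      persistent slot 1 is written with zero and stays empty.
    // Hence the final GetState(destAddress, 0x01) == common.Hash{} assertion.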
-
-func TestEIP3651(t *testing.T) {
- var (
- aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
- bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
- engine = beacon.NewFaker()
-
- // A sender who makes transactions and has some funds
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = crypto.PubkeyToAddress(key2.PublicKey)
- funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
- config = *params.AllEthashProtocolChanges
- gspec = &Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{
- addr1: {Balance: funds},
- addr2: {Balance: funds},
- // The address 0xAAAA sloads 0x00 and 0x01
- aa: {
- Code: []byte{
- byte(vm.PC),
- byte(vm.PC),
- byte(vm.SLOAD),
- byte(vm.SLOAD),
- },
- Nonce: 0,
- Balance: big.NewInt(0),
- },
- // The address 0xBBBB delegatecalls 0xAAAA
- bb: {
- Code: []byte{
- byte(vm.PUSH1), 0, // out size
- byte(vm.DUP1), // out offset
- byte(vm.DUP1), // in size
- byte(vm.DUP1), // in offset
- byte(vm.PUSH2), // address
- byte(0xaa),
- byte(0xaa),
- byte(vm.GAS), // gas
- byte(vm.DELEGATECALL),
- },
- Nonce: 0,
- Balance: big.NewInt(0),
- },
- },
- }
- )
-
- gspec.Config.BerlinBlock = common.Big0
- gspec.Config.LondonBlock = common.Big0
- gspec.Config.TerminalTotalDifficulty = common.Big0
- gspec.Config.TerminalTotalDifficultyPassed = true
- gspec.Config.ShanghaiTime = u64(0)
- signer := types.LatestSigner(gspec.Config)
-
- _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
- b.SetCoinbase(aa)
- // One transaction to 0xBBBB, which delegatecalls the coinbase 0xAAAA
- txdata := &types.DynamicFeeTx{
- ChainID: gspec.Config.ChainID,
- Nonce: 0,
- To: &bb,
- Gas: 500000,
- GasFeeCap: newGwei(5),
- GasTipCap: big.NewInt(2),
- AccessList: nil,
- Data: []byte{},
- }
- tx := types.NewTx(txdata)
- tx, _ = types.SignTx(tx, signer, key1)
-
- b.AddTx(tx)
- })
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
- if err != nil {
- t.Fatalf("failed to create tester chain: %v", err)
- }
- defer chain.Stop()
- if n, err := chain.InsertChain(blocks); err != nil {
- t.Fatalf("block %d: failed to insert into chain: %v", n, err)
- }
-
- block := chain.GetBlockByNumber(1)
-
- // 1+2: Ensure the gas usage accounts for the coinbase being warm (EIP-3651).
- innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2
- expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa, the coinbase, is warm
- if block.GasUsed() != expectedGas {
- t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
- }
-
- state, _ := chain.State()
-
- // 3: Ensure that miner received only the tx's tip.
- actual := state.GetBalance(block.Coinbase()).ToBig()
- expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64())
- if actual.Cmp(expected) != 0 {
- t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
- }
-
- // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
- actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig())
- expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
- if actual.Cmp(expected) != 0 {
- t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
- }
-}
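
A back-of-the-envelope expansion of expectedGas, using the standard constant
values (TxGas = 21000, GasFastestStep = 3, GasQuickStep = 2,
ColdSloadCostEIP2929 = 2100, warm account access = 100):

    // innerGas    = 2*2 + 2*2100                 = 4204  // two PCs + two cold SLOADs in 0xaaaa
    // expectedGas = 21000 + 5*3 + 2 + 100 + 4204 = 25321
    // Without EIP-3651 the DELEGATECALL to 0xaaaa (the coinbase) would be a
    // cold account access costing 2600 instead of the warm 100.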
-
-type mockParlia struct {
- consensus.Engine
-}
-
-func (c *mockParlia) Author(header *types.Header) (common.Address, error) {
- return header.Coinbase, nil
-}
-
-func (c *mockParlia) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
- return nil
-}
-
-func (c *mockParlia) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
- return nil
-}
-
-func (c *mockParlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
- abort := make(chan<- struct{})
- results := make(chan error, len(headers))
- for i := 0; i < len(headers); i++ {
- results <- nil
- }
- return abort, results
-}
-
-func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, _ *[]*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal,
- _ *[]*types.Receipt, _ *[]*types.Transaction, _ *uint64) (err error) {
- return
-}
-
-func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
- uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, []*types.Receipt, error) {
- // Finalize block
- c.Finalize(chain, header, state, &txs, uncles, nil, nil, nil, nil)
-
- // Assign the final state root to header.
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
-
- // Header seems complete, assemble into a block and return
- return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), receipts, nil
-}
-
-func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
- return big.NewInt(1)
-}
-
-func TestParliaBlobFeeReward(t *testing.T) {
- // Create a database with an ancient freezer attached
- frdir := t.TempDir()
- db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
- if err != nil {
- t.Fatalf("failed to create database with ancient backend")
- }
- config := params.ParliaTestChainConfig
- gspec := &Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{testAddr: {Balance: new(big.Int).SetUint64(10 * params.Ether)}},
- }
- engine := &mockParlia{}
- chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
- signer := types.LatestSigner(config)
-
- _, bs, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *BlockGen) {
- tx, _ := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), false)
- gen.AddTxWithChain(chain, tx)
- tx, sidecar := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), true)
- gen.AddTxWithChain(chain, tx)
- gen.AddBlobSidecar(&types.BlobSidecar{
- BlobTxSidecar: *sidecar,
- TxIndex: 1,
- TxHash: tx.Hash(),
- })
- })
- if _, err := chain.InsertChain(bs); err != nil {
- panic(err)
- }
-
- stateDB, err := chain.State()
- if err != nil {
- panic(err)
- }
- expect := new(big.Int)
- for _, block := range bs {
- receipts := chain.GetReceiptsByHash(block.Hash())
- for _, receipt := range receipts {
- if receipt.BlobGasPrice != nil {
- blob := receipt.BlobGasPrice.Mul(receipt.BlobGasPrice, new(big.Int).SetUint64(receipt.BlobGasUsed))
- expect.Add(expect, blob)
- }
- plain := receipt.EffectiveGasPrice.Mul(receipt.EffectiveGasPrice, new(big.Int).SetUint64(receipt.GasUsed))
- expect.Add(expect, plain)
- }
- }
- actual := stateDB.GetBalance(params.SystemAddress)
- require.Equal(t, expect.Uint64(), actual.Uint64())
-}
-
-func makeMockTx(config *params.ChainConfig, signer types.Signer, key *ecdsa.PrivateKey, nonce uint64, baseFee uint64, blobBaseFee uint64, isBlobTx bool) (*types.Transaction, *types.BlobTxSidecar) {
- if !isBlobTx {
- raw := &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: nonce,
- GasTipCap: big.NewInt(10),
- GasFeeCap: new(big.Int).SetUint64(baseFee + 10),
- Gas: params.TxGas,
- To: &common.Address{0x00},
- Value: big.NewInt(0),
- }
- tx, _ := types.SignTx(types.NewTx(raw), signer, key)
- return tx, nil
- }
- sidecar := &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof},
- }
- raw := &types.BlobTx{
- ChainID: uint256.MustFromBig(config.ChainID),
- Nonce: nonce,
- GasTipCap: uint256.NewInt(10),
- GasFeeCap: uint256.NewInt(baseFee + 10),
- Gas: params.TxGas,
- To: common.Address{0x00},
- Value: uint256.NewInt(0),
- BlobFeeCap: uint256.NewInt(blobBaseFee),
- BlobHashes: sidecar.BlobHashes(),
- }
- tx, _ := types.SignTx(types.NewTx(raw), signer, key)
- return tx, sidecar
-}
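
sidecar.BlobHashes() returns the EIP-4844 versioned hashes the blob transaction
commits to: the sha256 of each 48-byte KZG commitment with the first byte
replaced by the version. A sketch of that rule (the helper name is illustrative;
it needs crypto/sha256):

    func versionedHash(commit kzg4844.Commitment) common.Hash {
        sum := sha256.Sum256(commit[:]) // sha256 of the 48-byte KZG commitment
        sum[0] = 0x01                   // VERSIONED_HASH_VERSION_KZG
        return common.Hash(sum)
    }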
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
deleted file mode 100644
index f099609015..0000000000
--- a/core/chain_indexer_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "math/rand"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
-)
-
-// Runs multiple tests with randomized parameters.
-func TestChainIndexerSingle(t *testing.T) {
- for i := 0; i < 10; i++ {
- testChainIndexer(t, 1)
- }
-}
-
-// Runs multiple tests with randomized parameters and different numbers of
-// chain backends.
-func TestChainIndexerWithChildren(t *testing.T) {
- for i := 2; i < 8; i++ {
- testChainIndexer(t, i)
- }
-}
-
-// testChainIndexer runs a test with either a single chain indexer or a chain of
-// multiple backends. The section size and required confirmation count parameters
-// are randomized.
-func testChainIndexer(t *testing.T, count int) {
- db := rawdb.NewMemoryDatabase()
- defer db.Close()
-
- // Create a chain of indexers and ensure they all report empty
- backends := make([]*testChainIndexBackend, count)
- for i := 0; i < count; i++ {
- var (
- sectionSize = uint64(rand.Intn(100) + 1)
- confirmsReq = uint64(rand.Intn(10))
- )
- backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
- backends[i].indexer = NewChainIndexer(db, rawdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
-
- if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
- t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
- }
- if i > 0 {
- backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
- }
- }
- defer backends[0].indexer.Close() // parent indexer shuts down children
- // notify pings the root indexer about a new head or reorg, then expects
- // processed blocks if a section is processable
- notify := func(headNum, failNum uint64, reorg bool) {
- backends[0].indexer.newHead(headNum, reorg)
- if reorg {
- for _, backend := range backends {
- headNum = backend.reorg(headNum)
- backend.assertSections()
- }
- return
- }
- var cascade bool
- for _, backend := range backends {
- headNum, cascade = backend.assertBlocks(headNum, failNum)
- if !cascade {
- break
- }
- backend.assertSections()
- }
- }
- // inject inserts a new random canonical header into the database directly
- inject := func(number uint64) {
- header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()}
- if number > 0 {
- header.ParentHash = rawdb.ReadCanonicalHash(db, number-1)
- }
- rawdb.WriteHeader(db, header)
- rawdb.WriteCanonicalHash(db, header.Hash(), number)
- }
- // Start indexer with an already existing chain
- for i := uint64(0); i <= 100; i++ {
- inject(i)
- }
- notify(100, 100, false)
-
- // Add new blocks one by one
- for i := uint64(101); i <= 1000; i++ {
- inject(i)
- notify(i, i, false)
- }
- // Do a reorg
- notify(500, 500, true)
-
- // Create new fork
- for i := uint64(501); i <= 1000; i++ {
- inject(i)
- notify(i, i, false)
- }
- for i := uint64(1001); i <= 1500; i++ {
- inject(i)
- }
- // Failed processing scenario where fewer blocks are available than notified
- notify(2000, 1500, false)
-
- // Notify about a reorg (which could have caused the missing blocks if it happened during processing)
- notify(1500, 1500, true)
-
- // Create new fork
- for i := uint64(1501); i <= 2000; i++ {
- inject(i)
- notify(i, i, false)
- }
-}
-
-// testChainIndexBackend implements ChainIndexerBackend
-type testChainIndexBackend struct {
- t *testing.T
- indexer *ChainIndexer
- section, headerCnt, stored uint64
- processCh chan uint64
-}
-
-// assertSections verifies that a chain indexer has the correct number of sections.
-func (b *testChainIndexBackend) assertSections() {
- // Keep trying for 3 seconds if it does not match
- var sections uint64
- for i := 0; i < 300; i++ {
- sections, _, _ = b.indexer.Sections()
- if sections == b.stored {
- return
- }
- time.Sleep(10 * time.Millisecond)
- }
- b.t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, b.stored)
-}
-
-// assertBlocks expects processing calls after new blocks have arrived. If
-// failNum < headNum, we are simulating a scenario where a reorg happened
-// after processing started and the processing of a section fails.
-func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, bool) {
- var sections uint64
- if headNum >= b.indexer.confirmsReq {
- sections = (headNum + 1 - b.indexer.confirmsReq) / b.indexer.sectionSize
- if sections > b.stored {
- // expect processed blocks
- for expected := b.stored * b.indexer.sectionSize; expected < sections*b.indexer.sectionSize; expected++ {
- if expected > failNum {
- // rolled back after processing started, no more process calls expected
- // wait until updating is done to make sure that processing actually fails
- var updating bool
- for i := 0; i < 300; i++ {
- b.indexer.lock.Lock()
- updating = b.indexer.knownSections > b.indexer.storedSections
- b.indexer.lock.Unlock()
- if !updating {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- if updating {
- b.t.Fatalf("update did not finish")
- }
- sections = expected / b.indexer.sectionSize
- break
- }
- select {
- case <-time.After(10 * time.Second):
- b.t.Fatalf("Expected processed block #%d, got nothing", expectd)
- case processed := <-b.processCh:
- if processed != expected {
- b.t.Errorf("Expected processed block #%d, got #%d", expected, processed)
- }
- }
- }
- b.stored = sections
- }
- }
- if b.stored == 0 {
- return 0, false
- }
- return b.stored*b.indexer.sectionSize - 1, true
-}
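
A worked instance of the section arithmetic above: with sectionSize = 10 and
confirmsReq = 5, a head at block #100 confirms blocks 0..95, so

    // sections = (100 + 1 - 5) / 10 = 9

and exactly sections*sectionSize = 90 headers (#0..#89) must have been
processed before the backend may report 9 stored sections.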
-
-func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
- firstChanged := (headNum + 1) / b.indexer.sectionSize
- if firstChanged < b.stored {
- b.stored = firstChanged
- }
- return b.stored * b.indexer.sectionSize
-}
-
-func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
- b.section = section
- b.headerCnt = 0
- return nil
-}
-
-func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error {
- b.headerCnt++
- if b.headerCnt > b.indexer.sectionSize {
- b.t.Error("Processing too many headers")
- }
- //t.processCh <- header.Number.Uint64()
- select {
- case <-time.After(10 * time.Second):
- b.t.Error("Unexpected call to Process")
- // Can't use Fatal since this is not the test's goroutine.
- // Returning error stops the chainIndexer's updateLoop
- return errors.New("Unexpected call to Process")
- case b.processCh <- header.Number.Uint64():
- }
- return nil
-}
-
-func (b *testChainIndexBackend) Commit() error {
- if b.headerCnt != b.indexer.sectionSize {
- b.t.Error("Not enough headers processed")
- }
- return nil
-}
-
-func (b *testChainIndexBackend) Prune(threshold uint64) error {
- return nil
-}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
deleted file mode 100644
index a2ec9e6507..0000000000
--- a/core/chain_makers_test.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "math/big"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-func TestGeneratePOSChain(t *testing.T) {
- var (
- keyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c"
- key, _ = crypto.HexToECDSA(keyHex)
- address = crypto.PubkeyToAddress(key.PublicKey) // 658bdf435d810c91414ec09147daa6db62406379
- aa = common.Address{0xaa}
- bb = common.Address{0xbb}
- funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether))
- config = *params.AllEthashProtocolChanges
- asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
- gspec = &Genesis{
- Config: &config,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- params.BeaconRootsAddress: {Balance: common.Big0, Code: asm4788},
- },
- BaseFee: big.NewInt(params.InitialBaseFee),
- Difficulty: common.Big1,
- GasLimit: 5_000_000,
- }
- gendb = rawdb.NewMemoryDatabase()
- db = rawdb.NewMemoryDatabase()
- )
-
- config.TerminalTotalDifficultyPassed = true
- config.TerminalTotalDifficulty = common.Big0
- config.ShanghaiTime = u64(0)
- config.CancunTime = u64(0)
-
- // init 0xaa with some storage elements
- storage := make(map[common.Hash]common.Hash)
- storage[common.Hash{0x00}] = common.Hash{0x00}
- storage[common.Hash{0x01}] = common.Hash{0x01}
- storage[common.Hash{0x02}] = common.Hash{0x02}
- storage[common.Hash{0x03}] = common.HexToHash("0303")
- gspec.Alloc[aa] = types.Account{
- Balance: common.Big1,
- Nonce: 1,
- Storage: storage,
- Code: common.Hex2Bytes("6042"),
- }
- gspec.Alloc[bb] = types.Account{
- Balance: common.Big2,
- Nonce: 1,
- Storage: storage,
- Code: common.Hex2Bytes("600154600354"),
- }
- genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults))
-
- genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) {
- gen.SetParentBeaconRoot(common.Hash{byte(i + 1)})
-
- // Add value transfer tx.
- tx := types.MustSignNewTx(key, gen.Signer(), &types.LegacyTx{
- Nonce: gen.TxNonce(address),
- To: &address,
- Value: big.NewInt(1000),
- Gas: params.TxGas,
- GasPrice: new(big.Int).Add(gen.BaseFee(), common.Big1),
- })
- gen.AddTx(tx)
-
- // Add withdrawals.
- if i == 1 {
- gen.AddWithdrawal(&types.Withdrawal{
- Validator: 42,
- Address: common.Address{0xee},
- Amount: 1337,
- })
- gen.AddWithdrawal(&types.Withdrawal{
- Validator: 13,
- Address: common.Address{0xee},
- Amount: 1,
- })
- }
- if i == 3 {
- gen.AddWithdrawal(&types.Withdrawal{
- Validator: 42,
- Address: common.Address{0xee},
- Amount: 1337,
- })
- gen.AddWithdrawal(&types.Withdrawal{
- Validator: 13,
- Address: common.Address{0xee},
- Amount: 1,
- })
- }
- })
-
- // Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- if i, err := blockchain.InsertChain(genchain); err != nil {
- t.Fatalf("insert error (block %d): %v\n", genchain[i].NumberU64(), err)
- }
-
- // enforce that withdrawal indexes are monotonically increasing from 0
- var (
- withdrawalIndex uint64
- )
- for i := range genchain {
- blocknum := genchain[i].NumberU64()
- block := blockchain.GetBlockByNumber(blocknum)
- if block == nil {
- t.Fatalf("block %d not found", blocknum)
- }
-
- // Verify receipts.
- genBlockReceipts := genreceipts[i]
- for _, r := range genBlockReceipts {
- if r.BlockNumber.Cmp(block.Number()) != 0 {
- t.Errorf("receipt has wrong block number %d, want %d", r.BlockNumber, block.Number())
- }
- if r.BlockHash != block.Hash() {
- t.Errorf("receipt has wrong block hash %v, want %v", r.BlockHash, block.Hash())
- }
-
- // patch up empty logs list to make DeepEqual below work
- if r.Logs == nil {
- r.Logs = []*types.Log{}
- }
- }
- blockchainReceipts := blockchain.GetReceiptsByHash(block.Hash())
- if !reflect.DeepEqual(genBlockReceipts, blockchainReceipts) {
- t.Fatalf("receipts mismatch\ngenerated: %s\nblockchain: %s", spew.Sdump(genBlockReceipts), spew.Sdump(blockchainReceipts))
- }
-
- // Verify withdrawals.
- if len(block.Withdrawals()) == 0 {
- continue
- }
- for j := 0; j < len(block.Withdrawals()); j++ {
- if block.Withdrawals()[j].Index != withdrawalIndex {
- t.Fatalf("withdrawal index %d does not equal expected index %d", block.Withdrawals()[j].Index, withdrawalIndex)
- }
- withdrawalIndex += 1
- }
-
- // Verify parent beacon root.
- want := common.Hash{byte(blocknum)}
- if got := block.BeaconRoot(); *got != want {
- t.Fatalf("block %d, wrong parent beacon root: got %s, want %s", i, got, want)
- }
- state, _ := blockchain.State()
- idx := block.Time()%8191 + 8191
- got := state.GetState(params.BeaconRootsAddress, common.BigToHash(new(big.Int).SetUint64(idx)))
- if got != want {
- t.Fatalf("block %d, wrong parent beacon root in state: got %s, want %s", i, got, want)
- }
- }
-}
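
The final state read follows the EIP-4788 beacon-roots contract layout: the
contract keeps two ring buffers of HISTORY_BUFFER_LENGTH = 8191 slots each,

    // timestamp slot: block.Time() % 8191
    // root slot:      block.Time() % 8191 + 8191

which is why the test reads slot block.Time()%8191 + 8191 and expects the
parent beacon root injected via SetParentBeaconRoot.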
-
-func ExampleGenerateChain() {
- var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = crypto.PubkeyToAddress(key2.PublicKey)
- addr3 = crypto.PubkeyToAddress(key3.PublicKey)
- db = rawdb.NewMemoryDatabase()
- genDb = rawdb.NewMemoryDatabase()
- )
-
- // Ensure that key1 has some funds in the genesis block.
- gspec := &Genesis{
- Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)},
- Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
- }
- genesis := gspec.MustCommit(genDb, triedb.NewDatabase(genDb, triedb.HashDefaults))
-
- // This call generates a chain of 5 blocks. The function runs for
- // each block and adds different features to gen based on the
- // block index.
- signer := types.HomesteadSigner{}
- chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), genDb, 5, func(i int, gen *BlockGen) {
- switch i {
- case 0:
- // In block 1, addr1 sends addr2 some ether.
- tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1)
- gen.AddTx(tx)
- case 1:
- // In block 2, addr1 sends some more ether to addr2.
- // addr2 passes it on to addr3.
- tx1, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil), signer, key1)
- tx2, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key2)
- gen.AddTx(tx1)
- gen.AddTx(tx2)
- case 2:
- // Block 3 is empty but was mined by addr3.
- gen.SetCoinbase(addr3)
- gen.SetExtra([]byte("yeehaw"))
- case 3:
- // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
- b2 := gen.PrevBlock(1).Header()
- b2.Extra = []byte("foo")
- gen.AddUncle(b2)
- b3 := gen.PrevBlock(2).Header()
- b3.Extra = []byte("foo")
- gen.AddUncle(b3)
- }
- })
-
- // Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer blockchain.Stop()
-
- if i, err := blockchain.InsertChain(chain); err != nil {
- fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
- return
- }
-
- state, _ := blockchain.State()
- fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number)
- fmt.Println("balance of addr1:", state.GetBalance(addr1))
- fmt.Println("balance of addr2:", state.GetBalance(addr2))
- fmt.Println("balance of addr3:", state.GetBalance(addr3))
- // Output:
- // last block: #5
- // balance of addr1: 989000
- // balance of addr2: 10000
- // balance of addr3: 19687500000000001000
-}
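
The arithmetic behind the Output block, assuming the classic 5-ether
pre-Byzantium ethash rewards: gas is free here because the legacy transactions
carry a zero gas price, and GenerateChain carries the parent's coinbase
forward, so addr3 also mines blocks 4 and 5:

    // addr1: 1_000_000 - 10_000 - 1_000 = 989_000 wei
    // addr2: 10_000 + 1_000 - 1_000     = 10_000 wei
    // addr3: 1_000 wei from tx2, plus mining income:
    //          block 3:        5 ether
    //          block 4:        5 + 2*(5/32) = 5.3125 ether (two uncle-inclusion bonuses)
    //          block 5:        5 ether
    //          height-3 uncle: 7/8 * 5     = 4.375 ether
    //        total: 19.6875 ether + 1_000 wei = 19687500000000001000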
diff --git a/core/dao_test.go b/core/dao_test.go
deleted file mode 100644
index b9a899ef2f..0000000000
--- a/core/dao_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// Tests that DAO-fork enabled clients can properly filter out fork-commencing
-// blocks based on their extradata fields.
-func TestDAOForkRangeExtradata(t *testing.T) {
- forkBlock := big.NewInt(32)
- chainConfig := *params.NonActivatedConfig
- chainConfig.HomesteadBlock = big.NewInt(0)
-
- // Generate a common prefix for both pro-forkers and non-forkers
- gspec := &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: &chainConfig,
- }
- genDb, prefix, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {})
-
- // Create the concurrent, conflicting two nodes
- proDb := rawdb.NewMemoryDatabase()
- proConf := *params.NonActivatedConfig
- proConf.HomesteadBlock = big.NewInt(0)
- proConf.DAOForkBlock = forkBlock
- proConf.DAOForkSupport = true
- progspec := &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: &proConf,
- }
- proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer proBc.Stop()
-
- conDb := rawdb.NewMemoryDatabase()
- conConf := *params.NonActivatedConfig
- conConf.HomesteadBlock = big.NewInt(0)
- conConf.DAOForkBlock = forkBlock
- conConf.DAOForkSupport = false
- congspec := &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: &conConf,
- }
- conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer conBc.Stop()
-
- if _, err := proBc.InsertChain(prefix); err != nil {
- t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
- }
- if _, err := conBc.InsertChain(prefix); err != nil {
- t.Fatalf("con-fork: failed to import chain prefix: %v", err)
- }
- // Try to expand both pro-fork and no-fork chains iteratively with the other camp's blocks
- for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ {
- // Create a pro-fork block, and try to feed into the no-fork chain
- bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
-
- blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
- for j := 0; j < len(blocks)/2; j++ {
- blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
- }
- if _, err := bc.InsertChain(blocks); err != nil {
- t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
- }
- if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil {
- t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
- }
- bc.Stop()
- blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := conBc.InsertChain(blocks); err == nil {
- t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0])
- }
- // Create a proper no-fork block for the contra-forker
- blocks, _ = GenerateChain(&conConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := conBc.InsertChain(blocks); err != nil {
- t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
- }
- // Create a no-fork block, and try to feed into the pro-fork chain
- bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
-
- blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
- for j := 0; j < len(blocks)/2; j++ {
- blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
- }
- if _, err := bc.InsertChain(blocks); err != nil {
- t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
- }
- if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil {
- t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
- }
- bc.Stop()
- blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := proBc.InsertChain(blocks); err == nil {
- t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0])
- }
- // Create a proper pro-fork block for the pro-forker
- blocks, _ = GenerateChain(&proConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := proBc.InsertChain(blocks); err != nil {
- t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err)
- }
- }
- // Verify that contra-forkers accept pro-fork extra-datas after forking finishes
- bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer bc.Stop()
-
- blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
- for j := 0; j < len(blocks)/2; j++ {
- blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
- }
- if _, err := bc.InsertChain(blocks); err != nil {
- t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
- }
- if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil {
- t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
- }
- blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := conBc.InsertChain(blocks); err != nil {
- t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
- }
- // Verify that pro-forkers accept contra-fork extra-datas after forking finishes
- bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer bc.Stop()
-
- blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
- for j := 0; j < len(blocks)/2; j++ {
- blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
- }
- if _, err := bc.InsertChain(blocks); err != nil {
- t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
- }
- if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil {
- t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
- }
- blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
- if _, err := proBc.InsertChain(blocks); err != nil {
- t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err)
- }
-}
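
The rule both chains enforce on each other's blocks is the extradata check from
consensus/misc (sketched here, not quoted): within the fork window, pro-fork
and no-fork clients demand opposite extradata.

    // For DAOForkBlock <= number < DAOForkBlock + DAOForkExtraRange (10 blocks):
    //   DAOForkSupport == true:  header.Extra must equal params.DAOForkBlockExtra
    //   DAOForkSupport == false: header.Extra must NOT equal it
    // Outside the window (the post-fork checks above) either value is accepted.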
diff --git a/core/data_availability_test.go b/core/data_availability_test.go
deleted file mode 100644
index 2269aee232..0000000000
--- a/core/data_availability_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package core
-
-import (
- "crypto/rand"
- "math/big"
- "testing"
-
- "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
- gokzg4844 "github.com/crate-crypto/go-kzg-4844"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
- "github.com/ethereum/go-ethereum/params"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
-)
-
-var (
- emptyBlob = kzg4844.Blob{}
- emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
- emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
-)
-
-func TestIsDataAvailable(t *testing.T) {
- hr := NewMockDAHeaderReader(params.ParliaTestChainConfig)
- tests := []struct {
- block *types.Block
- chasingHead uint64
- withSidecar bool
- err bool
- }{
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- chasingHead: 1,
- withSidecar: true,
- err: false,
- },
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), nil),
- }, nil),
- chasingHead: 1,
- withSidecar: true,
- err: false,
- },
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- chasingHead: 1,
- withSidecar: false,
- err: true,
- },
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof},
- }),
- }, nil),
- chasingHead: 1,
- withSidecar: true,
- err: false,
- },
-
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof},
- }),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof},
- }),
- }, nil),
- chasingHead: params.MinBlocksForBlobRequests + 1,
- withSidecar: true,
- err: true,
- },
- {
- block: types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(0),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- chasingHead: params.MinBlocksForBlobRequests + 1,
- withSidecar: false,
- err: false,
- },
- }
-
- for i, item := range tests {
- if item.withSidecar {
- item.block = item.block.WithSidecars(collectBlobsFromTxs(item.block.Header(), item.block.Transactions()))
- }
- hr.setChasingHead(item.chasingHead)
- err := IsDataAvailable(hr, item.block)
- if item.err {
- require.Error(t, err, i)
- t.Log(err)
- continue
- }
- require.NoError(t, err, i)
- }
-}
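
A minimal sketch of the per-sidecar validation a check like IsDataAvailable has
to perform, assuming the by-value kzg4844 API used elsewhere in this file (the
helper, its parameters, and the error text are illustrative):

    func validateSidecar(s *types.BlobTxSidecar, hashes []common.Hash) error {
        // Counts must line up before any KZG math is attempted.
        if len(s.Blobs) != len(hashes) || len(s.Commitments) != len(hashes) || len(s.Proofs) != len(hashes) {
            return errors.New("blob/commitment/proof count mismatch")
        }
        for i := range s.Blobs {
            // Each blob must match its commitment under the KZG opening proof.
            if err := kzg4844.VerifyBlobProof(s.Blobs[i], s.Commitments[i], s.Proofs[i]); err != nil {
                return err
            }
        }
        return nil
    }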
-
-func TestCheckDataAvailableInBatch(t *testing.T) {
- hr := NewMockDAHeaderReader(params.ParliaTestChainConfig)
- tests := []struct {
- chain types.Blocks
- err bool
- index int
- }{
- {
- chain: types.Blocks{
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof},
- }),
- }, nil),
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(2),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof},
- }),
- }, nil),
- },
- err: false,
- },
- {
- chain: types.Blocks{
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(2),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(3),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }),
- }, nil),
- },
- err: true,
- index: 1,
- },
- {
- chain: types.Blocks{
- types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), nil),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof},
- }),
- createMockDATx(hr.Config(), &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof},
- }),
- }, nil),
- },
- err: true,
- index: 0,
- },
- }
-
- for i, item := range tests {
- for j, block := range item.chain {
- item.chain[j] = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions()))
- }
- index, err := CheckDataAvailableInBatch(hr, item.chain)
- if item.err {
- t.Log(index, err)
- require.Error(t, err, i)
- require.Equal(t, item.index, index, i)
- continue
- }
- require.NoError(t, err, i)
- }
-}
-
-func BenchmarkEmptySidecarDAChecking(b *testing.B) {
- hr := NewMockDAHeaderReader(params.ParliaTestChainConfig)
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), emptySidecar()),
- createMockDATx(hr.Config(), emptySidecar()),
- createMockDATx(hr.Config(), emptySidecar()),
- createMockDATx(hr.Config(), emptySidecar()),
- createMockDATx(hr.Config(), emptySidecar()),
- createMockDATx(hr.Config(), emptySidecar()),
- }, nil)
- block = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions()))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- IsDataAvailable(hr, block)
- }
-}
-
-func BenchmarkRandomSidecarDAChecking(b *testing.B) {
- hr := NewMockDAHeaderReader(params.ParliaTestChainConfig)
- const count = 10
- blocks := make([]*types.Block, count)
- for i := 0; i < len(blocks); i++ {
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(1),
- }).WithBody(types.Transactions{
- createMockDATx(hr.Config(), randomSidecar()),
- createMockDATx(hr.Config(), randomSidecar()),
- createMockDATx(hr.Config(), randomSidecar()),
- createMockDATx(hr.Config(), randomSidecar()),
- createMockDATx(hr.Config(), randomSidecar()),
- createMockDATx(hr.Config(), randomSidecar()),
- }, nil)
- block = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions()))
- blocks[i] = block
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- IsDataAvailable(hr, blocks[i%count])
- }
-}
-
-func collectBlobsFromTxs(header *types.Header, txs types.Transactions) types.BlobSidecars {
- sidecars := make(types.BlobSidecars, 0, len(txs))
- for i, tx := range txs {
- sidecar := types.NewBlobSidecarFromTx(tx)
- if sidecar == nil {
- continue
- }
- sidecar.TxIndex = uint64(i)
- sidecar.TxHash = tx.Hash()
- sidecar.BlockNumber = header.Number
- sidecar.BlockHash = header.Hash()
- sidecars = append(sidecars, sidecar)
- }
- return sidecars
-}
-
-type mockDAHeaderReader struct {
- config *params.ChainConfig
- chasingHead uint64
-}
-
-func NewMockDAHeaderReader(config *params.ChainConfig) *mockDAHeaderReader {
- return &mockDAHeaderReader{
- config: config,
- chasingHead: 0,
- }
-}
-
-func (r *mockDAHeaderReader) setChasingHead(h uint64) {
- r.chasingHead = h
-}
-
-func (r *mockDAHeaderReader) Config() *params.ChainConfig {
- return r.config
-}
-
-func (r *mockDAHeaderReader) CurrentHeader() *types.Header {
- return &types.Header{
- Number: new(big.Int).SetUint64(r.chasingHead),
- }
-}
-
-func (r *mockDAHeaderReader) ChasingHead() *types.Header {
- return &types.Header{
- Number: new(big.Int).SetUint64(r.chasingHead),
- }
-}
-
-func (r *mockDAHeaderReader) GenesisHeader() *types.Header {
- panic("not supported")
-}
-
-func (r *mockDAHeaderReader) GetHeader(hash common.Hash, number uint64) *types.Header {
- panic("not supported")
-}
-
-func (r *mockDAHeaderReader) GetHeaderByNumber(number uint64) *types.Header {
- panic("not supported")
-}
-
-func (r *mockDAHeaderReader) GetHeaderByHash(hash common.Hash) *types.Header {
- panic("not supported")
-}
-
-func (r *mockDAHeaderReader) GetTd(hash common.Hash, number uint64) *big.Int {
- panic("not supported")
-}
-
-func (r *mockDAHeaderReader) GetHighestVerifiedHeader() *types.Header {
- panic("not supported")
-}
-
-func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction {
- if sidecar == nil {
- tx := &types.DynamicFeeTx{
- ChainID: config.ChainID,
- Nonce: 0,
- GasTipCap: big.NewInt(22),
- GasFeeCap: big.NewInt(5),
- Gas: 25000,
- To: &common.Address{0x03, 0x04, 0x05},
- Value: big.NewInt(99),
- Data: make([]byte, 50),
- }
- return types.NewTx(tx)
- }
- tx := &types.BlobTx{
- ChainID: uint256.MustFromBig(config.ChainID),
- Nonce: 5,
- GasTipCap: uint256.NewInt(22),
- GasFeeCap: uint256.NewInt(5),
- Gas: 25000,
- To: common.Address{0x03, 0x04, 0x05},
- Value: uint256.NewInt(99),
- Data: make([]byte, 50),
- BlobFeeCap: uint256.NewInt(15),
- BlobHashes: sidecar.BlobHashes(),
- Sidecar: sidecar,
- }
- return types.NewTx(tx)
-}
-
-func randFieldElement() [32]byte {
- bytes := make([]byte, 32)
- _, err := rand.Read(bytes)
- if err != nil {
- panic("failed to get random field element")
- }
- var r fr.Element
- r.SetBytes(bytes)
-
- return gokzg4844.SerializeScalar(r)
-}
-
-func randBlob() kzg4844.Blob {
- var blob kzg4844.Blob
- for i := 0; i < len(blob); i += gokzg4844.SerializedScalarSize {
- fieldElementBytes := randFieldElement()
- copy(blob[i:i+gokzg4844.SerializedScalarSize], fieldElementBytes[:])
- }
- return blob
-}
-
-func randomSidecar() *types.BlobTxSidecar {
- blob := randBlob()
- commitment, _ := kzg4844.BlobToCommitment(blob)
- proof, _ := kzg4844.ComputeBlobProof(blob, commitment)
- return &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{blob},
- Commitments: []kzg4844.Commitment{commitment},
- Proofs: []kzg4844.Proof{proof},
- }
-}
-
-func emptySidecar() *types.BlobTxSidecar {
- return &types.BlobTxSidecar{
- Blobs: []kzg4844.Blob{emptyBlob},
- Commitments: []kzg4844.Commitment{emptyBlobCommit},
- Proofs: []kzg4844.Proof{emptyBlobProof},
- }
-}
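A note on what the deleted DA tests above were exercising: an EIP-4844 sidecar must carry exactly one commitment and one proof per blob, and each commitment must hash to the versioned blob hash the transaction references. Below is a minimal, self-contained sketch of that structural check with stand-in types (it is not the bsc implementation; only the versioned-hash rule, 0x01 followed by sha256(commitment)[1:], comes from the spec):

```go
package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

// sidecar is a stand-in for types.BlobTxSidecar.
type sidecar struct {
	blobs       [][]byte // opaque blob payloads
	commitments [][]byte // one KZG commitment per blob
	proofs      [][]byte // one KZG proof per blob
}

// versionedHash derives the EIP-4844 versioned hash of a commitment:
// sha256(commitment) with the first byte replaced by the version 0x01.
func versionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01
	return h
}

// checkSidecar enforces the structural invariants the tests above probe:
// equal counts of blobs, commitments and proofs, and commitments whose
// versioned hashes match the hashes carried by the blob transaction.
func checkSidecar(sc sidecar, txBlobHashes [][32]byte) error {
	if len(sc.blobs) != len(sc.commitments) || len(sc.blobs) != len(sc.proofs) {
		return errors.New("blob/commitment/proof count mismatch")
	}
	if len(sc.commitments) != len(txBlobHashes) {
		return errors.New("sidecar does not cover the tx blob hashes")
	}
	for i, c := range sc.commitments {
		if versionedHash(c) != txBlobHashes[i] {
			return fmt.Errorf("blob %d: versioned hash mismatch", i)
		}
	}
	return nil // a full verifier would also run the KZG proof check per blob
}

func main() {
	c := []byte("commitment-bytes")
	sc := sidecar{
		blobs:       [][]byte{make([]byte, 8)},
		commitments: [][]byte{c},
		proofs:      [][]byte{make([]byte, 8)},
	}
	fmt.Println(checkSidecar(sc, [][32]byte{versionedHash(c)})) // <nil>
}
```

The deleted tests build real commitments and proofs through kzg4844.BlobToCommitment and kzg4844.ComputeBlobProof; a complete verifier runs the KZG proof check per blob on top of the shape check sketched here.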
diff --git a/core/genesis_test.go b/core/genesis_test.go
deleted file mode 100644
index 4b280bcf13..0000000000
--- a/core/genesis_test.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "bytes"
- "encoding/json"
- "math/big"
- "reflect"
- "testing"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
- "github.com/ethereum/go-ethereum/triedb/pathdb"
-)
-
-func TestSetupGenesis(t *testing.T) {
- testSetupGenesis(t, rawdb.HashScheme)
- testSetupGenesis(t, rawdb.PathScheme)
-}
-
-func testSetupGenesis(t *testing.T, scheme string) {
- var (
- customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50")
- customg = Genesis{
- Config: &params.ChainConfig{HomesteadBlock: big.NewInt(3)},
- Alloc: types.GenesisAlloc{
- {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
- },
- }
- oldcustomg = customg
- )
- oldcustomg.Config = &params.ChainConfig{HomesteadBlock: big.NewInt(2)}
-
- tests := []struct {
- name string
- fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error)
- wantConfig *params.ChainConfig
- wantHash common.Hash
- wantErr error
- }{
- {
- name: "genesis without ChainConfig",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
- },
- wantErr: errGenesisNoConfig,
- wantConfig: params.AllEthashProtocolChanges,
- },
- {
- name: "no block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
- },
- wantHash: params.BSCGenesisHash,
- wantConfig: params.BSCChainConfig,
- },
- {
- name: "mainnet block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
- return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
- },
- wantHash: params.MainnetGenesisHash,
- wantConfig: params.MainnetChainConfig,
- },
- {
- name: "custom block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := triedb.NewDatabase(db, newDbConfig(scheme))
- customg.Commit(db, tdb)
- return SetupGenesisBlock(db, tdb, nil)
- },
- wantHash: customghash,
- wantConfig: customg.Config,
- },
- {
- name: "custom block in DB, genesis == chapel",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := triedb.NewDatabase(db, newDbConfig(scheme))
- customg.Commit(db, tdb)
- return SetupGenesisBlock(db, tdb, DefaultChapelGenesisBlock())
- },
- wantErr: &GenesisMismatchError{Stored: customghash, New: params.ChapelGenesisHash},
- wantHash: params.ChapelGenesisHash,
- wantConfig: params.ChapelChainConfig,
- },
- {
- name: "compatible config in DB",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := triedb.NewDatabase(db, newDbConfig(scheme))
- oldcustomg.Commit(db, tdb)
- return SetupGenesisBlock(db, tdb, &customg)
- },
- wantHash: customghash,
- wantConfig: customg.Config,
- },
- {
- name: "incompatible config in DB",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- // Commit the 'old' genesis block with Homestead transition at #2.
- // Advance to block #4, past the homestead transition block of customg.
- tdb := triedb.NewDatabase(db, newDbConfig(scheme))
- oldcustomg.Commit(db, tdb)
-
- bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
- defer bc.Stop()
-
- _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil)
- bc.InsertChain(blocks)
-
- // This should return a compatibility error.
- return SetupGenesisBlock(db, tdb, &customg)
- },
- wantHash: customghash,
- wantConfig: customg.Config,
- wantErr: &params.ConfigCompatError{
- What: "Homestead fork block",
- StoredBlock: big.NewInt(2),
- NewBlock: big.NewInt(3),
- RewindToBlock: 1,
- },
- },
- }
-
- for _, test := range tests {
- db := rawdb.NewMemoryDatabase()
- config, hash, err := test.fn(db)
- // Check the return values.
- if !reflect.DeepEqual(err, test.wantErr) {
- spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true}
- t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(err), spew.NewFormatter(test.wantErr))
- }
- if !reflect.DeepEqual(config, test.wantConfig) {
- t.Errorf("%s:\nreturned %v\nwant %v", test.name, config, test.wantConfig)
- }
- if hash != test.wantHash {
- t.Errorf("%s: returned hash %s, want %s", test.name, hash.Hex(), test.wantHash.Hex())
- } else if err == nil {
- // Check database content.
- stored := rawdb.ReadBlock(db, test.wantHash, 0)
- if stored.Hash() != test.wantHash {
- t.Errorf("%s: block in DB has hash %s, want %s", test.name, stored.Hash(), test.wantHash)
- }
- }
- }
-}
-
-// TestGenesisHashes checks the congruity of default genesis data to
-// corresponding hardcoded genesis hash values.
-func TestGenesisHashes(t *testing.T) {
- for i, c := range []struct {
- genesis *Genesis
- want common.Hash
- }{
- {DefaultGenesisBlock(), params.MainnetGenesisHash},
- } {
- // Test via MustCommit
- db := rawdb.NewMemoryDatabase()
- if have := c.genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)).Hash(); have != c.want {
- t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
- }
- // Test via ToBlock
- if have := c.genesis.ToBlock().Hash(); have != c.want {
- t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
- }
- }
-}
-
-func TestGenesis_Commit(t *testing.T) {
- genesis := &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: params.TestChainConfig,
- // difficulty is nil
- }
-
- db := rawdb.NewMemoryDatabase()
- genesisBlock := genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
-
- if genesis.Difficulty != nil {
- t.Fatalf("assumption wrong")
- }
-
- // This value should have been set as default in the ToBlock method.
- if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 {
- t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty())
- }
-
- // Expect the stored total difficulty to be the difficulty of the genesis block.
- stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64())
-
- if stored.Cmp(genesisBlock.Difficulty()) != 0 {
- t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
- }
-}
-
-func TestReadWriteGenesisAlloc(t *testing.T) {
- var (
- db = rawdb.NewMemoryDatabase()
- alloc = &types.GenesisAlloc{
- {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
- {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
- }
- hash, _ = hashAlloc(alloc, false)
- )
- blob, _ := json.Marshal(alloc)
- rawdb.WriteGenesisStateSpec(db, hash, blob)
-
- var reload types.GenesisAlloc
- err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
- if err != nil {
- t.Fatalf("Failed to load genesis state %v", err)
- }
- if len(reload) != len(*alloc) {
- t.Fatal("Unexpected genesis allocation")
- }
- for addr, account := range reload {
- want, ok := (*alloc)[addr]
- if !ok {
- t.Fatal("Account is not found")
- }
- if !reflect.DeepEqual(want, account) {
- t.Fatal("Unexpected account")
- }
- }
-}
-
-func TestConfigOrDefault(t *testing.T) {
- defaultGenesis := DefaultGenesisBlock()
- if defaultGenesis.Config.PlanckBlock != nil {
- t.Errorf("initial config should have PlanckBlock = nil, but instead PlanckBlock = %v", defaultGenesis.Config.PlanckBlock)
- }
- gHash := params.BSCGenesisHash
- config := defaultGenesis.configOrDefault(gHash)
-
- if config.ChainID.Cmp(params.BSCChainConfig.ChainID) != 0 {
- t.Errorf("ChainID of resulting config should be %v, but is %v instead", params.BSCChainConfig.ChainID, config.ChainID)
- }
-
- if config.HomesteadBlock.Cmp(params.BSCChainConfig.HomesteadBlock) != 0 {
- t.Errorf("resulting config should have HomesteadBlock = %v, but instead is %v", params.BSCChainConfig, config.HomesteadBlock)
- }
-
- if config.PlanckBlock == nil {
- t.Errorf("resulting config should have PlanckBlock = %v , but instead is nil", params.BSCChainConfig.PlanckBlock)
- }
-
- if config.PlanckBlock.Cmp(params.BSCChainConfig.PlanckBlock) != 0 {
- t.Errorf("resulting config should have PlanckBlock = %v , but instead is %v", params.BSCChainConfig.PlanckBlock, config.PlanckBlock)
- }
-}
-
-func newDbConfig(scheme string) *triedb.Config {
- if scheme == rawdb.HashScheme {
- return triedb.HashDefaults
- }
- return &triedb.Config{PathDB: pathdb.Defaults}
-}
-
-func TestVerkleGenesisCommit(t *testing.T) {
- var verkleTime uint64 = 0
- verkleConfig := &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- DAOForkBlock: nil,
- DAOForkSupport: false,
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- BerlinBlock: big.NewInt(0),
- LondonBlock: big.NewInt(0),
- ArrowGlacierBlock: big.NewInt(0),
- GrayGlacierBlock: big.NewInt(0),
- MergeNetsplitBlock: nil,
- ShanghaiTime: &verkleTime,
- CancunTime: &verkleTime,
- PragueTime: &verkleTime,
- VerkleTime: &verkleTime,
- TerminalTotalDifficulty: big.NewInt(0),
- TerminalTotalDifficultyPassed: true,
- Ethash: nil,
- Clique: nil,
- }
-
- genesis := &Genesis{
- BaseFee: big.NewInt(params.InitialBaseFee),
- Config: verkleConfig,
- Timestamp: verkleTime,
- Difficulty: big.NewInt(0),
- Alloc: types.GenesisAlloc{
- {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
- },
- }
-
- expected := common.Hex2Bytes("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b")
- got := genesis.ToBlock().Root().Bytes()
- if !bytes.Equal(got, expected) {
- t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
- }
-
- db := rawdb.NewMemoryDatabase()
- triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
- block := genesis.MustCommit(db, triedb)
- if !bytes.Equal(block.Root().Bytes(), expected) {
- t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
- }
-
- // Test that the trie is verkle
- if !triedb.IsVerkle() {
- t.Fatalf("expected trie to be verkle")
- }
-
- if !rawdb.ExistsAccountTrieNode(db, nil) {
- t.Fatal("could not find node")
- }
-}
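With genesis_test.go removed, the decision matrix its table asserted is worth restating. SetupGenesisBlock reconciles a possibly-empty database with a possibly-nil genesis spec; below is a hedged, stand-alone sketch of the storage branches (the hash type and function shape are stand-ins, not the real API, and config validation such as the errGenesisNoConfig case is skipped):

```go
package main

import "fmt"

type hash [4]byte

var zero hash

// setupGenesis mirrors the branches exercised above: an empty database,
// an empty database with a spec, a conflicting spec, and no spec at all.
func setupGenesis(stored hash, supplied bool, suppliedHash, defaultHash hash) (hash, error) {
	switch {
	case stored == zero && !supplied:
		// Fresh database, no spec given: fall back to the default genesis.
		return defaultHash, nil
	case stored == zero:
		// Fresh database with a spec: commit and adopt it.
		return suppliedHash, nil
	case supplied && suppliedHash != stored:
		// Existing chain conflicts with the supplied spec.
		return suppliedHash, fmt.Errorf("genesis mismatch: stored %x, new %x", stored, suppliedHash)
	default:
		// Reuse the stored genesis; config compatibility is a separate check.
		return stored, nil
	}
}

func main() {
	fmt.Println(setupGenesis(zero, false, hash{}, hash{9}))    // default genesis
	fmt.Println(setupGenesis(hash{1}, true, hash{2}, hash{9})) // mismatch error
	fmt.Println(setupGenesis(hash{1}, false, hash{}, hash{9})) // stored genesis reused
}
```

The one branch the sketch folds away is the compatibility check: when stored and supplied genesis agree, the fork schedules are still compared, which is what surfaces the params.ConfigCompatError case in the deleted table.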
diff --git a/core/headerchain.go b/core/headerchain.go
index e6f2c76b96..38d07d8265 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -490,7 +490,6 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
-
// Short circuit if the header's already in the cache, retrieve otherwise
if hash != (common.Hash{}) {
if header, ok := hc.headerCache.Get(hash); ok {
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
deleted file mode 100644
index 25d9bfffcb..0000000000
--- a/core/headerchain_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "fmt"
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
-)
-
-func verifyUnbrokenCanonchain(hc *HeaderChain) error {
- h := hc.CurrentHeader()
- for {
- canonHash := rawdb.ReadCanonicalHash(hc.chainDb, h.Number.Uint64())
- if exp := h.Hash(); canonHash != exp {
- return fmt.Errorf("Canon hash chain broken, block %d got %x, expected %x",
- h.Number, canonHash[:8], exp[:8])
- }
- // Verify that we have the TD
- if td := rawdb.ReadTd(hc.chainDb, canonHash, h.Number.Uint64()); td == nil {
- return fmt.Errorf("Canon TD missing at block %d", h.Number)
- }
- if h.Number.Uint64() == 0 {
- break
- }
- h = hc.GetHeader(h.ParentHash, h.Number.Uint64()-1)
- }
- return nil
-}
-
-func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) {
- t.Helper()
-
- status, err := hc.InsertHeaderChain(chain, time.Now(), forker)
- if status != wantStatus {
- t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus)
- }
- // Always verify that the header chain is unbroken
- if err := verifyUnbrokenCanonchain(hc); err != nil {
- t.Fatal(err)
- }
- if !errors.Is(err, wantErr) {
- t.Fatalf("unexpected error from InsertHeaderChain: %v", err)
- }
-}
-
-// This test checks status reporting of InsertHeaderChain.
-func TestHeaderInsertion(t *testing.T) {
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges}
- )
- gspec.Commit(db, triedb.NewDatabase(db, nil))
- hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false })
- if err != nil {
- t.Fatal(err)
- }
- // chain A: G->A1->A2...A128
- genDb, chainA := makeHeaderChainWithGenesis(gspec, 128, ethash.NewFaker(), 10)
- // chain B: G->A1->B1...B128
- chainB := makeHeaderChain(gspec.Config, chainA[0], 128, ethash.NewFaker(), genDb, 10)
-
- forker := NewForkChoice(hc, nil)
- // Inserting 64 headers on an empty chain, expecting
- // 1 callback, 1 canon-status, 0 side-status.
- testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker)
-
- // Inserting 64 identical headers, expecting
- // 0 callbacks, 0 canon-status, 0 side-status.
- testInsert(t, hc, chainA[:64], NonStatTy, nil, forker)
-
- // Inserting a mix of already-known and new headers, expecting
- // 1 callback, 1 canon, 0 side
- testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker)
-
- // Inserting side blocks, but not overtaking the canon chain
- testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker)
-
- // Inserting more side blocks, but we don't have the parent
- testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker)
-
- // Inserting more side blocks, overtaking the canon chain
- testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker)
-
- // Inserting more A-headers, taking back the canonicality
- testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker)
-
- // And B becomes canon again
- testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker)
-
- // And B becomes even longer
- testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker)
-}
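The CanonStatTy/SideStatTy/NonStatTy expectations in the deleted TestHeaderInsertion all follow from total-difficulty fork choice. Here is a self-contained sketch of the classification, under the simplifying assumption that a single TD comparison decides canonicality (the names echo the WriteStatus constants above but are otherwise stand-ins):

```go
package main

import "fmt"

type writeStatus int

const (
	nonStat   writeStatus = iota // nothing new was written
	canonStat                    // segment extended or overtook the canonical chain
	sideStat                     // segment stored as a side chain
)

// classify decides the insert status from the segment's resulting total
// difficulty versus the current head's, plus whether anything new landed.
func classify(newTD, headTD uint64, anythingNew bool) writeStatus {
	if !anythingNew {
		return nonStat
	}
	if newTD > headTD {
		return canonStat
	}
	return sideStat
}

func main() {
	fmt.Println(classify(100, 90, true))  // 1 (canonStat): overtakes the head
	fmt.Println(classify(80, 90, true))   // 2 (sideStat): stored, not canonical
	fmt.Println(classify(90, 90, false))  // 0 (nonStat): headers already known
}
```

Read with this rule, the sequence above becomes mechanical: chainB[0:32] loses on TD and lands as a side insert, while chainB[32:97] overtakes and flips the canonical chain.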
diff --git a/core/state_prefetcher_test.go b/core/state_prefetcher_test.go
deleted file mode 100644
index b1c5974151..0000000000
--- a/core/state_prefetcher_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package core
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "math/big"
- "runtime/pprof"
- "strings"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/triedb"
-
- "github.com/google/pprof/profile"
-)
-
-func TestPrefetchLeaking(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- var (
- gendb = rawdb.NewMemoryDatabase()
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(100000000000000000)
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{address: {Balance: funds}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- triedb = triedb.NewDatabase(gendb, nil)
- genesis = gspec.MustCommit(gendb, triedb)
- signer = types.LatestSigner(gspec.Config)
- )
- blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) {
- block.SetCoinbase(common.Address{0x00})
- for j := 0; j < 100; j++ {
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
- if err != nil {
- panic(err)
- }
- block.AddTx(tx)
- }
- })
- archiveDb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(archiveDb, triedb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer archive.Stop()
-
- block := blocks[0]
- parent := archive.GetHeader(block.ParentHash(), block.NumberU64()-1)
- statedb, _ := state.NewWithSharedPool(parent.Root, archive.stateCache, archive.snaps)
- inter := make(chan struct{})
-
- Track(ctx, t, func(ctx context.Context) {
- close(inter)
- go archive.prefetcher.Prefetch(block, statedb, &archive.vmConfig, inter)
- time.Sleep(1 * time.Second)
- })
-}
-
-func Track(ctx context.Context, t *testing.T, fn func(context.Context)) {
- label := t.Name()
- pprof.Do(ctx, pprof.Labels("test", label), fn)
- if err := CheckNoGoroutines("test", label); err != nil {
- t.Fatal("Leaked goroutines\n", err)
- }
-}
-
-func CheckNoGoroutines(key, value string) error {
- var pb bytes.Buffer
- profiler := pprof.Lookup("goroutine")
- if profiler == nil {
- return errors.New("unable to find profile")
- }
- err := profiler.WriteTo(&pb, 0)
- if err != nil {
- return fmt.Errorf("unable to read profile: %w", err)
- }
-
- p, err := profile.ParseData(pb.Bytes())
- if err != nil {
- return fmt.Errorf("unable to parse profile: %w", err)
- }
-
- return summarizeGoroutines(p, key, value)
-}
-
-func summarizeGoroutines(p *profile.Profile, key, expectedValue string) error {
- var b strings.Builder
-
- for _, sample := range p.Sample {
- if !matchesLabel(sample, key, expectedValue) {
- continue
- }
-
- fmt.Fprintf(&b, "count %d @", sample.Value[0])
- // format the stack trace for each goroutine
- for _, loc := range sample.Location {
- for i, ln := range loc.Line {
- if i == 0 {
- fmt.Fprintf(&b, "# %#8x", loc.Address)
- if loc.IsFolded {
- fmt.Fprint(&b, " [F]")
- }
- } else {
- fmt.Fprint(&b, "# ")
- }
- if fn := ln.Function; fn != nil {
- fmt.Fprintf(&b, " %-50s %s:%d", fn.Name, fn.Filename, ln.Line)
- } else {
- fmt.Fprintf(&b, " ???")
- }
- fmt.Fprintf(&b, "\n")
- }
- }
- fmt.Fprintf(&b, "\n")
- }
-
- if b.Len() == 0 {
- return nil
- }
-
- return errors.New(b.String())
-}
-
-func matchesLabel(sample *profile.Sample, key, expectedValue string) bool {
- values, hasLabel := sample.Label[key]
- if !hasLabel {
- return false
- }
-
- for _, value := range values {
- if value == expectedValue {
- return true
- }
- }
-
- return false
-}
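The Track/CheckNoGoroutines machinery deleted above leans on one runtime guarantee: goroutines spawned while pprof.Do is executing inherit its profiler labels, so a later scan of the goroutine profile can be filtered to exactly the goroutines a test started. A minimal stand-alone demonstration of that mechanism (the substring match against the debug=1 dump is a simplification; the deleted helper parses the binary profile with google/pprof instead):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"runtime/pprof"
	"time"
)

func main() {
	// Goroutines started inside pprof.Do inherit the ("test", "demo") label.
	pprof.Do(context.Background(), pprof.Labels("test", "demo"), func(ctx context.Context) {
		go func() {
			time.Sleep(time.Hour) // deliberately leaked goroutine
		}()
	})
	time.Sleep(50 * time.Millisecond) // give the goroutine time to start

	var buf bytes.Buffer
	// debug=1 renders labels inline in the text dump, so a substring search
	// is enough here to show the leaked goroutine is attributable.
	pprof.Lookup("goroutine").WriteTo(&buf, 1)
	fmt.Println(bytes.Contains(buf.Bytes(), []byte(`"test":"demo"`))) // true
}
```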
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
deleted file mode 100644
index f87997c7ed..0000000000
--- a/core/state_processor_test.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "crypto/ecdsa"
- "errors"
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
- "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/holiman/uint256"
- "golang.org/x/crypto/sha3"
-)
-
-func u64(val uint64) *uint64 { return &val }
-
-// TestStateProcessorErrors tests the output from the 'core' errors
-// as defined in core/error.go. These errors are generated when the
-// blockchain imports bad blocks, meaning blocks which have valid headers but
-// contain invalid transactions.
-func TestStateProcessorErrors(t *testing.T) {
- var (
- config = &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- MirrorSyncBlock: big.NewInt(0),
- BrunoBlock: big.NewInt(0),
- EulerBlock: big.NewInt(0),
- BerlinBlock: big.NewInt(0),
- LondonBlock: big.NewInt(0),
- GibbsBlock: big.NewInt(0),
- Ethash: new(params.EthashConfig),
- TerminalTotalDifficulty: big.NewInt(0),
- TerminalTotalDifficultyPassed: true,
- ShanghaiTime: new(uint64),
- CancunTime: new(uint64),
- }
- signer = types.LatestSigner(config)
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020")
- )
- var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction {
- tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key)
- return tx
- }
- var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction {
- tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
- Nonce: nonce,
- GasTipCap: gasTipCap,
- GasFeeCap: gasFeeCap,
- Gas: gasLimit,
- To: &to,
- Value: big.NewInt(0),
- }), signer, key1)
- return tx
- }
- var mkDynamicCreationTx = func(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction {
- tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{
- Nonce: nonce,
- GasTipCap: gasTipCap,
- GasFeeCap: gasFeeCap,
- Gas: gasLimit,
- Value: big.NewInt(0),
- Data: data,
- }), signer, key1)
- return tx
- }
- var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int, hashes []common.Hash) *types.Transaction {
- tx, err := types.SignTx(types.NewTx(&types.BlobTx{
- Nonce: nonce,
- GasTipCap: uint256.MustFromBig(gasTipCap),
- GasFeeCap: uint256.MustFromBig(gasFeeCap),
- Gas: gasLimit,
- To: to,
- BlobHashes: hashes,
- BlobFeeCap: uint256.MustFromBig(blobGasFeeCap),
- Value: new(uint256.Int),
- }), signer, key1)
- if err != nil {
- t.Fatal(err)
- }
- return tx
- }
-
- { // Tests against a 'recent' chain definition
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: 0,
- },
- common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): types.Account{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: math.MaxUint64,
- },
- },
- }
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
- tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
- )
-
- defer blockchain.Stop()
- bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
- tooBigNumber := new(big.Int).Set(bigNumber)
- tooBigNumber.Add(tooBigNumber, common.Big1)
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrNonceTooLow
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
- makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 1 [0x0026256b3939ed97e2c4a6f3fce8ecf83bdcfa6d507c47838c308a1fb0436f62]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1",
- },
- { // ErrNonceTooHigh
- txs: []*types.Transaction{
- makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0xdebad714ca7f363bd0d8121c4518ad48fa469ca81b0a081be3d10c17460f751b]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0",
- },
- { // ErrNonceMax
- txs: []*types.Transaction{
- makeTx(key2, math.MaxUint64, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0x84ea18d60eb2bb3b040e3add0eb72f757727122cc257dd858c67cb6591a85986]: nonce has max value: address 0xfd0810DD14796680f72adf1a371963d0745BCc64, nonce: 18446744073709551615",
- },
- { // ErrGasLimitReached
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(0), 21000000, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached",
- },
- { // ErrInsufficientFundsForTransfer
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(1000000000000000000), params.TxGas, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0x98c796b470f7fcab40aaef5c965a602b0238e1034cce6fb73823042dd0638d74]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 1000018375000000000",
- },
- { // ErrInsufficientFunds
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil),
- },
- want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 18900000000000000000000",
- },
- // ErrGasUintOverflow
- // One missing 'core' error is ErrGasUintOverflow: "gas uint64 overflow",
- // In order to trigger that one, we'd have to allocate a _huge_ chunk of data, such that the
- // multiplication len(data) * gas_per_byte overflows uint64. Not testable at the moment.
- { // ErrIntrinsicGas
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0xcf3b049a0b516cb4f9274b3e2a264359e2ba53b2fb64b7bda2c634d5c9d01fca]: intrinsic gas too low: have 20000, want 21000",
- },
- { // ErrGasLimitReached
- txs: []*types.Transaction{
- makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*1000, big.NewInt(875000000), nil),
- },
- want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached",
- },
- { // ErrFeeCapTooLow does not apply because default BaseFee is 0
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)),
- },
- want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0, baseFee: 875000000",
- },
- { // ErrTipVeryHigh
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, tooBigNumber, big.NewInt(1)),
- },
- want: "could not apply tx 0 [0x15b8391b9981f266b32f3ab7da564bbeb3d6c21628364ea9b32a21139f89f712]: max priority fee per gas higher than 2^256-1: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxPriorityFeePerGas bit length: 257",
- },
- { // ErrFeeCapVeryHigh
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), tooBigNumber),
- },
- want: "could not apply tx 0 [0x48bc299b83fdb345c57478f239e89814bb3063eb4e4b49f3b6057a69255c16bd]: max fee per gas higher than 2^256-1: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas bit length: 257",
- },
- { // ErrTipAboveFeeCap
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(2), big.NewInt(1)),
- },
- want: "could not apply tx 0 [0xf987a31ff0c71895780a7612f965a0c8b056deb54e020bb44fa478092f14c9b4]: max priority fee per gas higher than max fee per gas: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxPriorityFeePerGas: 2, maxFeePerGas: 1",
- },
- { // ErrInsufficientFunds
- // Available balance: 1000000000000000000
- // Effective cost: 18375000021000
- // FeeCap * gas: 1050000000000000000
- // This test is designed to have the effective cost be covered by the balance, but
- // the extended requirement on FeeCap*gas < balance to fail
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(50000000000000)),
- },
- want: "could not apply tx 0 [0x413603cd096a87f41b1660d3ed3e27d62e1da78eac138961c0a1314ed43bd129]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 1050000000000000000",
- },
- { // Another ErrInsufficientFunds, this one to ensure that feecap/tip of max u256 is allowed
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas, bigNumber, bigNumber),
- },
- want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 required balance exceeds 256 bits",
- },
- { // ErrMaxInitCodeSizeExceeded
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.InitialBaseFee), tooBigInitCode[:]),
- },
- want: "could not apply tx 0 [0xd491405f06c92d118dd3208376fcee18a57c54bc52063ee4a26b1cf296857c25]: max initcode size exceeded: code size 49153 limit 49152",
- },
- { // ErrIntrinsicGas: Not enough gas to cover init code
- txs: []*types.Transaction{
- mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.InitialBaseFee), make([]byte, 320)),
- },
- want: "could not apply tx 0 [0xfd49536a9b323769d8472fcb3ebb3689b707a349379baee3e2ee3fe7baae06a1]: intrinsic gas too low: have 54299, want 54300",
- },
- { // ErrBlobFeeCapTooLow
- txs: []*types.Transaction{
- mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), big.NewInt(0), []common.Hash{(common.Hash{1})}),
- },
- want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 875000000",
- },
- } {
- block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
- }
- }
- }
-
- // ErrTxTypeNotSupported, For this, we need an older chain
- {
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{
- Config: &params.ChainConfig{
- ChainID: big.NewInt(1),
- HomesteadBlock: big.NewInt(0),
- EIP150Block: big.NewInt(0),
- EIP155Block: big.NewInt(0),
- EIP158Block: big.NewInt(0),
- ByzantiumBlock: big.NewInt(0),
- ConstantinopleBlock: big.NewInt(0),
- PetersburgBlock: big.NewInt(0),
- IstanbulBlock: big.NewInt(0),
- MuirGlacierBlock: big.NewInt(0),
- },
- Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: 0,
- },
- },
- }
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- )
- defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want error
- }{
- { // ErrTxTypeNotSupported
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
- },
- want: types.ErrTxTypeNotSupported,
- },
- } {
- block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err, tt.want; !errors.Is(have, want) {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
- }
- }
- }
-
- // ErrSenderNoEOA, for this we need the sender to have contract code
- {
- var (
- db = rawdb.NewMemoryDatabase()
- gspec = &Genesis{
- Config: config,
- Alloc: types.GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
- Balance: big.NewInt(1000000000000000000), // 1 ether
- Nonce: 0,
- Code: common.FromHex("0xB0B0FACE"),
- },
- },
- }
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
- )
- defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrSenderNoEOA
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
- },
- want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1",
- },
- } {
- block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
- }
- }
- }
-}
-
-// GenerateBadBlock constructs a "block" which contains the transactions. The transactions are not expected to be
-// valid, and no proper post-state can be made. But from the perspective of the blockchain, the block is sufficiently
-// valid to be considered for import:
-// - valid pow (fake), ancestry, difficulty, gaslimit etc
-func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block {
- difficulty := big.NewInt(0)
- if !config.TerminalTotalDifficultyPassed {
- fakeChainReader := newChainMaker(nil, config, engine)
- difficulty = engine.CalcDifficulty(fakeChainReader, parent.Time()+10, &types.Header{
- Number: parent.Number(),
- Time: parent.Time(),
- Difficulty: parent.Difficulty(),
- UncleHash: parent.UncleHash(),
- })
- }
-
- header := &types.Header{
- ParentHash: parent.Hash(),
- Coinbase: parent.Coinbase(),
- Difficulty: difficulty,
- GasLimit: parent.GasLimit(),
- Number: new(big.Int).Add(parent.Number(), common.Big1),
- Time: parent.Time() + 10,
- UncleHash: types.EmptyUncleHash,
- }
- if config.IsLondon(header.Number) {
- header.BaseFee = eip1559.CalcBaseFee(config, parent.Header())
- }
- if config.IsShanghai(header.Number, header.Time) {
- header.WithdrawalsHash = &types.EmptyWithdrawalsHash
- }
- var receipts []*types.Receipt
- // The post-state result doesn't need to be correct (this is a bad block), but we do need something there
- // Preferably something unique. So let's use a combo of blocknum + txhash
- hasher := sha3.NewLegacyKeccak256()
- hasher.Write(header.Number.Bytes())
- var cumulativeGas uint64
- var nBlobs int
- for _, tx := range txs {
- txh := tx.Hash()
- hasher.Write(txh[:])
- receipt := types.NewReceipt(nil, false, cumulativeGas+tx.Gas())
- receipt.TxHash = tx.Hash()
- receipt.GasUsed = tx.Gas()
- receipts = append(receipts, receipt)
- cumulativeGas += tx.Gas()
- nBlobs += len(tx.BlobHashes())
- }
- header.Root = common.BytesToHash(hasher.Sum(nil))
- if config.IsCancun(header.Number, header.Time) {
- var pExcess, pUsed = uint64(0), uint64(0)
- if parent.ExcessBlobGas() != nil {
- pExcess = *parent.ExcessBlobGas()
- pUsed = *parent.BlobGasUsed()
- }
- excess := eip4844.CalcExcessBlobGas(pExcess, pUsed)
- used := uint64(nBlobs * params.BlobTxBlobGasPerBlob)
- header.ExcessBlobGas = &excess
- header.BlobGasUsed = &used
-
- beaconRoot := common.HexToHash("0xbeac00")
- if config.Parlia == nil {
- header.ParentBeaconRoot = &beaconRoot
- }
- }
- // Assemble and return the final block for sealing
- if config.IsShanghai(header.Number, header.Time) {
- return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil))
- }
- return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
-}
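One Cancun detail in GenerateBadBlock above deserves a gloss: header.ExcessBlobGas is rolled forward from the parent via eip4844.CalcExcessBlobGas. Per EIP-4844 that update is saturating arithmetic against a fixed per-block target; the following is a stand-alone sketch with the Cancun constants, not the geth implementation:

```go
package main

import "fmt"

const (
	blobGasPerBlob = 1 << 17            // 131072 gas per blob
	targetBlobGas  = 3 * blobGasPerBlob // Cancun target: three blobs per block
)

// calcExcessBlobGas carries the parent's excess plus its blob gas used
// into the child, minus the per-block target, floored at zero.
func calcExcessBlobGas(parentExcess, parentUsed uint64) uint64 {
	if parentExcess+parentUsed < targetBlobGas {
		return 0
	}
	return parentExcess + parentUsed - targetBlobGas
}

func main() {
	fmt.Println(calcExcessBlobGas(0, 6*blobGasPerBlob)) // 393216: three blobs over target
	fmt.Println(calcExcessBlobGas(0, 2*blobGasPerBlob)) // 0: under target, floored
}
```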
diff --git a/core/txindexer_test.go b/core/txindexer_test.go
deleted file mode 100644
index d078046355..0000000000
--- a/core/txindexer_test.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2024 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "math/big"
- "os"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/ethash"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// TestTxIndexer tests the functionalities for managing transaction indexes.
-func TestTxIndexer(t *testing.T) {
- var (
- testBankKey, _ = crypto.GenerateKey()
- testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
- testBankFunds = big.NewInt(1000000000000000000)
-
- gspec = &Genesis{
- Config: params.TestChainConfig,
- Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- engine = ethash.NewFaker()
- nonce = uint64(0)
- chainHead = uint64(128)
- )
- _, blocks, receipts := GenerateChainWithGenesis(gspec, engine, int(chainHead), func(i int, gen *BlockGen) {
- tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
- gen.AddTx(tx)
- nonce += 1
- })
-
- // verifyIndexes checks whether the transaction indexes of the specified
- // block are present.
- verifyIndexes := func(db ethdb.Database, number uint64, exist bool) {
- if number == 0 {
- return
- }
- block := blocks[number-1]
- for _, tx := range block.Transactions() {
- lookup := rawdb.ReadTxLookupEntry(db, tx.Hash())
- if exist && lookup == nil {
- t.Fatalf("missing %d %x", number, tx.Hash().Hex())
- }
- if !exist && lookup != nil {
- t.Fatalf("unexpected %d %x", number, tx.Hash().Hex())
- }
- }
- }
- verify := func(db ethdb.Database, expTail uint64, indexer *txIndexer) {
- tail := rawdb.ReadTxIndexTail(db)
- if tail == nil {
- t.Fatal("Failed to write tx index tail")
- }
- if *tail != expTail {
- t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail)
- }
- if *tail != 0 {
- for number := uint64(0); number < *tail; number += 1 {
- verifyIndexes(db, number, false)
- }
- }
- for number := *tail; number <= chainHead; number += 1 {
- verifyIndexes(db, number, true)
- }
- progress := indexer.report(chainHead, tail)
- if !progress.Done() {
- t.Fatalf("Expect fully indexed")
- }
- }
-
- var cases = []struct {
- limitA uint64
- tailA uint64
- limitB uint64
- tailB uint64
- limitC uint64
- tailC uint64
- }{
- {
- // LimitA: 0
- // TailA: 0
- //
- // all blocks are indexed
- limitA: 0,
- tailA: 0,
-
- // LimitB: 1
- // TailB: 128
- //
- // block-128 is indexed
- limitB: 1,
- tailB: 128,
-
- // LimitC: 64
- // TailC: 65
- //
- // blocks [65, 128] are indexed
- limitC: 64,
- tailC: 65,
- },
- {
- // LimitA: 64
- // TailA: 65
- //
- // blocks [65, 128] are indexed
- limitA: 64,
- tailA: 65,
-
- // LimitB: 1
- // TailB: 128
- //
- // block-128 is indexed
- limitB: 1,
- tailB: 128,
-
- // LimitC: 64
- // TailC: 65
- //
- // blocks [65, 128] are indexed
- limitC: 64,
- tailC: 65,
- },
- {
- // LimitA: 127
- // TailA: 2
- //
- // blocks [2, 128] are indexed
- limitA: 127,
- tailA: 2,
-
- // LimitB: 1
- // TailB: 128
- //
- // block-128 is indexed
- limitB: 1,
- tailB: 128,
-
- // LimitC: 64
- // TailC: 65
- //
- // blocks [65, 128] are indexed
- limitC: 64,
- tailC: 65,
- },
- {
- // LimitA: 128
- // TailA: 1
- //
- // blocks [1, 128] are indexed
- limitA: 128,
- tailA: 1,
-
- // LimitB: 1
- // TailB: 128
- //
- // block-128 is indexed
- limitB: 1,
- tailB: 128,
-
- // LimitC: 64
- // TailC: 65
- //
- // blocks [65, 128] are indexed
- limitC: 64,
- tailC: 65,
- },
- {
- // LimitA: 129
- // TailA: 0
- //
- // blocks [0, 128] are indexed
- limitA: 129,
- tailA: 0,
-
- // LimitB: 1
- // TailB: 128
- //
- // block-128 is indexed
- limitB: 1,
- tailB: 128,
-
- // LimitC: 64
- // TailC: 65
- //
- // blocks [65, 128] are indexed
- limitC: 64,
- tailC: 65,
- },
- }
- for _, c := range cases {
- frdir := t.TempDir()
- db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
- rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
-
- // Index the initial blocks from ancient store
- indexer := &txIndexer{
- limit: c.limitA,
- db: db,
- progress: make(chan chan TxIndexProgress),
- }
- indexer.run(nil, 128, make(chan struct{}), make(chan struct{}))
- verify(db, c.tailA, indexer)
-
- indexer.limit = c.limitB
- indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
- verify(db, c.tailB, indexer)
-
- indexer.limit = c.limitC
- indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
- verify(db, c.tailC, indexer)
-
- // Recover all indexes
- indexer.limit = 0
- indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
- verify(db, 0, indexer)
-
- db.Close()
- os.RemoveAll(frdir)
- }
-}
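Every limit/tail pair in the deleted TestTxIndexer cases comes from one formula: with a non-zero limit, only the most recent limit blocks stay indexed, so the expected tail is head-limit+1, clamped to genesis when the limit is zero or exceeds the chain length. A stand-alone sketch reproducing the table's numbers (assumed arithmetic mirroring the case comments above, not the indexer's actual code path):

```go
package main

import "fmt"

// indexTail returns the first block whose transactions remain indexed
// for a chain at `head` under retention `limit` (0 means index everything).
func indexTail(head, limit uint64) uint64 {
	if limit == 0 || limit > head {
		return 0 // whole chain indexed, genesis included
	}
	return head - limit + 1
}

func main() {
	fmt.Println(indexTail(128, 0))   // 0: everything indexed
	fmt.Println(indexTail(128, 1))   // 128: only the head block
	fmt.Println(indexTail(128, 64))  // 65: blocks [65, 128]
	fmt.Println(indexTail(128, 127)) // 2: blocks [2, 128]
	fmt.Println(indexTail(128, 128)) // 1: blocks [1, 128]
	fmt.Println(indexTail(128, 129)) // 0: limit exceeds chain length
}
```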
diff --git a/eth/sync.go b/eth/sync.go
index 96126618df..3b04d09920 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -96,7 +96,45 @@ func (cs *chainSyncer) handlePeerEvent() bool {
// loop runs in its own goroutine and launches the sync when necessary.
func (cs *chainSyncer) loop() {
- return
+ defer cs.handler.wg.Done()
+
+ cs.handler.blockFetcher.Start()
+ cs.handler.txFetcher.Start()
+ defer cs.handler.blockFetcher.Stop()
+ defer cs.handler.txFetcher.Stop()
+ defer cs.handler.downloader.Terminate()
+
+ // The force timer lowers the peer count threshold to one when it fires.
+ // This ensures we'll always start sync even if there aren't enough peers.
+ cs.force = time.NewTimer(forceSyncCycle)
+ defer cs.force.Stop()
+
+ for {
+ if op := cs.nextSyncOp(); op != nil {
+ cs.startSync(op)
+ }
+ select {
+ case <-cs.peerEventCh:
+ // Peer information changed, recheck.
+ case <-cs.doneCh:
+ cs.doneCh = nil
+ cs.force.Reset(forceSyncCycle)
+ cs.forced = false
+ case <-cs.force.C:
+ cs.forced = true
+
+ case <-cs.handler.quitSync:
+ // Disable all insertion on the blockchain. This needs to happen before
+ // terminating the downloader because the downloader waits for blockchain
+ // inserts, and these can take a long time to finish.
+ cs.handler.chain.StopInsert()
+ cs.handler.downloader.Terminate()
+ if cs.doneCh != nil {
+ <-cs.doneCh
+ }
+ return
+ }
+ }
}
// nextSyncOp determines whether sync is required at this time.
@@ -195,10 +233,6 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {
// doSync synchronizes the local blockchain with a remote peer.
func (h *handler) doSync(op *chainSyncOp) error {
// Run the sync cycle, and disable snap sync if we're past the pivot block
- if op.mode == downloader.NoSync {
- return nil
- }
-
err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
if err != nil {
return err