From 938734be3ce1e254a0a72e56f8bdfb41b58e8953 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 24 Apr 2024 11:05:10 +0300 Subject: [PATCH 01/64] params: begin 1.14.1 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index b3978be046..319f21b2a8 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 14 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 14 // Minor version component of the current release + VersionPatch = 1 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 0d4c38865e9cda492e71221c4c429d9b1bec8ac5 Mon Sep 17 00:00:00 2001 From: Martin HS Date: Wed, 24 Apr 2024 11:59:06 +0200 Subject: [PATCH 02/64] core/state: remove account reset operation v2 (#29520) * core/state, tests: remove account reset operation * core/state, core/vm: implement createcontract journal event * core/state: make createcontract not emit dirtied account, unskip tests * core/state: add createcontract to journal fuzzing * core/state: fix journal * core/state: address comments * core/state: remove useless code --------- Co-authored-by: Gary Rong --- core/state/journal.go | 151 ++++++++++++++---- core/state/state_object.go | 54 +++---- core/state/state_test.go | 102 +----------- core/state/statedb.go | 274 ++++++++++++++++---------------- core/state/statedb_fuzz_test.go | 4 +- core/state/statedb_test.go | 173 ++++++++++++++------ core/vm/evm.go | 21 ++- core/vm/interface.go | 1 + tests/block_test.go | 8 - 9 files changed, 428 insertions(+), 360 deletions(-) diff --git a/core/state/journal.go b/core/state/journal.go index 6cdc1fc868..cfd3782eb0 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -17,6 +17,8 @@ package state import ( + "maps" + "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -29,6 +31,9 @@ type journalEntry interface { // dirtied returns the Ethereum address modified by this journal entry. dirtied() *common.Address + + // copy returns a deep-copied journal entry. + copy() journalEntry } // journal contains the list of state modifications applied since the last state @@ -83,22 +88,31 @@ func (j *journal) length() int { return len(j.entries) } +// copy returns a deep-copied journal. +func (j *journal) copy() *journal { + entries := make([]journalEntry, 0, j.length()) + for i := 0; i < j.length(); i++ { + entries = append(entries, j.entries[i].copy()) + } + return &journal{ + entries: entries, + dirties: maps.Clone(j.dirties), + } +} + type ( // Changes to the account trie. createObjectChange struct { account *common.Address } - resetObjectChange struct { - account *common.Address - prev *stateObject - prevdestruct bool - prevAccount []byte - prevStorage map[common.Hash][]byte - prevAccountOriginExist bool - prevAccountOrigin []byte - prevStorageOrigin map[common.Hash][]byte + // createContractChange represents an account becoming a contract-account. + // This event happens prior to executing initcode. 
The journal-event simply + // manages the created-flag, in order to allow same-tx destruction. + createContractChange struct { + account common.Address } + selfDestructChange struct { account *common.Address prev bool // whether account had already self-destructed @@ -136,6 +150,7 @@ type ( touchChange struct { account *common.Address } + // Changes to the access list accessListAddAccountChange struct { address *common.Address @@ -145,6 +160,7 @@ type ( slot *common.Hash } + // Changes to transient storage transientStorageChange struct { account *common.Address key, prevalue common.Hash @@ -153,34 +169,30 @@ type ( func (ch createObjectChange) revert(s *StateDB) { delete(s.stateObjects, *ch.account) - delete(s.stateObjectsDirty, *ch.account) } func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { - s.setStateObject(ch.prev) - if !ch.prevdestruct { - delete(s.stateObjectsDestruct, ch.prev.address) - } - if ch.prevAccount != nil { - s.accounts[ch.prev.addrHash] = ch.prevAccount - } - if ch.prevStorage != nil { - s.storages[ch.prev.addrHash] = ch.prevStorage - } - if ch.prevAccountOriginExist { - s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin - } - if ch.prevStorageOrigin != nil { - s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin +func (ch createObjectChange) copy() journalEntry { + return createObjectChange{ + account: ch.account, } } -func (ch resetObjectChange) dirtied() *common.Address { - return ch.account +func (ch createContractChange) revert(s *StateDB) { + s.getStateObject(ch.account).newContract = false +} + +func (ch createContractChange) dirtied() *common.Address { + return nil +} + +func (ch createContractChange) copy() journalEntry { + return createContractChange{ + account: ch.account, + } } func (ch selfDestructChange) revert(s *StateDB) { @@ -195,6 +207,14 @@ func (ch selfDestructChange) dirtied() *common.Address { return ch.account } +func (ch selfDestructChange) copy() journalEntry { + return selfDestructChange{ + account: ch.account, + prev: ch.prev, + prevbalance: new(uint256.Int).Set(ch.prevbalance), + } +} + var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") func (ch touchChange) revert(s *StateDB) { @@ -204,6 +224,12 @@ func (ch touchChange) dirtied() *common.Address { return ch.account } +func (ch touchChange) copy() journalEntry { + return touchChange{ + account: ch.account, + } +} + func (ch balanceChange) revert(s *StateDB) { s.getStateObject(*ch.account).setBalance(ch.prev) } @@ -212,6 +238,13 @@ func (ch balanceChange) dirtied() *common.Address { return ch.account } +func (ch balanceChange) copy() journalEntry { + return balanceChange{ + account: ch.account, + prev: new(uint256.Int).Set(ch.prev), + } +} + func (ch nonceChange) revert(s *StateDB) { s.getStateObject(*ch.account).setNonce(ch.prev) } @@ -220,6 +253,13 @@ func (ch nonceChange) dirtied() *common.Address { return ch.account } +func (ch nonceChange) copy() journalEntry { + return nonceChange{ + account: ch.account, + prev: ch.prev, + } +} + func (ch codeChange) revert(s *StateDB) { s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } @@ -228,6 +268,14 @@ func (ch codeChange) dirtied() *common.Address { return ch.account } +func (ch codeChange) copy() journalEntry { + return codeChange{ + account: ch.account, + prevhash: common.CopyBytes(ch.prevhash), + prevcode: common.CopyBytes(ch.prevcode), + } +} + func (ch storageChange) revert(s *StateDB) { 
s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) } @@ -236,6 +284,14 @@ func (ch storageChange) dirtied() *common.Address { return ch.account } +func (ch storageChange) copy() journalEntry { + return storageChange{ + account: ch.account, + key: ch.key, + prevalue: ch.prevalue, + } +} + func (ch transientStorageChange) revert(s *StateDB) { s.setTransientState(*ch.account, ch.key, ch.prevalue) } @@ -244,6 +300,14 @@ func (ch transientStorageChange) dirtied() *common.Address { return nil } +func (ch transientStorageChange) copy() journalEntry { + return transientStorageChange{ + account: ch.account, + key: ch.key, + prevalue: ch.prevalue, + } +} + func (ch refundChange) revert(s *StateDB) { s.refund = ch.prev } @@ -252,6 +316,12 @@ func (ch refundChange) dirtied() *common.Address { return nil } +func (ch refundChange) copy() journalEntry { + return refundChange{ + prev: ch.prev, + } +} + func (ch addLogChange) revert(s *StateDB) { logs := s.logs[ch.txhash] if len(logs) == 1 { @@ -266,6 +336,12 @@ func (ch addLogChange) dirtied() *common.Address { return nil } +func (ch addLogChange) copy() journalEntry { + return addLogChange{ + txhash: ch.txhash, + } +} + func (ch addPreimageChange) revert(s *StateDB) { delete(s.preimages, ch.hash) } @@ -274,6 +350,12 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } +func (ch addPreimageChange) copy() journalEntry { + return addPreimageChange{ + hash: ch.hash, + } +} + func (ch accessListAddAccountChange) revert(s *StateDB) { /* One important invariant here, is that whenever a (addr, slot) is added, if the @@ -291,6 +373,12 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } +func (ch accessListAddAccountChange) copy() journalEntry { + return accessListAddAccountChange{ + address: ch.address, + } +} + func (ch accessListAddSlotChange) revert(s *StateDB) { s.accessList.DeleteSlot(*ch.address, *ch.slot) } @@ -298,3 +386,10 @@ func (ch accessListAddSlotChange) revert(s *StateDB) { func (ch accessListAddSlotChange) dirtied() *common.Address { return nil } + +func (ch accessListAddSlotChange) copy() journalEntry { + return accessListAddSlotChange{ + address: ch.address, + slot: ch.slot, + } +} diff --git a/core/state/state_object.go b/core/state/state_object.go index db3c32f2f2..8978b30542 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -32,21 +32,8 @@ import ( "github.com/holiman/uint256" ) -type Code []byte - -func (c Code) String() string { - return string(c) //strings.Join(Disassemble(c), " ") -} - type Storage map[common.Hash]common.Hash -func (s Storage) String() (str string) { - for key, value := range s { - str += fmt.Sprintf("%X : %X\n", key, value) - } - return -} - func (s Storage) Copy() Storage { return maps.Clone(s) } @@ -65,8 +52,8 @@ type stateObject struct { data types.StateAccount // Account data with all mutations applied in the scope of block // Write caches. - trie Trie // storage trie, which becomes non-nil on first access - code Code // contract bytecode, which gets set when code is loaded + trie Trie // storage trie, which becomes non-nil on first access + code []byte // contract bytecode, which gets set when code is loaded originStorage Storage // Storage cache of original entries to dedup rewrites pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block @@ -75,17 +62,16 @@ type stateObject struct { // Cache flags. 
dirtyCode bool // true if the code was updated - // Flag whether the account was marked as self-destructed. The self-destructed account - // is still accessible in the scope of same transaction. + // Flag whether the account was marked as self-destructed. The self-destructed + // account is still accessible in the scope of same transaction. selfDestructed bool - // Flag whether the account was marked as deleted. A self-destructed account - // or an account that is considered as empty will be marked as deleted at - // the end of transaction and no longer accessible anymore. - deleted bool - - // Flag whether the object was created in the current transaction - created bool + // This is an EIP-6780 flag indicating whether the object is eligible for + // self-destruct according to EIP-6780. The flag could be set either when + // the contract is just created within the current transaction, or when the + // object was previously existent and is being deployed as a contract within + // the current transaction. + newContract bool } // empty returns whether the account is considered empty. @@ -95,10 +81,7 @@ func (s *stateObject) empty() bool { // newObject creates a state object. func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { - var ( - origin = acct - created = acct == nil // true if the account was not existent - ) + origin := acct if acct == nil { acct = types.NewEmptyStateAccount() } @@ -111,7 +94,6 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s originStorage: make(Storage), pendingStorage: make(Storage), dirtyStorage: make(Storage), - created: created, } } @@ -264,6 +246,10 @@ func (s *stateObject) finalise(prefetch bool) { if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) } + // Revoke the flag at the end of the transaction. It finalizes the status + // of the newly-created object as it's no longer eligible for self-destruct + // by EIP-6780. For non-newly-created objects, it's a no-op. + s.newContract = false } // updateTrie is responsible for persisting cached storage changes into the @@ -463,12 +449,12 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj.trie = db.db.CopyTrie(s.trie) } obj.code = s.code - obj.dirtyStorage = s.dirtyStorage.Copy() obj.originStorage = s.originStorage.Copy() obj.pendingStorage = s.pendingStorage.Copy() - obj.selfDestructed = s.selfDestructed + obj.dirtyStorage = s.dirtyStorage.Copy() obj.dirtyCode = s.dirtyCode - obj.deleted = s.deleted + obj.selfDestructed = s.selfDestructed + obj.newContract = s.newContract return obj } @@ -483,7 +469,7 @@ func (s *stateObject) Address() common.Address { // Code returns the contract code associated with this object, if any. func (s *stateObject) Code() []byte { - if s.code != nil { + if len(s.code) != 0 { return s.code } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { @@ -501,7 +487,7 @@ func (s *stateObject) Code() []byte { // or zero if none. This method is an almost mirror of Code, but uses a cache // inside the database to avoid loading codes seen recently. 
func (s *stateObject) CodeSize() int { - if s.code != nil { + if len(s.code) != 0 { return len(s.code) } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { diff --git a/core/state/state_test.go b/core/state/state_test.go index c6e6db906e..9200e4abe9 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -194,106 +194,20 @@ func TestSnapshotEmpty(t *testing.T) { s.state.RevertToSnapshot(s.state.Snapshot()) } -func TestSnapshot2(t *testing.T) { +func TestCreateObjectRevert(t *testing.T) { state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + addr := common.BytesToAddress([]byte("so0")) + snap := state.Snapshot() - stateobjaddr0 := common.BytesToAddress([]byte("so0")) - stateobjaddr1 := common.BytesToAddress([]byte("so1")) - var storageaddr common.Hash - - data0 := common.BytesToHash([]byte{17}) - data1 := common.BytesToHash([]byte{18}) - - state.SetState(stateobjaddr0, storageaddr, data0) - state.SetState(stateobjaddr1, storageaddr, data1) - - // db, trie are already non-empty values - so0 := state.getStateObject(stateobjaddr0) + state.CreateAccount(addr) + so0 := state.getStateObject(addr) so0.SetBalance(uint256.NewInt(42), tracing.BalanceChangeUnspecified) so0.SetNonce(43) so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'}) - so0.selfDestructed = false - so0.deleted = false state.setStateObject(so0) - root, _ := state.Commit(0, false) - state, _ = New(root, state.db, state.snaps) - - // and one with deleted == true - so1 := state.getStateObject(stateobjaddr1) - so1.SetBalance(uint256.NewInt(52), tracing.BalanceChangeUnspecified) - so1.SetNonce(53) - so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'}) - so1.selfDestructed = true - so1.deleted = true - state.setStateObject(so1) - - so1 = state.getStateObject(stateobjaddr1) - if so1 != nil { - t.Fatalf("deleted object not nil when getting") - } - - snapshot := state.Snapshot() - state.RevertToSnapshot(snapshot) - - so0Restored := state.getStateObject(stateobjaddr0) - // Update lazily-loaded values before comparing. 
- so0Restored.GetState(storageaddr) - so0Restored.Code() - // non-deleted is equal (restored) - compareStateObjects(so0Restored, so0, t) - - // deleted should be nil, both before and after restore of state copy - so1Restored := state.getStateObject(stateobjaddr1) - if so1Restored != nil { - t.Fatalf("deleted object not nil after restoring snapshot: %+v", so1Restored) - } -} - -func compareStateObjects(so0, so1 *stateObject, t *testing.T) { - if so0.Address() != so1.Address() { - t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address) - } - if so0.Balance().Cmp(so1.Balance()) != 0 { - t.Fatalf("Balance mismatch: have %v, want %v", so0.Balance(), so1.Balance()) - } - if so0.Nonce() != so1.Nonce() { - t.Fatalf("Nonce mismatch: have %v, want %v", so0.Nonce(), so1.Nonce()) - } - if so0.data.Root != so1.data.Root { - t.Errorf("Root mismatch: have %x, want %x", so0.data.Root[:], so1.data.Root[:]) - } - if !bytes.Equal(so0.CodeHash(), so1.CodeHash()) { - t.Fatalf("CodeHash mismatch: have %v, want %v", so0.CodeHash(), so1.CodeHash()) - } - if !bytes.Equal(so0.code, so1.code) { - t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) - } - - if len(so1.dirtyStorage) != len(so0.dirtyStorage) { - t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage)) - } - for k, v := range so1.dirtyStorage { - if so0.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v) - } - } - for k, v := range so0.dirtyStorage { - if so1.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v) - } - } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) - } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) - } + state.RevertToSnapshot(snap) + if state.Exist(addr) { + t.Error("Unexpected account after revert") } } diff --git a/core/state/statedb.go b/core/state/statedb.go index ab152dd18d..4a934fe82c 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -44,6 +44,26 @@ type revision struct { journalIndex int } +type mutationType int + +const ( + update mutationType = iota + deletion +) + +type mutation struct { + typ mutationType + applied bool +} + +func (m *mutation) copy() *mutation { + return &mutation{typ: m.typ, applied: m.applied} +} + +func (m *mutation) isDelete() bool { + return m.typ == deletion +} + // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: @@ -75,12 +95,22 @@ type StateDB struct { accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format - // This map holds 'live' objects, which will get modified while processing - // a state transition. 
- stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value + // This map holds 'live' objects, which will get modified while + // processing a state transition. + stateObjects map[common.Address]*stateObject + + // This map holds 'deleted' objects. An object with the same address + // might also occur in the 'stateObjects' map due to account + // resurrection. The account value is tracked as the original value + // before the transition. This map is populated at the transaction + // boundaries. + stateObjectsDestruct map[common.Address]*types.StateAccount + + // This map tracks the account mutations that occurred during the + // transition. Uncommitted mutations belonging to the same account + // can be merged into a single one which is equivalent from database's + // perspective. This map is populated at the transaction boundaries. + mutations map[common.Address]*mutation // DB error. // State objects are used by the consensus core and VM which are @@ -154,9 +184,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) accountsOrigin: make(map[common.Address][]byte), storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), stateObjectsDestruct: make(map[common.Address]*types.StateAccount), + mutations: make(map[common.Address]*mutation), logs: make(map[common.Hash][]*types.Log), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -472,8 +501,7 @@ func (s *StateDB) Selfdestruct6780(addr common.Address) { if stateObject == nil { return } - - if stateObject.created { + if stateObject.newContract { s.SelfDestruct(addr) } } @@ -552,24 +580,16 @@ func (s *StateDB) deleteStateObject(addr common.Address) { } // getStateObject retrieves a state object given by the address, returning nil if -// the object is not found or was deleted in this execution context. If you need -// to differentiate between non-existent/just-deleted, use getDeletedStateObject. +// the object is not found or was deleted in this execution context. func (s *StateDB) getStateObject(addr common.Address) *stateObject { - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj - } - return nil -} - -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. -func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { // Prefer live objects if any is available if obj := s.stateObjects[addr]; obj != nil { return obj } + // Short circuit if the account is already destructed in this block. 
+ if _, ok := s.stateObjectsDestruct[addr]; ok { + return nil + } // If no live objects are available, attempt to use snapshots var data *types.StateAccount if s.snap != nil { @@ -622,69 +642,40 @@ func (s *StateDB) setStateObject(object *stateObject) { // getOrNewStateObject retrieves a state object or create a new state object if nil. func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject { - stateObject := s.getStateObject(addr) - if stateObject == nil { - stateObject, _ = s.createObject(addr) - } - return stateObject -} - -// createObject creates a new state object. If there is an existing account with -// the given address, it is overwritten and returned as the second return value. -func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - newobj = newObject(s, addr, nil) - if prev == nil { - s.journal.append(createObjectChange{account: &addr}) - } else { - // The original account should be marked as destructed and all cached - // account and storage data should be cleared as well. Note, it must - // be done here, otherwise the destruction event of "original account" - // will be lost. - _, prevdestruct := s.stateObjectsDestruct[prev.address] - if !prevdestruct { - s.stateObjectsDestruct[prev.address] = prev.origin - } - // There may be some cached account/storage data already since IntermediateRoot - // will be called for each transaction before byzantium fork which will always - // cache the latest account/storage data. - prevAccount, ok := s.accountsOrigin[prev.address] - s.journal.append(resetObjectChange{ - account: &addr, - prev: prev, - prevdestruct: prevdestruct, - prevAccount: s.accounts[prev.addrHash], - prevStorage: s.storages[prev.addrHash], - prevAccountOriginExist: ok, - prevAccountOrigin: prevAccount, - prevStorageOrigin: s.storagesOrigin[prev.address], - }) - delete(s.accounts, prev.addrHash) - delete(s.storages, prev.addrHash) - delete(s.accountsOrigin, prev.address) - delete(s.storagesOrigin, prev.address) + obj := s.getStateObject(addr) + if obj == nil { + obj = s.createObject(addr) } - s.setStateObject(newobj) - if prev != nil && !prev.deleted { - return newobj, prev - } - return newobj, nil + return obj } -// CreateAccount explicitly creates a state object. If a state object with the address -// already exists the balance is carried over to the new account. -// -// CreateAccount is called during the EVM CREATE operation. The situation might arise that -// a contract does the following: -// -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) -// -// Carrying over the balance ensures that Ether doesn't disappear. +// createObject creates a new state object. The assumption is held there is no +// existing account with the given address, otherwise it will be silently overwritten. +func (s *StateDB) createObject(addr common.Address) *stateObject { + obj := newObject(s, addr, nil) + s.journal.append(createObjectChange{account: &addr}) + s.setStateObject(obj) + return obj +} + +// CreateAccount explicitly creates a new state object, assuming that the +// account did not previously exist in the state. If the account already +// exists, this function will silently overwrite it which might lead to a +// consensus bug eventually. 
func (s *StateDB) CreateAccount(addr common.Address) { - newObj, prev := s.createObject(addr) - if prev != nil { - newObj.setBalance(prev.data.Balance) + s.createObject(addr) +} + +// CreateContract is used whenever a contract is created. This may be preceded +// by CreateAccount, but that is not required if it already existed in the +// state due to funds sent beforehand. +// This operation sets the 'newContract'-flag, which is required in order to +// correctly handle EIP-6780 'delete-in-same-transaction' logic. +func (s *StateDB) CreateContract(addr common.Address) { + obj := s.getStateObject(addr) + if !obj.newContract { + obj.newContract = true + s.journal.append(createContractChange{account: addr}) } } @@ -695,21 +686,25 @@ func (s *StateDB) Copy() *StateDB { state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), + hasher: crypto.NewKeccakState(), originalRoot: s.originalRoot, accounts: copySet(s.accounts), storages: copy2DSet(s.storages), accountsOrigin: copySet(s.accountsOrigin), storagesOrigin: copy2DSet(s.storagesOrigin), - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)), stateObjectsDestruct: maps.Clone(s.stateObjectsDestruct), + mutations: make(map[common.Address]*mutation, len(s.mutations)), + dbErr: s.dbErr, refund: s.refund, + thash: s.thash, + txIndex: s.txIndex, logs: make(map[common.Hash][]*types.Log, len(s.logs)), logSize: s.logSize, preimages: maps.Clone(s.preimages), - journal: newJournal(), - hasher: crypto.NewKeccakState(), + journal: s.journal.copy(), + validRevisions: slices.Clone(s.validRevisions), + nextRevisionId: s.nextRevisionId, // In order for the block producer to be able to use and make additions // to the snapshot tree, we need to copy that as well. Otherwise, any @@ -718,39 +713,14 @@ func (s *StateDB) Copy() *StateDB { snaps: s.snaps, snap: s.snap, } - // Copy the dirty states, logs, and preimages - for addr := range s.journal.dirties { - // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), - // and in the Finalise-method, there is a case where an object is in the journal but not - // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for - // nil - if object, exist := s.stateObjects[addr]; exist { - // Even though the original object is dirty, we are not copying the journal, - // so we need to make sure that any side-effect the journal would have caused - // during a commit (or similar op) is already applied to the copy. - state.stateObjects[addr] = object.deepCopy(state) - - state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits - state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits - } - } - // Above, we don't copy the actual journal. This means that if the copy - // is copied, the loop above will be a no-op, since the copy's journal - // is empty. Thus, here we iterate over stateObjects, to enable copies - // of copies. - for addr := range s.stateObjectsPending { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) - } - state.stateObjectsPending[addr] = struct{}{} + // Deep copy cached state objects. 
+ for addr, obj := range s.stateObjects { + state.stateObjects[addr] = obj.deepCopy(state) } - for addr := range s.stateObjectsDirty { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) - } - state.stateObjectsDirty[addr] = struct{}{} + // Deep copy the object state markers. + for addr, op := range s.mutations { + state.mutations[addr] = op.copy() } - // Deep copy the logs occurred in the scope of block for hash, logs := range s.logs { cpy := make([]*types.Log, len(logs)) @@ -760,7 +730,6 @@ func (s *StateDB) Copy() *StateDB { } state.logs[hash] = cpy } - // Do we need to copy the access list and transient storage? // In practice: No. At the start of a transaction, these two lists are empty. // In practice, we only ever copy state _between_ transactions/blocks, never @@ -825,7 +794,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { continue } if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { - obj.deleted = true + delete(s.stateObjects, obj.address) + s.markDelete(addr) // If ether was sent to account post-selfdestruct it is burnt. if bal := obj.Balance(); s.logger != nil && s.logger.OnBalanceChange != nil && obj.selfDestructed && bal.Sign() != 0 { @@ -846,11 +816,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { obj.finalise(true) // Prefetch slots in the background + s.markUpdate(addr) } - obj.created = false - s.stateObjectsPending[addr] = struct{}{} - s.stateObjectsDirty[addr] = struct{}{} - // At this point, also ship the address off to the precacher. The precacher // will start loading tries, and when the change is eventually committed, // the commit-phase will be a lot faster @@ -889,10 +856,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // the account prefetcher. Instead, let's process all the storage updates // first, giving the account prefetches just a few more milliseconds of time // to pull useful data from disk. - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot() + for addr, op := range s.mutations { + if op.applied { + continue + } + if op.isDelete() { + continue } + s.stateObjects[addr].updateRoot() } // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie @@ -902,7 +873,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.trie = trie } } - usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) // Perform updates before deletions. This prevents resolution of unnecessary trie nodes // in circumstances similar to the following: // @@ -913,13 +883,21 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // If the self-destruct is handled first, then `P` would be left with only one child, thus collapsed // into a shortnode. This requires `B` to be resolved from disk. // Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved. 
-	var deletedAddrs []common.Address
-	for addr := range s.stateObjectsPending {
-		if obj := s.stateObjects[addr]; !obj.deleted {
-			s.updateStateObject(obj)
-			s.AccountUpdated += 1
+	var (
+		usedAddrs    [][]byte
+		deletedAddrs []common.Address
+	)
+	for addr, op := range s.mutations {
+		if op.applied {
+			continue
+		}
+		op.applied = true
+
+		if op.isDelete() {
+			deletedAddrs = append(deletedAddrs, addr)
 		} else {
-			deletedAddrs = append(deletedAddrs, obj.address)
+			s.updateStateObject(s.stateObjects[addr])
+			s.AccountUpdated += 1
 		}
 		usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
 	}
@@ -930,9 +908,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	if prefetcher != nil {
 		prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
 	}
-	if len(s.stateObjectsPending) > 0 {
-		s.stateObjectsPending = make(map[common.Address]struct{})
-	}
 	// Track the amount of time wasted on hashing the account trie
 	defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
 
@@ -1176,11 +1151,12 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 		return common.Hash{}, err
 	}
 	// Handle all state updates afterwards
-	for addr := range s.stateObjectsDirty {
-		obj := s.stateObjects[addr]
-		if obj.deleted {
+	for addr, op := range s.mutations {
+		if op.isDelete() {
 			continue
 		}
+		obj := s.stateObjects[addr]
+
 		// Write any contract code associated with the state object
 		if obj.code != nil && obj.dirtyCode {
 			rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
@@ -1280,7 +1256,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 	s.storages = make(map[common.Hash]map[common.Hash][]byte)
 	s.accountsOrigin = make(map[common.Address][]byte)
 	s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
-	s.stateObjectsDirty = make(map[common.Address]struct{})
+	s.mutations = make(map[common.Address]*mutation)
 	s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
 	return root, nil
 }
@@ -1395,3 +1371,19 @@ func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.
 	}
 	return copied
 }
+
+func (s *StateDB) markDelete(addr common.Address) {
+	if _, ok := s.mutations[addr]; !ok {
+		s.mutations[addr] = &mutation{}
+	}
+	s.mutations[addr].applied = false
+	s.mutations[addr].typ = deletion
+}
+
+func (s *StateDB) markUpdate(addr common.Address) {
+	if _, ok := s.mutations[addr]; !ok {
+		s.mutations[addr] = &mutation{}
+	}
+	s.mutations[addr].applied = false
+	s.mutations[addr].typ = update
+}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 65cf278108..6317681a7f 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -96,7 +96,9 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
 		{
 			name: "CreateAccount",
 			fn: func(a testAction, s *StateDB) {
-				s.CreateAccount(addr)
+				if !s.Exist(addr) {
+					s.CreateAccount(addr)
+				}
 			},
 		},
 		{
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index edde6e8254..1a3eccfe10 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -225,6 +225,78 @@ func TestCopy(t *testing.T) {
 	}
 }
 
+// TestCopyWithDirtyJournal tests if Copy can correctly create an equal copy of a
+// stateDB with dirty journal present. 
+func TestCopyWithDirtyJournal(t *testing.T) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + orig, _ := New(types.EmptyRootHash, db, nil) + + // Fill up the initial states + for i := byte(0); i < 255; i++ { + obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) + obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified) + obj.data.Root = common.HexToHash("0xdeadbeef") + orig.updateStateObject(obj) + } + root, _ := orig.Commit(0, true) + orig, _ = New(root, db, nil) + + // modify all in memory without finalizing + for i := byte(0); i < 255; i++ { + obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) + obj.SubBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified) + orig.updateStateObject(obj) + } + cpy := orig.Copy() + + orig.Finalise(true) + for i := byte(0); i < 255; i++ { + root := orig.GetStorageRoot(common.BytesToAddress([]byte{i})) + if root != (common.Hash{}) { + t.Errorf("Unexpected storage root %x", root) + } + } + cpy.Finalise(true) + for i := byte(0); i < 255; i++ { + root := cpy.GetStorageRoot(common.BytesToAddress([]byte{i})) + if root != (common.Hash{}) { + t.Errorf("Unexpected storage root %x", root) + } + } + if cpy.IntermediateRoot(true) != orig.IntermediateRoot(true) { + t.Error("State is not equal after copy") + } +} + +// TestCopyObjectState creates an original state, S1, and makes a copy S2. +// It then proceeds to make changes to S1. Those changes are _not_ supposed +// to affect S2. This test checks that the copy properly deep-copies the objectstate +func TestCopyObjectState(t *testing.T) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + orig, _ := New(types.EmptyRootHash, db, nil) + + // Fill up the initial states + for i := byte(0); i < 5; i++ { + obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i})) + obj.AddBalance(uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified) + obj.data.Root = common.HexToHash("0xdeadbeef") + orig.updateStateObject(obj) + } + orig.Finalise(true) + cpy := orig.Copy() + for _, op := range cpy.mutations { + if have, want := op.applied, false; have != want { + t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want) + } + } + orig.Commit(0, true) + for _, op := range cpy.mutations { + if have, want := op.applied, false; have != want { + t.Fatalf("Error: original state affected copy, have %v want %v", have, want) + } + } +} + func TestSnapshotRandom(t *testing.T) { config := &quick.Config{MaxCount: 1000} err := quick.Check((*snapshotTest).run, config) @@ -308,7 +380,30 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { { name: "CreateAccount", fn: func(a testAction, s *StateDB) { - s.CreateAccount(addr) + if !s.Exist(addr) { + s.CreateAccount(addr) + } + }, + }, + { + name: "CreateContract", + fn: func(a testAction, s *StateDB) { + if !s.Exist(addr) { + s.CreateAccount(addr) + } + contractHash := s.GetCodeHash(addr) + emptyCode := contractHash == (common.Hash{}) || contractHash == types.EmptyCodeHash + storageRoot := s.GetStorageRoot(addr) + emptyStorage := storageRoot == (common.Hash{}) || storageRoot == types.EmptyRootHash + if s.GetNonce(addr) == 0 && emptyCode && emptyStorage { + s.CreateContract(addr) + // We also set some code here, to prevent the + // CreateContract action from being performed twice in a row, + // which would cause a difference in state when unrolling + // the journal. 
(CreateContract assumes created was false prior to
+					// invocation, and the journal rollback sets it to false).
+					s.SetCode(addr, []byte{1})
+				}
+			},
+		},
 		{
@@ -709,18 +804,19 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 	}
 }
 
-// TestCommitCopy tests the copy from a committed state is not functional.
+// TestCommitCopy tests the copy from a committed state is not fully functional.
 func TestCommitCopy(t *testing.T) {
-	state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+	db := NewDatabase(rawdb.NewMemoryDatabase())
+	state, _ := New(types.EmptyRootHash, db, nil)
 
 	// Create an account and check if the retrieved balance is correct
 	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
-	skey := common.HexToHash("aaa")
-	sval := common.HexToHash("bbb")
+	skey1, skey2 := common.HexToHash("a1"), common.HexToHash("a2")
+	sval1, sval2 := common.HexToHash("b1"), common.HexToHash("b2")
 
 	state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
 	state.SetCode(addr, []byte("hello"))                                         // Change an external metadata
-	state.SetState(addr, skey, sval)                                             // Change the storage trie
+	state.SetState(addr, skey1, sval1)                                           // Change the storage trie
 
 	if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 		t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
@@ -728,25 +824,38 @@ func TestCommitCopy(t *testing.T) {
 	if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
 		t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
 	}
-	if val := state.GetState(addr, skey); val != sval {
-		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
+	if val := state.GetState(addr, skey1); val != sval1 {
+		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval1)
 	}
-	if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
+	if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
 		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
 	}
-	// Copy the committed state database, the copied one is not functional.
-	state.Commit(0, true)
+	root, _ := state.Commit(0, true)
+
+	state, _ = New(root, db, nil)
+	state.SetState(addr, skey2, sval2)
+	state.Commit(1, true)
+
+	// Copy the committed state database, the copied one is not fully functional. 
copied := state.Copy() - if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(0)) != 0 { + if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 { t.Fatalf("unexpected balance: have %v", balance) } - if code := copied.GetCode(addr); code != nil { + if code := copied.GetCode(addr); !bytes.Equal(code, []byte("hello")) { t.Fatalf("unexpected code: have %x", code) } - if val := copied.GetState(addr, skey); val != (common.Hash{}) { + // Miss slots because of non-functional trie after commit + if val := copied.GetState(addr, skey1); val != (common.Hash{}) { + t.Fatalf("unexpected storage slot: have %x", sval1) + } + if val := copied.GetCommittedState(addr, skey1); val != (common.Hash{}) { t.Fatalf("unexpected storage slot: have %x", val) } - if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { + // Slots cached in the stateDB, available after commit + if val := copied.GetState(addr, skey2); val != sval2 { + t.Fatalf("unexpected storage slot: have %x", sval1) + } + if val := copied.GetCommittedState(addr, skey2); val != sval2 { t.Fatalf("unexpected storage slot: have %x", val) } if !errors.Is(copied.Error(), trie.ErrCommitted) { @@ -1103,40 +1212,6 @@ func TestStateDBTransientStorage(t *testing.T) { } } -func TestResetObject(t *testing.T) { - var ( - disk = rawdb.NewMemoryDatabase() - tdb = triedb.NewDatabase(disk, nil) - db = NewDatabaseWithNodeDB(disk, tdb) - snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash) - state, _ = New(types.EmptyRootHash, db, snaps) - addr = common.HexToAddress("0x1") - slotA = common.HexToHash("0x1") - slotB = common.HexToHash("0x2") - ) - // Initialize account with balance and storage in first transaction. - state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified) - state.SetState(addr, slotA, common.BytesToHash([]byte{0x1})) - state.IntermediateRoot(true) - - // Reset account and mutate balance and storages - state.CreateAccount(addr) - state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified) - state.SetState(addr, slotB, common.BytesToHash([]byte{0x2})) - root, _ := state.Commit(0, true) - - // Ensure the original account is wiped properly - snap := snaps.Snapshot(root) - slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes())) - if len(slot) != 0 { - t.Fatalf("Unexpected storage slot") - } - slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes())) - if !bytes.Equal(slot, []byte{0x2}) { - t.Fatalf("Unexpected storage slot value %v", slot) - } -} - func TestDeleteStorage(t *testing.T) { var ( disk = rawdb.NewMemoryDatabase() diff --git a/core/vm/evm.go b/core/vm/evm.go index 045506a1fd..c18353a973 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -436,14 +436,15 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, return nil, common.Address{}, gas, ErrNonceUintOverflow } evm.StateDB.SetNonce(caller.Address(), nonce+1) - // We add this to the access list _before_ taking a snapshot. Even if the creation fails, - // the access-list change should not be rolled back + + // We add this to the access list _before_ taking a snapshot. Even if the + // creation fails, the access-list change should not be rolled back. if evm.chainRules.IsBerlin { evm.StateDB.AddAddressToAccessList(address) } // Ensure there's no existing contract already at the designated address. 
// Account is regarded as existent if any of these three conditions is met: - // - the nonce is nonzero + // - the nonce is non-zero // - the code is non-empty // - the storage is non-empty contractHash := evm.StateDB.GetCodeHash(address) @@ -456,9 +457,19 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } return nil, common.Address{}, 0, ErrContractAddressCollision } - // Create a new account on the state + // Create a new account on the state only if the object was not present. + // It might be possible the contract code is deployed to a pre-existent + // account with non-zero balance. snapshot := evm.StateDB.Snapshot() - evm.StateDB.CreateAccount(address) + if !evm.StateDB.Exist(address) { + evm.StateDB.CreateAccount(address) + } + // CreateContract means that regardless of whether the account previously existed + // in the state trie or not, it _now_ becomes created as a _contract_ account. + // This is performed _prior_ to executing the initcode, since the initcode + // acts inside that account. + evm.StateDB.CreateContract(address) + if evm.chainRules.IsEIP158 { evm.StateDB.SetNonce(address, 1) } diff --git a/core/vm/interface.go b/core/vm/interface.go index 30742e96de..774360a08e 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -29,6 +29,7 @@ import ( // StateDB is an EVM database for full state querying. type StateDB interface { CreateAccount(common.Address) + CreateContract(common.Address) SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) diff --git a/tests/block_test.go b/tests/block_test.go index 43e3d99b3e..1ba84f5f24 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -64,14 +64,6 @@ func TestExecutionSpecBlocktests(t *testing.T) { } bt := new(testMatcher) - // These tests fail as of https://github.com/ethereum/go-ethereum/pull/28666, since we - // no longer delete "leftover storage" when deploying a contract. - bt.skipLoad(`^cancun/eip6780_selfdestruct/selfdestruct/self_destructing_initcode_create_tx.json`) - bt.skipLoad(`^cancun/eip6780_selfdestruct/selfdestruct/self_destructing_initcode.json`) - bt.skipLoad(`^cancun/eip6780_selfdestruct/selfdestruct/recreate_self_destructed_contract_different_txs.json`) - bt.skipLoad(`^cancun/eip6780_selfdestruct/selfdestruct/delegatecall_from_new_contract_to_pre_existing_contract.json`) - bt.skipLoad(`^cancun/eip6780_selfdestruct/selfdestruct/create_selfdestruct_same_tx.json`) - bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) { execBlockTest(t, bt, test) }) From ac21f9bfb5154854e241928840b3978d801eac8c Mon Sep 17 00:00:00 2001 From: qcrao Date: Wed, 24 Apr 2024 20:04:20 +0800 Subject: [PATCH 03/64] trie: preallocate capacity for fields slice (#29614) trie: Preallocate capacity for fields slice --- trie/trienode/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 95315c2e9a..055db8822e 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -78,7 +78,7 @@ func NewNodeSet(owner common.Hash) *NodeSet { // ForEachWithOrder iterates the nodes with the order from bottom to top, // right to left, nodes with the longest path will be iterated first. 
func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) { - var paths []string + paths := make([]string, 0, len(set.Nodes)) for path := range set.Nodes { paths = append(paths, path) } @@ -133,7 +133,7 @@ func (set *NodeSet) Size() (int, int) { // Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can // we get rid of it? func (set *NodeSet) Hashes() []common.Hash { - var ret []common.Hash + ret := make([]common.Hash, 0, len(set.Nodes)) for _, node := range set.Nodes { ret = append(ret, node.Hash) } From 7362691479df9eb7d76c8b968ed8f440372fab6e Mon Sep 17 00:00:00 2001 From: Aaron Chen Date: Wed, 24 Apr 2024 20:27:58 +0800 Subject: [PATCH 04/64] trie, consensus/clique: use maps.Clone (#29616) --- consensus/clique/snapshot.go | 23 ++++++----------------- trie/tracer.go | 18 +++++------------- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 8ff0b3a70f..d0b15e9489 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -19,6 +19,7 @@ package clique import ( "bytes" "encoding/json" + "maps" "slices" "time" @@ -108,28 +109,16 @@ func (s *Snapshot) store(db ethdb.Database) error { // copy creates a deep copy of the snapshot, though not the individual votes. func (s *Snapshot) copy() *Snapshot { - cpy := &Snapshot{ + return &Snapshot{ config: s.config, sigcache: s.sigcache, Number: s.Number, Hash: s.Hash, - Signers: make(map[common.Address]struct{}), - Recents: make(map[uint64]common.Address), - Votes: make([]*Vote, len(s.Votes)), - Tally: make(map[common.Address]Tally), - } - for signer := range s.Signers { - cpy.Signers[signer] = struct{}{} + Signers: maps.Clone(s.Signers), + Recents: maps.Clone(s.Recents), + Votes: slices.Clone(s.Votes), + Tally: maps.Clone(s.Tally), } - for block, signer := range s.Recents { - cpy.Recents[block] = signer - } - for address, tally := range s.Tally { - cpy.Tally[address] = tally - } - copy(cpy.Votes, s.Votes) - - return cpy } // validVote returns whether it makes sense to cast the specified vote in the diff --git a/trie/tracer.go b/trie/tracer.go index 5786af4d3e..90b9666f0b 100644 --- a/trie/tracer.go +++ b/trie/tracer.go @@ -17,6 +17,8 @@ package trie import ( + "maps" + "github.com/ethereum/go-ethereum/common" ) @@ -92,23 +94,13 @@ func (t *tracer) reset() { // copy returns a deep copied tracer instance. func (t *tracer) copy() *tracer { - var ( - inserts = make(map[string]struct{}) - deletes = make(map[string]struct{}) - accessList = make(map[string][]byte) - ) - for path := range t.inserts { - inserts[path] = struct{}{} - } - for path := range t.deletes { - deletes[path] = struct{}{} - } + accessList := make(map[string][]byte, len(t.accessList)) for path, blob := range t.accessList { accessList[path] = common.CopyBytes(blob) } return &tracer{ - inserts: inserts, - deletes: deletes, + inserts: maps.Clone(t.inserts), + deletes: maps.Clone(t.deletes), accessList: accessList, } } From 4f4f9d88d3a59e972c9fe3a5e6e6a93dad2e7db4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 24 Apr 2024 18:45:24 +0300 Subject: [PATCH 05/64] core/state: storage journal entry should revert dirtyness too (#29641) Currently our state journal tracks each storage update to a contract, having the ability to revert those changes to the previously set value. For the very first modification however, it behaves a bit wonky. 
Reverting the update doesn't actually remove the dirtiness of the slot;
rather, it leaves it as "change this slot to its original value". This can
cause issues down the line, for example with write witnesses needing to
gather an unneeded proof.

This PR modifies the storageChange journal entry to not only track the
previous value of a slot, but also whether there was any previous value at
all set in the current execution context.

In essence, the PR changes the semantics of storageChange so that it does
not simply track storage changes, but rather dirty storage changes, an
important distinction for being able to cleanly revert the journal item.
---
 core/state/journal.go      | 13 +++++++------
 core/state/state_object.go | 40 ++++++++++++++++++++++++++++----------
 2 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/core/state/journal.go b/core/state/journal.go
index cfd3782eb0..c0f5615c98 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -129,8 +129,9 @@ type (
 		prev    uint64
 	}
 	storageChange struct {
-		account       *common.Address
-		key, prevalue common.Hash
+		account   *common.Address
+		key       common.Hash
+		prevvalue *common.Hash
 	}
 	codeChange struct {
 		account            *common.Address
@@ -277,7 +278,7 @@ func (ch codeChange) copy() journalEntry {
 }
 
 func (ch storageChange) revert(s *StateDB) {
-	s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
+	s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue)
 }
 
 func (ch storageChange) dirtied() *common.Address {
@@ -286,9 +287,9 @@ func (ch storageChange) dirtied() *common.Address {
 
 func (ch storageChange) copy() journalEntry {
 	return storageChange{
-		account:  ch.account,
-		key:      ch.key,
-		prevalue: ch.prevalue,
+		account:   ch.account,
+		key:       ch.key,
+		prevvalue: ch.prevvalue,
 	}
 }
 
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 8978b30542..aa748f08ac 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -140,13 +140,20 @@ func (s *stateObject) getTrie() (Trie, error) {
 
 // GetState retrieves a value from the account storage trie.
 func (s *stateObject) GetState(key common.Hash) common.Hash {
+	value, _ := s.getState(key)
+	return value
+}
+
+// getState retrieves a value from the account storage trie and also returns if
+// the slot is already dirty or not.
+func (s *stateObject) getState(key common.Hash) (common.Hash, bool) {
 	// If we have a dirty value for this state entry, return it
 	value, dirty := s.dirtyStorage[key]
 	if dirty {
-		return value
+		return value, true
 	}
 	// Otherwise return the entry's original value
-	return s.GetCommittedState(key)
+	return s.GetCommittedState(key), false
 }
 
 // GetCommittedState retrieves a value from the committed account storage trie.
@@ -209,25 +216,38 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
 
 // SetState updates a value in account storage.
 func (s *stateObject) SetState(key, value common.Hash) {
-	// If the new value is the same as old, don't set
-	prev := s.GetState(key)
+	// If the new value is the same as old, don't set. Otherwise, track only the
+	// dirty changes, supporting reverting all of it back to no change. 
+ prev, dirty := s.getState(key) if prev == value { return } + var prevvalue *common.Hash + if dirty { + prevvalue = &prev + } // New value is different, update and journal the change s.db.journal.append(storageChange{ - account: &s.address, - key: key, - prevalue: prev, + account: &s.address, + key: key, + prevvalue: prevvalue, }) if s.db.logger != nil && s.db.logger.OnStorageChange != nil { s.db.logger.OnStorageChange(s.address, key, prev, value) } - s.setState(key, value) + s.setState(key, &value) } -func (s *stateObject) setState(key, value common.Hash) { - s.dirtyStorage[key] = value +// setState updates a value in account dirty storage. If the value being set is +// nil (assuming journal revert), the dirtyness is removed. +func (s *stateObject) setState(key common.Hash, value *common.Hash) { + // If the first set is being reverted, undo the dirty marker + if value == nil { + delete(s.dirtyStorage, key) + return + } + // Otherwise restore the previous value + s.dirtyStorage[key] = *value } // finalise moves all dirty storage slots into the pending area to be hashed or From 2f6ff492ae02313c9e45df5222bfd472b2481d76 Mon Sep 17 00:00:00 2001 From: yujinpark Date: Thu, 25 Apr 2024 14:47:29 +0900 Subject: [PATCH 06/64] internal/ethapi: typo (#29636) --- internal/ethapi/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 8b15d211d1..4b5145f5de 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -142,7 +142,7 @@ func (s *EthereumAPI) BlobBaseFee(ctx context.Context) *hexutil.Big { } // Syncing returns false in case the node is currently not syncing with the network. It can be up-to-date or has not -// yet received the latest block headers from its pears. In case it is synchronizing: +// yet received the latest block headers from its peers. 
In case it is synchronizing: // - startingBlock: block number this node started to synchronize from // - currentBlock: block number this node is currently importing // - highestBlock: block number of the highest block header this node has received from peers From a13b92524d9319424b5e9cdf4f9f1d77bf52849f Mon Sep 17 00:00:00 2001 From: Undefinedor Date: Thu, 25 Apr 2024 14:40:29 +0800 Subject: [PATCH 07/64] eth/protocols/eth,p2p/discover: remove unnecessary checks (#29590) fix useless condition --- eth/protocols/eth/handlers.go | 2 +- p2p/discover/v4_udp.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 96656afb1b..bdc630a9f4 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -190,7 +190,7 @@ func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHe return headers } { // Last mode: deliver ancestors of H - for i := uint64(1); header != nil && i < count; i++ { + for i := uint64(1); i < count; i++ { header = chain.GetHeaderByHash(header.ParentHash) if header == nil { break diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 44b1f5305c..7a0a0f1c77 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -548,7 +548,7 @@ func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { } packet := t.wrapPacket(rawpacket) fromID := fromKey.ID() - if err == nil && packet.preverify != nil { + if packet.preverify != nil { err = packet.preverify(packet, from, fromID, fromKey) } t.log.Trace("<< "+packet.Name(), "id", fromID, "addr", from, "err", err) From 243cde0f54e407ba52c570295886083d6e7d651e Mon Sep 17 00:00:00 2001 From: Martin HS Date: Thu, 25 Apr 2024 09:56:25 +0200 Subject: [PATCH 08/64] core/state: better randomized testing (postcheck) on journalling (#29627) This PR fixes some flaws with the existing tests. The randomized testing (TestSnapshotRandom) executes a series of steps which modify the state and create journal-events. Later on, we compare the forward-going-states against the backwards-unrolling-journal-states, and check that they are identical. The "identical" check is performed using various accessors. 
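For illustration, the shape of that forward/backward comparison is roughly the following. This is a minimal, self-contained sketch with made-up types and helpers (miniState, set, revertTo), not the actual geth test harness:

    // Sketch only: a toy state with an undo journal. Before every mutation we
    // record a snapshot plus a deep copy of the state; afterwards we unwind the
    // journal and check each intermediate state against its forward-going copy.
    package main

    import (
    	"fmt"
    	"maps"
    	"math/rand"
    )

    type miniState struct {
    	slots   map[string]string
    	journal []func(*miniState) // undo operations, newest last
    }

    // set mutates a slot and appends the corresponding undo entry.
    func (s *miniState) set(key, val string) {
    	prev, existed := s.slots[key]
    	s.journal = append(s.journal, func(st *miniState) {
    		if existed {
    			st.slots[key] = prev
    		} else {
    			delete(st.slots, key)
    		}
    	})
    	s.slots[key] = val
    }

    // snapshot returns the current journal length as a revision id.
    func (s *miniState) snapshot() int { return len(s.journal) }

    // revertTo unwinds the journal back to the given revision.
    func (s *miniState) revertTo(snap int) {
    	for i := len(s.journal) - 1; i >= snap; i-- {
    		s.journal[i](s)
    	}
    	s.journal = s.journal[:snap]
    }

    func main() {
    	state := &miniState{slots: map[string]string{}}
    	var (
    		snaps  []int
    		copies []map[string]string
    	)
    	// Forward pass: snapshot and copy the state before every random step.
    	for i := 0; i < 8; i++ {
    		snaps = append(snaps, state.snapshot())
    		copies = append(copies, maps.Clone(state.slots))
    		state.set(fmt.Sprintf("k%d", rand.Intn(4)), fmt.Sprintf("v%d", i))
    	}
    	// Backward pass: unwind and compare against the recorded copies.
    	for i := len(snaps) - 1; i >= 0; i-- {
    		state.revertTo(snaps[i])
    		if !maps.Equal(state.slots, copies[i]) {
    			panic(fmt.Sprintf("mismatch after reverting step %d", i))
    		}
    	}
    	fmt.Println("all reverted states match their forward-going copies")
    }

The real test performs the same unwind-and-compare loop against a full StateDB through its accessors, so any accessor that is not part of the comparison is a blind spot.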
It turned out that we failed to check some things: - the accesslist contents - the transient storage contents - the 'newContract' flag - the dirty storage map This change adds these new checks --- core/state/access_list.go | 35 +++++++++++++++++ core/state/state_object.go | 24 ++++++------ core/state/statedb_test.go | 68 ++++++++++++++++++++++++++++++++- core/state/transient_storage.go | 43 +++++++++++++++++++-- 4 files changed, 153 insertions(+), 17 deletions(-) diff --git a/core/state/access_list.go b/core/state/access_list.go index 718bf17cf7..b0effbeadc 100644 --- a/core/state/access_list.go +++ b/core/state/access_list.go @@ -17,7 +17,10 @@ package state import ( + "fmt" "maps" + "slices" + "strings" "github.com/ethereum/go-ethereum/common" ) @@ -130,3 +133,35 @@ func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { func (al *accessList) DeleteAddress(address common.Address) { delete(al.addresses, address) } + +// Equal returns true if the two access lists are identical +func (al *accessList) Equal(other *accessList) bool { + if !maps.Equal(al.addresses, other.addresses) { + return false + } + return slices.EqualFunc(al.slots, other.slots, + func(m map[common.Hash]struct{}, m2 map[common.Hash]struct{}) bool { + return maps.Equal(m, m2) + }) +} + +// PrettyPrint prints the contents of the access list in a human-readable form +func (al *accessList) PrettyPrint() string { + out := new(strings.Builder) + var sortedAddrs []common.Address + for addr := range al.addresses { + sortedAddrs = append(sortedAddrs, addr) + } + slices.SortFunc(sortedAddrs, common.Address.Cmp) + for _, addr := range sortedAddrs { + idx := al.addresses[addr] + fmt.Fprintf(out, "%#x : (idx %d)\n", addr, idx) + if idx >= 0 { + slotmap := al.slots[idx] + for h := range slotmap { + fmt.Fprintf(out, " %#x\n", h) + } + } + } + return out.String() +} diff --git a/core/state/state_object.go b/core/state/state_object.go index aa748f08ac..d3d20c3dc4 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -459,22 +459,22 @@ func (s *stateObject) setBalance(amount *uint256.Int) { func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, + db: db, + address: s.address, + addrHash: s.addrHash, + origin: s.origin, + data: s.data, + code: s.code, + originStorage: s.originStorage.Copy(), + pendingStorage: s.pendingStorage.Copy(), + dirtyStorage: s.dirtyStorage.Copy(), + dirtyCode: s.dirtyCode, + selfDestructed: s.selfDestructed, + newContract: s.newContract, } if s.trie != nil { obj.trie = db.db.CopyTrie(s.trie) } - obj.code = s.code - obj.originStorage = s.originStorage.Copy() - obj.pendingStorage = s.pendingStorage.Copy() - obj.dirtyStorage = s.dirtyStorage.Copy() - obj.dirtyCode = s.dirtyCode - obj.selfDestructed = s.selfDestructed - obj.newContract = s.newContract return obj } diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 1a3eccfe10..71d64f5628 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -21,9 +21,11 @@ import ( "encoding/binary" "errors" "fmt" + "maps" "math" "math/rand" "reflect" + "slices" "strings" "sync" "testing" @@ -557,10 +559,14 @@ func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.H if err != nil { return err } - it := trie.NewIterator(trieIt) + var ( + it = trie.NewIterator(trieIt) + visited = make(map[common.Hash]bool) + ) for it.Next() { key := 
common.BytesToHash(s.trie.GetKey(it.Key)) + visited[key] = true if value, dirty := so.dirtyStorage[key]; dirty { if !cb(key, value) { return nil @@ -600,6 +606,10 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr)) checkeq("GetCodeHash", state.GetCodeHash(addr), checkstate.GetCodeHash(addr)) checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr)) + // Check newContract-flag + if obj := state.getStateObject(addr); obj != nil { + checkeq("IsNewContract", obj.newContract, checkstate.getStateObject(addr).newContract) + } // Check storage. if obj := state.getStateObject(addr); obj != nil { forEachStorage(state, addr, func(key, value common.Hash) bool { @@ -608,12 +618,49 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { forEachStorage(checkstate, addr, func(key, value common.Hash) bool { return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) }) + other := checkstate.getStateObject(addr) + // Check dirty storage which is not in trie + if !maps.Equal(obj.dirtyStorage, other.dirtyStorage) { + print := func(dirty map[common.Hash]common.Hash) string { + var keys []common.Hash + out := new(strings.Builder) + for key := range dirty { + keys = append(keys, key) + } + slices.SortFunc(keys, common.Hash.Cmp) + for i, key := range keys { + fmt.Fprintf(out, " %d. %v %v\n", i, key, dirty[key]) + } + return out.String() + } + return fmt.Errorf("dirty storage err, have\n%v\nwant\n%v", + print(obj.dirtyStorage), + print(other.dirtyStorage)) + } + } + // Check transient storage. + { + have := state.transientStorage + want := checkstate.transientStorage + eq := maps.EqualFunc(have, want, + func(a Storage, b Storage) bool { + return maps.Equal(a, b) + }) + if !eq { + return fmt.Errorf("transient storage differs ,have\n%v\nwant\n%v", + have.PrettyPrint(), + want.PrettyPrint()) + } } if err != nil { return err } } - + if !checkstate.accessList.Equal(state.accessList) { // Check access lists + return fmt.Errorf("AccessLists are wrong, have \n%v\nwant\n%v", + checkstate.accessList.PrettyPrint(), + state.accessList.PrettyPrint()) + } if state.GetRefund() != checkstate.GetRefund() { return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d", state.GetRefund(), checkstate.GetRefund()) @@ -622,6 +669,23 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v", state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) } + if !maps.Equal(state.journal.dirties, checkstate.journal.dirties) { + getKeys := func(dirty map[common.Address]int) string { + var keys []common.Address + out := new(strings.Builder) + for key := range dirty { + keys = append(keys, key) + } + slices.SortFunc(keys, common.Address.Cmp) + for i, key := range keys { + fmt.Fprintf(out, " %d. 
%v\n", i, key) + } + return out.String() + } + have := getKeys(state.journal.dirties) + want := getKeys(checkstate.journal.dirties) + return fmt.Errorf("dirty-journal set mismatch.\nhave:\n%v\nwant:\n%v\n", have, want) + } return nil } diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go index 66e563efa7..e63db39eba 100644 --- a/core/state/transient_storage.go +++ b/core/state/transient_storage.go @@ -17,6 +17,10 @@ package state import ( + "fmt" + "slices" + "strings" + "github.com/ethereum/go-ethereum/common" ) @@ -30,10 +34,19 @@ func newTransientStorage() transientStorage { // Set sets the transient-storage `value` for `key` at the given `addr`. func (t transientStorage) Set(addr common.Address, key, value common.Hash) { - if _, ok := t[addr]; !ok { - t[addr] = make(Storage) + if value == (common.Hash{}) { // this is a 'delete' + if _, ok := t[addr]; ok { + delete(t[addr], key) + if len(t[addr]) == 0 { + delete(t, addr) + } + } + } else { + if _, ok := t[addr]; !ok { + t[addr] = make(Storage) + } + t[addr][key] = value } - t[addr][key] = value } // Get gets the transient storage for `key` at the given `addr`. @@ -53,3 +66,27 @@ func (t transientStorage) Copy() transientStorage { } return storage } + +// PrettyPrint prints the contents of the access list in a human-readable form +func (t transientStorage) PrettyPrint() string { + out := new(strings.Builder) + var sortedAddrs []common.Address + for addr := range t { + sortedAddrs = append(sortedAddrs, addr) + slices.SortFunc(sortedAddrs, common.Address.Cmp) + } + + for _, addr := range sortedAddrs { + fmt.Fprintf(out, "%#x:", addr) + var sortedKeys []common.Hash + storage := t[addr] + for key := range storage { + sortedKeys = append(sortedKeys, key) + } + slices.SortFunc(sortedKeys, common.Hash.Cmp) + for _, key := range sortedKeys { + fmt.Fprintf(out, " %X : %X\n", key, storage[key]) + } + } + return out.String() +} From 1f628d842c92cdfc85f260194078afad782fe824 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Apr 2024 11:50:25 +0300 Subject: [PATCH 09/64] build: build all the builders to build all the builders (#29647) * build: build all the builders to build all the builders * build: tweak the indexes a bit to make them consistent --- build/checksums.txt | 10 ++++---- build/ci.go | 45 +++++++++++++++++++++--------------- build/deb/ethereum/deb.rules | 5 ++-- 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 27577285b8..767fc88ce5 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -56,10 +56,12 @@ a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint- # This is the builder on PPA that will build Go itself (inception-y), don't modify! # # This version is fine to be old and full of security holes, we just use it -# to build the latest Go. Don't change it. If it ever becomes insufficient, -# we need to switch over to a recursive builder to jump across supported -# versions. +# to build the latest Go. Don't change it. 
# -# version:ppa-builder 1.19.6 +# version:ppa-builder-1 1.19.6 # https://go.dev/dl/ d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz + +# version:ppa-builder-2 1.21.9 +# https://go.dev/dl/ +58f0c5ced45a0012bce2ff7a9df03e128abcc8818ebabe5027bb92bafe20e421 go1.21.9.src.tar.gz diff --git a/build/ci.go b/build/ci.go index 4d8dba6ce2..abcc248ac5 100644 --- a/build/ci.go +++ b/build/ci.go @@ -694,8 +694,8 @@ func doDebianSource(cmdline []string) { } // Download and verify the Go source packages. var ( - gobootbundle = downloadGoBootstrapSources(*cachedir) - gobundle = downloadGoSources(*cachedir) + gobootbundles = downloadGoBootstrapSources(*cachedir) + gobundle = downloadGoSources(*cachedir) ) // Download all the dependencies needed to build the sources and run the ci script srcdepfetch := tc.Go("mod", "download") @@ -714,11 +714,13 @@ func doDebianSource(cmdline []string) { pkgdir := stageDebianSource(*workdir, meta) // Add bootstrapper Go source code - if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil { - log.Fatalf("Failed to extract bootstrapper Go sources: %v", err) - } - if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil { - log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err) + for i, gobootbundle := range gobootbundles { + if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil { + log.Fatalf("Failed to extract bootstrapper Go sources: %v", err) + } + if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, fmt.Sprintf(".goboot-%d", i+1))); err != nil { + log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err) + } } // Add builder Go source code if err := build.ExtractArchive(gobundle, pkgdir); err != nil { @@ -754,21 +756,26 @@ func doDebianSource(cmdline []string) { } } -// downloadGoBootstrapSources downloads the Go source tarball that will be used +// downloadGoBootstrapSources downloads the Go source tarball(s) that will be used // to bootstrap the builder Go. -func downloadGoBootstrapSources(cachedir string) string { +func downloadGoBootstrapSources(cachedir string) []string { csdb := build.MustLoadChecksums("build/checksums.txt") - gobootVersion, err := build.Version(csdb, "ppa-builder") - if err != nil { - log.Fatal(err) - } - file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion) - url := "https://dl.google.com/go/" + file - dst := filepath.Join(cachedir, file) - if err := csdb.DownloadFile(url, dst); err != nil { - log.Fatal(err) + + var bundles []string + for _, booter := range []string{"ppa-builder-1", "ppa-builder-2"} { + gobootVersion, err := build.Version(csdb, booter) + if err != nil { + log.Fatal(err) + } + file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion) + url := "https://dl.google.com/go/" + file + dst := filepath.Join(cachedir, file) + if err := csdb.DownloadFile(url, dst); err != nil { + log.Fatal(err) + } + bundles = append(bundles, dst) } - return dst + return bundles } // downloadGoSources downloads the Go source tarball. diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index daca793e55..cb48b23a5e 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -19,8 +19,9 @@ override_dh_auto_build: # # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # requirements opposed to older versions of Go. 
- (mv .goboot ../ && cd ../.goboot/src && ./make.bash) - (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash) + (mv .goboot-1 ../ && cd ../.goboot-1/src && ./make.bash) + (mv .goboot-2 ../ && cd ../.goboot-2/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-1 ./make.bash) + (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-2 ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the # entire dependency source cache with go-ethereum. From a0282fc94f0a58f2e9a355c6a22a2ac8966d35ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Apr 2024 12:00:59 +0300 Subject: [PATCH 10/64] travis: temporarilly enable PPA builds for testing (#29648) --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8c0af291a3..c93ccd4225 100644 --- a/.travis.yml +++ b/.travis.yml @@ -123,7 +123,6 @@ jobs: # This builder does the Ubuntu PPA nightly uploads - stage: build - if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux dist: bionic go: 1.22.x From 634d03793787dad138c41a0fc92c2a6aa91a976e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Apr 2024 12:27:36 +0300 Subject: [PATCH 11/64] travis: revert the PPA fix hot-build, it works (#29649) --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index c93ccd4225..8c0af291a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -123,6 +123,7 @@ jobs: # This builder does the Ubuntu PPA nightly uploads - stage: build + if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux dist: bionic go: 1.22.x From ad4fb2c7291972a1940dbb276ed1b6f49906767c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Apr 2024 14:07:39 +0300 Subject: [PATCH 12/64] build: drop trusty from PPA builds, EOL and incompatible (#29651) * build: drop trusty from PPA builds, EOL and incompatible * build: add Ubuntu Noble PPA build target --- build/ci.go | 51 +++++++++++++--------------------- build/deb/ethereum/deb.control | 2 +- build/deb/ethereum/deb.rules | 2 +- 3 files changed, 21 insertions(+), 34 deletions(-) diff --git a/build/ci.go b/build/ci.go index abcc248ac5..9a2532f51f 100644 --- a/build/ci.go +++ b/build/ci.go @@ -117,23 +117,15 @@ var ( debEthereum, } - // Distros for which packages are created. - // Note: vivid is unsupported because there is no golang-1.6 package for it. - // Note: the following Ubuntu releases have been officially deprecated on Launchpad: - // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish, - // kinetic, lunar - debDistroGoBoots = map[string]string{ - "trusty": "golang-1.11", // 14.04, EOL: 04/2024 - "xenial": "golang-go", // 16.04, EOL: 04/2026 - "bionic": "golang-go", // 18.04, EOL: 04/2028 - "focal": "golang-go", // 20.04, EOL: 04/2030 - "jammy": "golang-go", // 22.04, EOL: 04/2032 - "mantic": "golang-go", // 23.10, EOL: 07/2024 - } + // Distros for which packages are created + debDistros = []string{ + "xenial", // 16.04, EOL: 04/2026 + "bionic", // 18.04, EOL: 04/2028 + "focal", // 20.04, EOL: 04/2030 + "jammy", // 22.04, EOL: 04/2032 + "noble", // 24.04, EOL: 04/2034 - debGoBootPaths = map[string]string{ - "golang-1.11": "/usr/lib/go-1.11", - "golang-go": "/usr/lib/go", + "mantic", // 23.10, EOL: 07/2024 } // This is where the tests should be unpacked. @@ -708,9 +700,9 @@ func doDebianSource(cmdline []string) { // Create Debian packages and upload them. 
for _, pkg := range debPackages { - for distro, goboot := range debDistroGoBoots { + for _, distro := range debDistros { // Prepare the debian package with the go-ethereum sources. - meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) + meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) pkgdir := stageDebianSource(*workdir, meta) // Add bootstrapper Go source code @@ -853,10 +845,7 @@ type debPackage struct { } type debMetadata struct { - Env build.Environment - GoBootPackage string - GoBootPath string - + Env build.Environment PackageName string // go-ethereum version being built. Note that this @@ -884,21 +873,19 @@ func (d debExecutable) Package() string { return d.BinaryName } -func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { +func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { if author == "" { // No signing key, use default author. author = "Ethereum Builds " } return debMetadata{ - GoBootPackage: goboot, - GoBootPath: debGoBootPaths[goboot], - PackageName: name, - Env: env, - Author: author, - Distro: distro, - Version: version, - Time: t.Format(time.RFC1123Z), - Executables: exes, + PackageName: name, + Env: env, + Author: author, + Distro: distro, + Version: version, + Time: t.Format(time.RFC1123Z), + Executables: exes, } } diff --git a/build/deb/ethereum/deb.control b/build/deb/ethereum/deb.control index 3b759f2d04..333e954c17 100644 --- a/build/deb/ethereum/deb.control +++ b/build/deb/ethereum/deb.control @@ -2,7 +2,7 @@ Source: {{.Name}} Section: science Priority: extra Maintainer: {{.Author}} -Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}} +Build-Depends: debhelper (>= 8.0.0), golang-go Standards-Version: 3.9.5 Homepage: https://ethereum.org Vcs-Git: https://github.com/ethereum/go-ethereum.git diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index cb48b23a5e..3287e15ff0 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -7,7 +7,7 @@ # Launchpad rejects Go's access to $HOME, use custom folders export GOCACHE=/tmp/go-build export GOPATH=/tmp/gopath -export GOROOT_BOOTSTRAP={{.GoBootPath}} +export GOROOT_BOOTSTRAP=/usr/lib/go override_dh_auto_clean: # Don't try to be smart Launchpad, we know our build rules better than you From 8d42e115b1cae4f09fd02b71c06ec9c85f22ad4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 26 Apr 2024 15:24:40 +0300 Subject: [PATCH 13/64] core/state: revert pending storage updates if they revert to original (#29661) --- core/state/state_object.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index d3d20c3dc4..14a1bd389f 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/holiman/uint256" @@ -255,9 +256,16 @@ func (s *stateObject) setState(key common.Hash, value *common.Hash) { func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) 
for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value + // If the slot is different from its original value, move it into the + // pending area to be committed at the end of the block (and prefetch + // the pathways). if value != s.originStorage[key] { + s.pendingStorage[key] = value slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + } else { + // Otherwise, the slot was reverted to its original value, remove it + // from the pending area to avoid thrashing the data strutures. + delete(s.pendingStorage, key) } } if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { @@ -371,6 +379,11 @@ func (s *stateObject) updateTrie() (Trie, error) { } s.db.StorageDeleted += 1 } + // If no slots were touched, issue a warning as we shouldn't have done all + // the above work in the first place + if len(usedStorage) == 0 { + log.Error("State object update was noop", "addr", s.address, "slots", len(s.pendingStorage)) + } if s.db.prefetcher != nil { s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) } From 4253030ef67a2af2e59bbd1fd90a4c1e75939b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 26 Apr 2024 18:35:52 +0300 Subject: [PATCH 14/64] core/state: move metrics out of state objects (#29665) --- core/blockchain.go | 4 +--- core/state/state_object.go | 9 --------- core/state/statedb.go | 9 +++++++-- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index b45cd92e52..e4c8966824 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -68,7 +68,6 @@ var ( accountCommitTimer = metrics.NewRegisteredResettingTimer("chain/account/commits", nil) storageReadTimer = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil) - storageHashTimer = metrics.NewRegisteredResettingTimer("chain/storage/hashes", nil) storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil) storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil) @@ -1937,8 +1936,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) - storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation) - triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing + triehash := statedb.AccountHashes // The time spent on tries hashing trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read diff --git a/core/state/state_object.go b/core/state/state_object.go index 14a1bd389f..1454f7a459 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -294,9 +294,6 @@ func (s *stateObject) updateTrie() (Trie, error) { if len(s.pendingStorage) == 0 { return s.trie, nil } - // Track the amount of time wasted on updating the storage trie - defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) - // The snapshot storage map for the object var ( 
storage map[common.Hash][]byte @@ -400,9 +397,6 @@ func (s *stateObject) updateRoot() { if err != nil || tr == nil { return } - // Track the amount of time wasted on hashing the storage trie - defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now()) - s.data.Root = tr.Hash() } @@ -415,9 +409,6 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) { s.origin = s.data.Copy() return nil, nil } - // Track the amount of time wasted on committing the storage trie - defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) - // The trie is currently in an open state and could potentially contain // cached mutations. Call commit to acquire a set of nodes that have been // modified, the set can be nil if nothing to commit. diff --git a/core/state/statedb.go b/core/state/statedb.go index 4a934fe82c..6d9cc907e0 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -151,7 +151,6 @@ type StateDB struct { AccountUpdates time.Duration AccountCommits time.Duration StorageReads time.Duration - StorageHashes time.Duration StorageUpdates time.Duration StorageCommits time.Duration SnapshotAccountReads time.Duration @@ -856,6 +855,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // the account prefetcher. Instead, let's process all the storage updates // first, giving the account prefetches just a few more milliseconds of time // to pull useful data from disk. + start := time.Now() for addr, op := range s.mutations { if op.applied { continue @@ -865,6 +865,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { } s.stateObjects[addr].updateRoot() } + s.StorageUpdates += time.Since(start) + // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. @@ -1151,6 +1153,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er return common.Hash{}, err } // Handle all state updates afterwards + start := time.Now() for addr, op := range s.mutations { if op.isDelete() { continue @@ -1179,13 +1182,15 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er storageTrieNodesDeleted += deleted } } + s.StorageCommits += time.Since(start) + if codeWriter.ValueSize() > 0 { if err := codeWriter.Write(); err != nil { log.Crit("Failed to commit dirty codes", "error", err) } } // Write the account trie changes, measuring the amount of wasted time - start := time.Now() + start = time.Now() root, set, err := s.trie.Commit(true) if err != nil { From 4bdbaab471d2566c9f267b8372ed18c2af6646ee Mon Sep 17 00:00:00 2001 From: Roy Crihfield Date: Sun, 28 Apr 2024 19:03:03 +0800 Subject: [PATCH 15/64] params: clarify consensus engine config `String`s (#29643) Define these on a value receiever so that nil is shown differently. --- params/config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/params/config.go b/params/config.go index 439e882189..486cb6d132 100644 --- a/params/config.go +++ b/params/config.go @@ -374,7 +374,7 @@ type ChainConfig struct { type EthashConfig struct{} // String implements the stringer interface, returning the consensus engine details. 
-func (c *EthashConfig) String() string { +func (c EthashConfig) String() string { return "ethash" } @@ -385,8 +385,8 @@ type CliqueConfig struct { } // String implements the stringer interface, returning the consensus engine details. -func (c *CliqueConfig) String() string { - return "clique" +func (c CliqueConfig) String() string { + return fmt.Sprintf("clique(period: %d, epoch: %d)", c.Period, c.Epoch) } // Description returns a human-readable description of ChainConfig. From 8c3fc56d7f980d8e200918c956f2bc424d59d305 Mon Sep 17 00:00:00 2001 From: Aaron Chen Date: Tue, 30 Apr 2024 01:44:41 +0800 Subject: [PATCH 16/64] p2p/simulations/adapters: use maps.Clone (#29626) --- p2p/simulations/adapters/inproc.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 349e496b2f..0efe9744a5 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "maps" "math" "net" "sync" @@ -215,10 +216,7 @@ func (sn *SimNode) ServeRPC(conn *websocket.Conn) error { // simulation_snapshot RPC method func (sn *SimNode) Snapshots() (map[string][]byte, error) { sn.lock.RLock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } + services := maps.Clone(sn.running) sn.lock.RUnlock() if len(services) == 0 { return nil, errors.New("no running services") @@ -315,11 +313,7 @@ func (sn *SimNode) Services() []node.Lifecycle { func (sn *SimNode) ServiceMap() map[string]node.Lifecycle { sn.lock.RLock() defer sn.lock.RUnlock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } - return services + return maps.Clone(sn.running) } // Server returns the underlying p2p.Server From fecc8a0f4a5b4f42825ccc1628d069e6eceaba49 Mon Sep 17 00:00:00 2001 From: maskpp Date: Tue, 30 Apr 2024 17:19:59 +0800 Subject: [PATCH 17/64] cmd/evm/internal/t8ntool, core: prealloc map sizes where possible (#29620) set cap for map in a certain scenario --- cmd/evm/internal/t8ntool/transition.go | 2 +- core/blockchain.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 2b5eaa65aa..9ea94d195e 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -296,7 +296,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) { balance, _ := new(big.Int).SetString(dumpAccount.Balance, 0) var storage map[common.Hash]common.Hash if dumpAccount.Storage != nil { - storage = make(map[common.Hash]common.Hash) + storage = make(map[common.Hash]common.Hash, len(dumpAccount.Storage)) for k, v := range dumpAccount.Storage { storage[k] = common.HexToHash(v) } diff --git a/core/blockchain.go b/core/blockchain.go index e4c8966824..9de4baccca 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1309,7 +1309,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Delete block data from the main database. 
var ( batch = bc.db.NewBatch() - canonHashes = make(map[common.Hash]struct{}) + canonHashes = make(map[common.Hash]struct{}, len(blockChain)) ) for _, block := range blockChain { canonHashes[block.Hash()] = struct{}{} From 69f815f6f5791e0e48160bdad284773d0ffb1ba9 Mon Sep 17 00:00:00 2001 From: Nathan Date: Tue, 30 Apr 2024 17:22:02 +0800 Subject: [PATCH 18/64] params: print time value instead of pointer in ConfigCompatError (#29514) --- params/config.go | 12 ++++++++++-- params/config_test.go | 18 ++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/params/config.go b/params/config.go index 486cb6d132..62acaddaad 100644 --- a/params/config.go +++ b/params/config.go @@ -880,7 +880,7 @@ func newTimestampCompatError(what string, storedtime, newtime *uint64) *ConfigCo NewTime: newtime, RewindToTime: 0, } - if rew != nil { + if rew != nil && *rew != 0 { err.RewindToTime = *rew - 1 } return err @@ -890,7 +890,15 @@ func (err *ConfigCompatError) Error() string { if err.StoredBlock != nil { return fmt.Sprintf("mismatching %s in database (have block %d, want block %d, rewindto block %d)", err.What, err.StoredBlock, err.NewBlock, err.RewindToBlock) } - return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, err.StoredTime, err.NewTime, err.RewindToTime) + + if err.StoredTime == nil && err.NewTime == nil { + return "" + } else if err.StoredTime == nil && err.NewTime != nil { + return fmt.Sprintf("mismatching %s in database (have timestamp nil, want timestamp %d, rewindto timestamp %d)", err.What, *err.NewTime, err.RewindToTime) + } else if err.StoredTime != nil && err.NewTime == nil { + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp nil, rewindto timestamp %d)", err.What, *err.StoredTime, err.RewindToTime) + } + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, *err.StoredTime, *err.NewTime, err.RewindToTime) } // Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions diff --git a/params/config_test.go b/params/config_test.go index bf8ce2fc5e..fa444a1d0b 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/math" + "github.com/stretchr/testify/require" ) func TestCheckCompatible(t *testing.T) { @@ -137,3 +138,20 @@ func TestConfigRules(t *testing.T) { t.Errorf("expected %v to be shanghai", stamp) } } + +func TestTimestampCompatError(t *testing.T) { + require.Equal(t, new(ConfigCompatError).Error(), "") + + errWhat := "Shanghai fork timestamp" + require.Equal(t, newTimestampCompatError(errWhat, nil, newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp nil, want timestamp 1681338455, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), nil).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp nil, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), newUint64(600624000)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp 600624000, rewindto timestamp 600623999)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(0), newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 0, want 
timestamp 1681338455, rewindto timestamp 0)") +} From c04b8e6d74fe6d1c1f9a96e6269d51e47124d6f1 Mon Sep 17 00:00:00 2001 From: felipe Date: Tue, 30 Apr 2024 03:22:57 -0600 Subject: [PATCH 19/64] cmd/utils: require TTD and difficulty to be zero at genesis for dev mode (#29579) --- cmd/utils/flags.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e1c33678be..ecf6acc186 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1872,13 +1872,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { Fatalf("Could not read genesis from database: %v", err) } if !genesis.Config.TerminalTotalDifficultyPassed { - Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode") + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true") } if genesis.Config.TerminalTotalDifficulty == nil { - Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.") + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified") + } else if genesis.Config.TerminalTotalDifficulty.Cmp(big.NewInt(0)) != 0 { + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be 0") } - if genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 { - Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty") + if genesis.Difficulty.Cmp(big.NewInt(0)) != 0 { + Fatalf("Bad developer-mode genesis configuration: difficulty must be 0") } } chaindb.Close() From f46c878441e2e567e8815f1e252a38ad0ffafbc2 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 30 Apr 2024 17:33:22 +0800 Subject: [PATCH 20/64] core/rawdb: implement in-memory freezer (#29135) --- core/rawdb/ancient_scheme.go | 20 +- core/rawdb/ancienttest/testsuite.go | 325 +++++++++++++++++++ core/rawdb/chain_freezer.go | 40 ++- core/rawdb/database.go | 27 +- core/rawdb/freezer.go | 8 +- core/rawdb/freezer_memory.go | 428 ++++++++++++++++++++++++++ core/rawdb/freezer_memory_test.go | 41 +++ core/rawdb/freezer_resettable.go | 38 +-- core/rawdb/freezer_resettable_test.go | 6 +- core/rawdb/freezer_test.go | 20 ++ ethdb/database.go | 30 +- node/node.go | 3 +- triedb/pathdb/database.go | 110 ++++--- triedb/pathdb/history.go | 40 +-- triedb/pathdb/history_inspect.go | 12 +- triedb/pathdb/history_test.go | 17 +- 16 files changed, 1014 insertions(+), 151 deletions(-) create mode 100644 core/rawdb/ancienttest/testsuite.go create mode 100644 core/rawdb/freezer_memory.go create mode 100644 core/rawdb/freezer_memory_test.go diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index e88867af0e..44867ded04 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -16,7 +16,11 @@ package rawdb -import "path/filepath" +import ( + "path/filepath" + + "github.com/ethereum/go-ethereum/ethdb" +) // The list of table names of chain freezer. const ( @@ -75,7 +79,15 @@ var ( // freezers the collections of all builtin freezers. var freezers = []string{ChainFreezerName, StateFreezerName} -// NewStateFreezer initializes the freezer for state history. 
-func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) { - return NewResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) +// NewStateFreezer initializes the ancient store for state history. +// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). +// - if non-empty directory is given, initializes the regular file-based +// state freezer. +func NewStateFreezer(ancientDir string, readOnly bool) (ethdb.ResettableAncientStore, error) { + if ancientDir == "" { + return NewMemoryFreezer(readOnly, stateFreezerNoSnappy), nil + } + return newResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) } diff --git a/core/rawdb/ancienttest/testsuite.go b/core/rawdb/ancienttest/testsuite.go new file mode 100644 index 0000000000..70de263c04 --- /dev/null +++ b/core/rawdb/ancienttest/testsuite.go @@ -0,0 +1,325 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ancienttest + +import ( + "bytes" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/testrand" +) + +// TestAncientSuite runs a suite of tests against an ancient database +// implementation. 
+func TestAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + // Test basic read methods + t.Run("BasicRead", func(t *testing.T) { basicRead(t, newFn) }) + + // Test batch read method + t.Run("BatchRead", func(t *testing.T) { batchRead(t, newFn) }) + + // Test basic write methods + t.Run("BasicWrite", func(t *testing.T) { basicWrite(t, newFn) }) + + // Test if data mutation is allowed after db write + t.Run("nonMutable", func(t *testing.T) { nonMutable(t, newFn) }) +} + +func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < len(data); i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test basic tail and head retrievals + tail, err := db.Tail() + if err != nil || tail != 10 { + t.Fatal("Failed to retrieve tail") + } + ancient, err := db.Ancients() + if err != nil || ancient != 90 { + t.Fatal("Failed to retrieve ancient") + } + + // Test the deleted items shouldn't be reachable + var cases = []struct { + start int + limit int + }{ + {0, 10}, + {90, 100}, + } + for _, c := range cases { + for i := c.start; i < c.limit; i++ { + exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatalf("Item %d is already truncated", uint64(i)) + } + _, err = db.Ancient("a", uint64(i)) + if err == nil { + t.Fatal("Error is expected for non-existent item") + } + } + } + + // Test the items in range should be reachable + for i := 10; i < 90; i++ { + exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if !exist { + t.Fatalf("Item %d is missing", uint64(i)) + } + blob, err := db.Ancient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to retrieve item, %v", err) + } + if !bytes.Equal(blob, data[i]) { + t.Fatalf("Unexpected item content, want: %v, got: %v", data[i], blob) + } + } + + // Test the items in unknown table shouldn't be reachable + exist, err := db.HasAncient("b", uint64(0)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatal("Item in unknown table shouldn't be found") + } + _, err = db.Ancient("b", uint64(0)) + if err == nil { + t.Fatal("Error is expected for unknown table") + } +} + +func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test the items in range should be reachable + var cases = []struct { + start uint64 + count uint64 + maxSize uint64 + expStart int + expLimit int + }{ + // Items in range [10, 90) with no size limitation + { + 10, 80, 0, 10, 90, + }, + // Items in range [10, 90) with 32 size cap, single item is expected + { + 10, 80, 32, 10, 11, + }, + // Items in range [10, 90) with 31 size cap, single item is expected + { + 10, 80, 31, 10, 11, + }, + // Items in range [10, 90) with 32*80 size cap, all items are expected + { + 10, 80, 32 * 80, 10, 90, + }, + // Extra items above the last item are not returned + { + 10, 90, 0, 10, 90, + }, + } + for i, c := range cases { + batch, err := db.AncientRange("a", 
c.start, c.count, c.maxSize) + if err != nil { + t.Fatalf("Failed to retrieve item in range, %v", err) + } + if !reflect.DeepEqual(batch, data[c.expStart:c.expLimit]) { + t.Fatalf("Case %d, Batch content is not matched", i) + } + } + + // Test out-of-range / zero-size retrieval should be rejected + _, err := db.AncientRange("a", 0, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 90, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 10, 0, 0) + if err == nil { + t.Fatal("Zero-size retrieval should be rejected") + } + + // Test item in unknown table shouldn't be reachable + _, err = db.AncientRange("b", 10, 1, 0) + if err == nil { + t.Fatal("Item in unknown table shouldn't be found") + } +} + +func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a", "b"}) + dataA = makeDataset(100, 32) + dataB = makeDataset(100, 32) + ) + defer db.Close() + + // The ancient write to tables should be aligned + _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + } + return nil + }) + if err == nil { + t.Fatal("Unaligned ancient write should be rejected") + } + + // Test normal ancient write + size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + wantSize := int64(6400) + if size != wantSize { + t.Fatalf("Ancient write size is not expected, want: %d, got: %d", wantSize, size) + } + + // Write should work after head truncating + db.TruncateHead(90) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 90; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + + // Write should work after truncating everything + db.TruncateTail(0) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } +} + +func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + db := newFn([]string{"a"}) + defer db.Close() + + // We write 100 zero-bytes to the freezer and immediately mutate the slice + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + data := make([]byte, 100) + op.AppendRaw("a", uint64(0), data) + for i := range data { + data[i] = 0xff + } + return nil + }) + // Now read it. + data, err := db.Ancient("a", uint64(0)) + if err != nil { + t.Fatal(err) + } + for k, v := range data { + if v != 0 { + t.Fatalf("byte %d != 0: %x", k, v) + } + } +} + +// TestResettableAncientSuite runs a suite of tests against a resettable ancient +// database implementation. 
+func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.ResettableAncientStore) { + t.Run("Reset", func(t *testing.T) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Ancient write should work after resetting + db.Reset() + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + }) +} + +func makeDataset(size, value int) [][]byte { + var vals [][]byte + for i := 0; i < size; i += 1 { + vals = append(vals, testrand.Bytes(value)) + } + return vals +} diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index d8214874bd..7a0b819b6f 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -39,26 +39,40 @@ const ( freezerBatchLimit = 30000 ) -// chainFreezer is a wrapper of freezer with additional chain freezing feature. -// The background thread will keep moving ancient chain segments from key-value -// database to flat files for saving space on live database. +// chainFreezer is a wrapper of chain ancient store with additional chain freezing +// feature. The background thread will keep moving ancient chain segments from +// key-value database to flat files for saving space on live database. type chainFreezer struct { - *Freezer + ethdb.AncientStore // Ancient store for storing cold chain segment + quit chan struct{} wg sync.WaitGroup trigger chan chan struct{} // Manual blocking freeze trigger, test determinism } -// newChainFreezer initializes the freezer for ancient chain data. +// newChainFreezer initializes the freezer for ancient chain segment. +// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). +// - if non-empty directory is given, initializes the regular file-based +// state freezer. func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) { - freezer, err := NewChainFreezer(datadir, namespace, readonly) + var ( + err error + freezer ethdb.AncientStore + ) + if datadir == "" { + freezer = NewMemoryFreezer(readonly, chainFreezerNoSnappy) + } else { + freezer, err = NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) + } if err != nil { return nil, err } return &chainFreezer{ - Freezer: freezer, - quit: make(chan struct{}), - trigger: make(chan chan struct{}), + AncientStore: freezer, + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } @@ -70,7 +84,7 @@ func (f *chainFreezer) Close() error { close(f.quit) } f.wg.Wait() - return f.Freezer.Close() + return f.AncientStore.Close() } // readHeadNumber returns the number of chain head block. 0 is returned if the @@ -167,7 +181,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { log.Debug("Current full block not old enough to freeze", "err", err) continue } - frozen := f.frozen.Load() + frozen, _ := f.Ancients() // no error will occur, safe to ignore // Short circuit if the blocks below threshold are already frozen. 
if frozen != 0 && frozen-1 >= threshold { @@ -190,7 +204,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { backoff = true continue } - // Batch of blocks have been frozen, flush them before wiping from leveldb + // Batch of blocks have been frozen, flush them before wiping from key-value store if err := f.Sync(); err != nil { log.Crit("Failed to flush frozen tables", "err", err) } @@ -210,7 +224,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { // Wipe out side chains also and track dangling side chains var dangling []common.Hash - frozen = f.frozen.Load() // Needs reload after during freezeRange + frozen, _ = f.Ancients() // Needs reload after during freezeRange for number := first; number < frozen; number++ { // Always keep the genesis block in active database if number != 0 { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 7b2c0415cb..0a9f6f73c7 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,11 +34,13 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enables freezer data retrievals. +// freezerdb is a database wrapper that enables ancient chain segment freezing. type freezerdb struct { - ancientRoot string ethdb.KeyValueStore - ethdb.AncientStore + *chainFreezer + + readOnly bool + ancientRoot string } // AncientDatadir returns the path of root ancient directory. @@ -50,7 +52,7 @@ func (frdb *freezerdb) AncientDatadir() (string, error) { // the slow ancient tables. func (frdb *freezerdb) Close() error { var errs []error - if err := frdb.AncientStore.Close(); err != nil { + if err := frdb.chainFreezer.Close(); err != nil { errs = append(errs, err) } if err := frdb.KeyValueStore.Close(); err != nil { @@ -66,12 +68,12 @@ func (frdb *freezerdb) Close() error { // a freeze cycle completes, without having to sleep for a minute to trigger the // automatic background run. func (frdb *freezerdb) Freeze() error { - if frdb.AncientStore.(*chainFreezer).readonly { + if frdb.readOnly { return errReadOnly } // Trigger a freeze cycle and block until it's done trigger := make(chan struct{}, 1) - frdb.AncientStore.(*chainFreezer).trigger <- trigger + frdb.chainFreezer.trigger <- trigger <-trigger return nil } @@ -192,8 +194,13 @@ func resolveChainFreezerDir(ancient string) string { // storage. The passed ancient indicates the path of root ancient directory // where the chain freezer can be opened. func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) { - // Create the idle freezer instance - frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly) + // Create the idle freezer instance. If the given ancient directory is empty, + // in-memory chain freezer is used (e.g. dev mode); otherwise the regular + // file-based freezer is created. 
+ if ancient != "" { + ancient = resolveChainFreezerDir(ancient) + } + frdb, err := newChainFreezer(ancient, namespace, readonly) if err != nil { printChainMetadata(db) return nil, err @@ -277,7 +284,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st } } // Freezer is consistent with the key-value database, permit combining the two - if !frdb.readonly { + if !readonly { frdb.wg.Add(1) go func() { frdb.freeze(db) @@ -287,7 +294,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st return &freezerdb{ ancientRoot: ancient, KeyValueStore: db, - AncientStore: frdb, + chainFreezer: frdb, }, nil } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index b7824ddc0d..0f28782db9 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -62,7 +62,7 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000 // reserving it for go-ethereum. This would also reduce the memory requirements // of Geth, and thus also GC overhead. type Freezer struct { - frozen atomic.Uint64 // Number of blocks already frozen + frozen atomic.Uint64 // Number of items already frozen tail atomic.Uint64 // Number of the first stored item in the freezer // This lock synchronizes writers and the truncate operation, as well as @@ -76,12 +76,6 @@ type Freezer struct { closeOnce sync.Once } -// NewChainFreezer is a small utility method around NewFreezer that sets the -// default parameters for the chain storage. -func NewChainFreezer(datadir string, namespace string, readonly bool) (*Freezer, error) { - return NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) -} - // NewFreezer creates a freezer instance for maintaining immutable ordered // data according to the given parameters. // diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go new file mode 100644 index 0000000000..954b58e874 --- /dev/null +++ b/core/rawdb/freezer_memory.go @@ -0,0 +1,428 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// memoryTable is used to store a list of sequential items in memory. +type memoryTable struct { + name string // Table name + items uint64 // Number of stored items in the table, including the deleted ones + offset uint64 // Number of deleted items from the table + data [][]byte // List of rlp-encoded items, sort in order + size uint64 // Total memory size occupied by the table + lock sync.RWMutex +} + +// newMemoryTable initializes the memory table. 
+func newMemoryTable(name string) *memoryTable { + return &memoryTable{name: name} +} + +// has returns an indicator whether the specified data exists. +func (t *memoryTable) has(number uint64) bool { + t.lock.RLock() + defer t.lock.RUnlock() + + return number >= t.offset && number < t.items +} + +// retrieve retrieves multiple items in sequence, starting from the index 'start'. +// It will return: +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present +func (t *memoryTable) retrieve(start uint64, count, maxBytes uint64) ([][]byte, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + var ( + size uint64 + batch [][]byte + ) + // Ensure the start is written, not deleted from the tail, and that the + // caller actually wants something. + if t.items <= start || t.offset > start || count == 0 { + return nil, errOutOfBounds + } + // Cap the item count if the retrieval is out of bound. + if start+count > t.items { + count = t.items - start + } + for n := start; n < start+count; n++ { + index := n - t.offset + if len(batch) != 0 && maxBytes != 0 && size+uint64(len(t.data[index])) > maxBytes { + return batch, nil + } + batch = append(batch, t.data[index]) + size += uint64(len(t.data[index])) + } + return batch, nil +} + +// truncateHead discards any recent data above the provided threshold number. +func (t *memoryTable) truncateHead(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete. + if t.items <= items { + return nil + } + if items < t.offset { + return errors.New("truncation below tail") + } + t.data = t.data[:items-t.offset] + t.items = items + return nil +} + +// truncateTail discards any recent data before the provided threshold number. +func (t *memoryTable) truncateTail(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete. + if t.offset >= items { + return nil + } + if t.items < items { + return errors.New("truncation above head") + } + t.data = t.data[items-t.offset:] + t.offset = items + return nil +} + +// commit merges the given item batch into table. It's presumed that the +// batch is ordered and continuous with table. +func (t *memoryTable) commit(batch [][]byte) error { + t.lock.Lock() + defer t.lock.Unlock() + + for _, item := range batch { + t.size += uint64(len(item)) + } + t.data = append(t.data, batch...) + t.items += uint64(len(batch)) + return nil +} + +// memoryBatch is the singleton batch used for ancient write. +type memoryBatch struct { + data map[string][][]byte + next map[string]uint64 + size map[string]int64 +} + +func newMemoryBatch() *memoryBatch { + return &memoryBatch{ + data: make(map[string][][]byte), + next: make(map[string]uint64), + size: make(map[string]int64), + } +} + +func (b *memoryBatch) reset(freezer *MemoryFreezer) { + b.data = make(map[string][][]byte) + b.next = make(map[string]uint64) + b.size = make(map[string]int64) + + for name, table := range freezer.tables { + b.next[name] = table.items + } +} + +// Append adds an RLP-encoded item. 
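A small sketch of how the items/offset bookkeeping above plays out across commit and the two truncation directions. Since memoryTable is unexported, this is written as if it were an internal test compiled into package rawdb; the table name and test name are made up.

package rawdb

import "testing"

func TestMemoryTableSketch(t *testing.T) {
	tbl := newMemoryTable("test")

	// Append items 0..4; items counts everything ever written, offset the
	// number of entries deleted from the tail.
	tbl.commit([][]byte{{0}, {1}, {2}, {3}, {4}})

	// Drop everything below item 2: offset moves to 2, the head stays at 5.
	tbl.truncateTail(2)
	if tbl.has(1) || !tbl.has(2) {
		t.Fatal("tail truncation bookkeeping is off")
	}
	// Drop item 4 and above: the head moves back to 4.
	tbl.truncateHead(4)
	if tbl.has(4) || !tbl.has(3) {
		t.Fatal("head truncation bookkeeping is off")
	}
	// Only items 2 and 3 remain retrievable.
	blobs, err := tbl.retrieve(2, 10, 0)
	if err != nil || len(blobs) != 2 {
		t.Fatalf("unexpected retrieval: %v %v", blobs, err)
	}
}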
+func (b *memoryBatch) Append(kind string, number uint64, item interface{}) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + blob, err := rlp.EncodeToBytes(item) + if err != nil { + return err + } + b.data[kind] = append(b.data[kind], blob) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// AppendRaw adds an item without RLP-encoding it. +func (b *memoryBatch) AppendRaw(kind string, number uint64, blob []byte) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + b.data[kind] = append(b.data[kind], common.CopyBytes(blob)) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// commit is called at the end of a write operation and writes all remaining +// data to tables. +func (b *memoryBatch) commit(freezer *MemoryFreezer) (items uint64, writeSize int64, err error) { + // Check that count agrees on all batches. + items = math.MaxUint64 + for name, next := range b.next { + if items < math.MaxUint64 && next != items { + return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, next, items) + } + items = next + } + // Commit all table batches. + for name, batch := range b.data { + table := freezer.tables[name] + if err := table.commit(batch); err != nil { + return 0, 0, err + } + writeSize += b.size[name] + } + return items, writeSize, nil +} + +// MemoryFreezer is an ephemeral ancient store. It implements the ethdb.AncientStore +// interface and can be used along with ephemeral key-value store. +type MemoryFreezer struct { + items uint64 // Number of items stored + tail uint64 // Number of the first stored item in the freezer + readonly bool // Flag if the freezer is only for reading + lock sync.RWMutex // Lock to protect fields + tables map[string]*memoryTable // Tables for storing everything + writeBatch *memoryBatch // Pre-allocated write batch +} + +// NewMemoryFreezer initializes an in-memory freezer instance. +func NewMemoryFreezer(readonly bool, tableName map[string]bool) *MemoryFreezer { + tables := make(map[string]*memoryTable) + for name := range tableName { + tables[name] = newMemoryTable(name) + } + return &MemoryFreezer{ + writeBatch: newMemoryBatch(), + readonly: readonly, + tables: tables, + } +} + +// HasAncient returns an indicator whether the specified data exists. +func (f *MemoryFreezer) HasAncient(kind string, number uint64) (bool, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.has(number), nil + } + return false, nil +} + +// Ancient retrieves an ancient binary blob from the in-memory freezer. +func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + data, err := t.retrieve(number, 1, 0) + if err != nil { + return nil, err + } + return data[0], nil +} + +// AncientRange retrieves multiple items in sequence, starting from the index 'start'. +// It will return +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. 
+// - if maxBytes is not specified, 'count' items will be returned if they are present +func (f *MemoryFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + return t.retrieve(start, count, maxBytes) +} + +// Ancients returns the ancient item numbers in the freezer. +func (f *MemoryFreezer) Ancients() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.items, nil +} + +// Tail returns the number of first stored item in the freezer. +// This number can also be interpreted as the total deleted item numbers. +func (f *MemoryFreezer) Tail() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.tail, nil +} + +// AncientSize returns the ancient size of the specified category. +func (f *MemoryFreezer) AncientSize(kind string) (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.size, nil + } + return 0, errUnknownTable +} + +// ReadAncients runs the given read operation while ensuring that no writes take place +// on the underlying freezer. +func (f *MemoryFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return fn(f) +} + +// ModifyAncients runs the given write operation. +func (f *MemoryFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + // Roll back all tables to the starting position in case of error. + defer func(old uint64) { + if err == nil { + return + } + // The write operation has failed. Go back to the previous item position. + for name, table := range f.tables { + err := table.truncateHead(old) + if err != nil { + log.Error("Freezer table roll-back failed", "table", name, "index", old, "err", err) + } + } + }(f.items) + + // Modify the ancients in batch. + f.writeBatch.reset(f) + if err := fn(f.writeBatch); err != nil { + return 0, err + } + item, writeSize, err := f.writeBatch.commit(f) + if err != nil { + return 0, err + } + f.items = item + return writeSize, nil +} + +// TruncateHead discards any recent data above the provided threshold number. +// It returns the previous head number. +func (f *MemoryFreezer) TruncateHead(items uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.items + if old <= items { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateHead(items); err != nil { + return 0, err + } + } + f.items = items + return old, nil +} + +// TruncateTail discards any recent data below the provided threshold number. +func (f *MemoryFreezer) TruncateTail(tail uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.tail + if old >= tail { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateTail(tail); err != nil { + return 0, err + } + } + f.tail = tail + return old, nil +} + +// Sync flushes all data tables to disk. +func (f *MemoryFreezer) Sync() error { + return nil +} + +// MigrateTable processes and migrates entries of a given table to a new format. +// The second argument is a function that takes a raw entry and returns it +// in the newest format. 
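Putting the pieces together, a hedged usage sketch of the in-memory freezer: appends are funneled through ModifyAncients (which drives the memoryBatch above), reads go through the ordinary AncientReader methods. The "test" table name is invented for the example.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"test": true})
	defer f.Close()

	// Appends are funneled through ModifyAncients; numbering must continue
	// contiguously from the current head.
	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		if err := op.AppendRaw("test", 0, []byte("item-0")); err != nil {
			return err
		}
		return op.AppendRaw("test", 1, []byte("item-1"))
	})
	if err != nil {
		panic(err)
	}
	blob, _ := f.Ancient("test", 1)
	items, _ := f.Ancients()
	fmt.Printf("items=%d last=%q\n", items, blob)
}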
+func (f *MemoryFreezer) MigrateTable(string, func([]byte) ([]byte, error)) error { + return errors.New("not implemented") +} + +// Close releases all the sources held by the memory freezer. It will panic if +// any following invocation is made to a closed freezer. +func (f *MemoryFreezer) Close() error { + f.lock.Lock() + defer f.lock.Unlock() + + f.tables = nil + f.writeBatch = nil + return nil +} + +// Reset drops all the data cached in the memory freezer and reset itself +// back to default state. +func (f *MemoryFreezer) Reset() error { + f.lock.Lock() + defer f.lock.Unlock() + + tables := make(map[string]*memoryTable) + for name := range f.tables { + tables[name] = newMemoryTable(name) + } + f.tables = tables + f.items, f.tail = 0, 0 + return nil +} diff --git a/core/rawdb/freezer_memory_test.go b/core/rawdb/freezer_memory_test.go new file mode 100644 index 0000000000..e71de0f629 --- /dev/null +++ b/core/rawdb/freezer_memory_test.go @@ -0,0 +1,41 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" + "github.com/ethereum/go-ethereum/ethdb" +) + +func TestMemoryFreezer(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) +} diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 7a85489738..7fa59b8d21 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -30,16 +30,16 @@ const tmpSuffix = ".tmp" // freezerOpenFunc is the function used to open/create a freezer. type freezerOpenFunc = func() (*Freezer, error) -// ResettableFreezer is a wrapper of the freezer which makes the +// resettableFreezer is a wrapper of the freezer which makes the // freezer resettable. -type ResettableFreezer struct { +type resettableFreezer struct { freezer *Freezer opener freezerOpenFunc datadir string lock sync.RWMutex } -// NewResettableFreezer creates a resettable freezer, note freezer is +// newResettableFreezer creates a resettable freezer, note freezer is // only resettable if the passed file directory is exclusively occupied // by the freezer. And also the user-configurable ancient root directory // is **not** supported for reset since it might be a mount and rename @@ -48,7 +48,7 @@ type ResettableFreezer struct { // // The reset function will delete directory atomically and re-create the // freezer from scratch. 
-func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*ResettableFreezer, error) { +func newResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*resettableFreezer, error) { if err := cleanup(datadir); err != nil { return nil, err } @@ -59,7 +59,7 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa if err != nil { return nil, err } - return &ResettableFreezer{ + return &resettableFreezer{ freezer: freezer, opener: opener, datadir: datadir, @@ -70,7 +70,7 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa // recreate the freezer from scratch. The atomicity of directory deletion // is guaranteed by the rename operation, the leftover directory will be // cleaned up in next startup in case crash happens after rename. -func (f *ResettableFreezer) Reset() error { +func (f *resettableFreezer) Reset() error { f.lock.Lock() defer f.lock.Unlock() @@ -93,7 +93,7 @@ func (f *ResettableFreezer) Reset() error { } // Close terminates the chain freezer, unmapping all the data files. -func (f *ResettableFreezer) Close() error { +func (f *resettableFreezer) Close() error { f.lock.RLock() defer f.lock.RUnlock() @@ -102,7 +102,7 @@ func (f *ResettableFreezer) Close() error { // HasAncient returns an indicator whether the specified ancient data exists // in the freezer -func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) { +func (f *resettableFreezer) HasAncient(kind string, number uint64) (bool, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -110,7 +110,7 @@ func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) } // Ancient retrieves an ancient binary blob from the append-only immutable files. -func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { +func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -123,7 +123,7 @@ func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) // - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), // but will otherwise return as many items as fit into maxByteSize. // - if maxBytes is not specified, 'count' items will be returned if they are present. -func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { +func (f *resettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -131,7 +131,7 @@ func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uin } // Ancients returns the length of the frozen items. -func (f *ResettableFreezer) Ancients() (uint64, error) { +func (f *resettableFreezer) Ancients() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -139,7 +139,7 @@ func (f *ResettableFreezer) Ancients() (uint64, error) { } // Tail returns the number of first stored item in the freezer. -func (f *ResettableFreezer) Tail() (uint64, error) { +func (f *resettableFreezer) Tail() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -147,7 +147,7 @@ func (f *ResettableFreezer) Tail() (uint64, error) { } // AncientSize returns the ancient size of the specified category. 
-func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { +func (f *resettableFreezer) AncientSize(kind string) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -156,7 +156,7 @@ func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { // ReadAncients runs the given read operation while ensuring that no writes take place // on the underlying freezer. -func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { +func (f *resettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -164,7 +164,7 @@ func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) ( } // ModifyAncients runs the given write operation. -func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { +func (f *resettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -173,7 +173,7 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) // TruncateHead discards any recent data above the provided threshold number. // It returns the previous head number. -func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { +func (f *resettableFreezer) TruncateHead(items uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -182,7 +182,7 @@ func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { // TruncateTail discards any recent data below the provided threshold number. // It returns the previous value -func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { +func (f *resettableFreezer) TruncateTail(tail uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -190,7 +190,7 @@ func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { } // Sync flushes all data tables to disk. -func (f *ResettableFreezer) Sync() error { +func (f *resettableFreezer) Sync() error { f.lock.RLock() defer f.lock.RUnlock() @@ -199,7 +199,7 @@ func (f *ResettableFreezer) Sync() error { // MigrateTable processes the entries in a given table in sequence // converting them to a new format if they're of an old format. 
-func (f *ResettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { +func (f *resettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { f.lock.RLock() defer f.lock.RUnlock() diff --git a/core/rawdb/freezer_resettable_test.go b/core/rawdb/freezer_resettable_test.go index d741bc14e5..61dc23d798 100644 --- a/core/rawdb/freezer_resettable_test.go +++ b/core/rawdb/freezer_resettable_test.go @@ -33,7 +33,7 @@ func TestResetFreezer(t *testing.T) { {1, bytes.Repeat([]byte{1}, 2048)}, {2, bytes.Repeat([]byte{2}, 2048)}, } - f, _ := NewResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) defer f.Close() f.ModifyAncients(func(op ethdb.AncientWriteOp) error { @@ -87,7 +87,7 @@ func TestFreezerCleanup(t *testing.T) { {2, bytes.Repeat([]byte{2}, 2048)}, } datadir := t.TempDir() - f, _ := NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.ModifyAncients(func(op ethdb.AncientWriteOp) error { for _, item := range items { op.AppendRaw("test", item.id, item.blob) @@ -98,7 +98,7 @@ func TestFreezerCleanup(t *testing.T) { os.Rename(datadir, tmpName(datadir)) // Open the freezer again, trigger cleanup operation - f, _ = NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ = newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.Close() if _, err := os.Lstat(tmpName(datadir)); !os.IsNotExist(err) { diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 93bc2c2254..72d1417200 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -27,6 +27,7 @@ import ( "sync" "testing" + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" @@ -480,3 +481,22 @@ func TestFreezerCloseSync(t *testing.T) { t.Fatalf("want %v, have %v", have, want) } } + +func TestFreezerSuite(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, _ := newFreezerForTesting(t, tables) + return f + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, _ := newResettableFreezer(t.TempDir(), "", false, 2048, tables) + return f + }) +} diff --git a/ethdb/database.go b/ethdb/database.go index 4d4817daf2..3ec1f70e3b 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -88,8 +88,8 @@ type AncientReaderOp interface { // Ancients returns the ancient item numbers in the ancient store. Ancients() (uint64, error) - // Tail returns the number of first stored item in the freezer. - // This number can also be interpreted as the total deleted item numbers. + // Tail returns the number of first stored item in the ancient store. + // This number can also be interpreted as the total deleted items. Tail() (uint64, error) // AncientSize returns the ancient size of the specified category. @@ -101,7 +101,7 @@ type AncientReader interface { AncientReaderOp // ReadAncients runs the given read operation while ensuring that no writes take place - // on the underlying freezer. + // on the underlying ancient store. 
ReadAncients(fn func(AncientReaderOp) error) (err error) } @@ -141,11 +141,15 @@ type AncientWriteOp interface { AppendRaw(kind string, number uint64, item []byte) error } -// AncientStater wraps the Stat method of a backing data store. +// AncientStater wraps the Stat method of a backing ancient store. type AncientStater interface { - // AncientDatadir returns the path of root ancient directory. Empty string - // will be returned if ancient store is not enabled at all. The returned - // path can be used to construct the path of other freezers. + // AncientDatadir returns the path of the ancient store directory. + // + // If the ancient store is not activated, an error is returned. + // If an ephemeral ancient store is used, an empty path is returned. + // + // The path returned by AncientDatadir can be used as the root path + // of the ancient store to construct paths for other sub ancient stores. AncientDatadir() (string, error) } @@ -171,15 +175,23 @@ type Stater interface { } // AncientStore contains all the methods required to allow handling different -// ancient data stores backing immutable chain data store. +// ancient data stores backing immutable data store. type AncientStore interface { AncientReader AncientWriter io.Closer } +// ResettableAncientStore extends the AncientStore interface by adding a Reset method. +type ResettableAncientStore interface { + AncientStore + + // Reset is designed to reset the entire ancient store to its default state. + Reset() error +} + // Database contains all the methods required by the high level database to not -// only access the key-value data store but also the chain freezer. +// only access the key-value data store but also the ancient chain store. type Database interface { Reader Writer diff --git a/node/node.go b/node/node.go index 6cbae68591..633f88f058 100644 --- a/node/node.go +++ b/node/node.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" @@ -752,7 +753,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient var db ethdb.Database var err error if n.config.DataDir == "" { - db = rawdb.NewMemoryDatabase() + db, err = rawdb.NewDatabaseWithFreezer(memorydb.New(), "", namespace, readonly) } else { db, err = rawdb.Open(rawdb.OpenOptions{ Type: n.config.DBEngine, diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 18f2eeef00..50beebced1 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -131,15 +131,15 @@ type Database struct { // readOnly is the flag whether the mutation is allowed to be applied. // It will be set automatically when the database is journaled during // the shutdown to reject all following unexpected mutations. 
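The reworked AncientDatadir contract distinguishes three situations: no ancient store, an ephemeral one, and a file-backed one. A small sketch of how calling code might branch on them follows; the helper name is made up, and a plain memory database is assumed to report the disabled case.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// describeAncients maps the AncientDatadir contract onto the three possible
// deployment modes; the helper is purely illustrative.
func describeAncients(db ethdb.AncientStater) string {
	dir, err := db.AncientDatadir()
	switch {
	case err != nil:
		return "ancient store disabled"
	case dir == "":
		return "ephemeral in-memory ancient store"
	default:
		return "file-backed ancient store at " + dir
	}
}

func main() {
	// A plain memory database carries no ancient store at all, so it is
	// expected to land in the first case.
	fmt.Println(describeAncients(rawdb.NewMemoryDatabase()))
}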
- readOnly bool // Flag if database is opened in read only mode - waitSync bool // Flag if database is deactivated due to initial state sync - isVerkle bool // Flag if database is used for verkle tree - bufferSize int // Memory allowance (in bytes) for caching dirty nodes - config *Config // Configuration for database - diskdb ethdb.Database // Persistent storage for matured trie nodes - tree *layerTree // The group for all known layers - freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests - lock sync.RWMutex // Lock to prevent mutations from happening at the same time + readOnly bool // Flag if database is opened in read only mode + waitSync bool // Flag if database is deactivated due to initial state sync + isVerkle bool // Flag if database is used for verkle tree + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time } // New attempts to load an already existing layer from a persistent key-value @@ -162,45 +162,10 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { // and in-memory layer journal. db.tree = newLayerTree(db.loadLayers()) - // Open the freezer for state history if the passed database contains an - // ancient store. Otherwise, all the relevant functionalities are disabled. - // - // Because the freezer can only be opened once at the same time, this - // mechanism also ensures that at most one **non-readOnly** database - // is opened at the same time to prevent accidental mutation. - if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { - freezer, err := rawdb.NewStateFreezer(ancient, false) - if err != nil { - log.Crit("Failed to open state history freezer", "err", err) - } - db.freezer = freezer - - diskLayerID := db.tree.bottom().stateID() - if diskLayerID == 0 { - // Reset the entire state histories in case the trie database is - // not initialized yet, as these state histories are not expected. - frozen, err := db.freezer.Ancients() - if err != nil { - log.Crit("Failed to retrieve head of state history", "err", err) - } - if frozen != 0 { - err := db.freezer.Reset() - if err != nil { - log.Crit("Failed to reset state histories", "err", err) - } - log.Info("Truncated extraneous state history") - } - } else { - // Truncate the extra state histories above in freezer in case - // it's not aligned with the disk layer. - pruned, err := truncateFromHead(db.diskdb, freezer, diskLayerID) - if err != nil { - log.Crit("Failed to truncate extra state histories", "err", err) - } - if pruned != 0 { - log.Warn("Truncated extra state histories", "number", pruned) - } - } + // Repair the state history, which might not be aligned with the state + // in the key-value store due to an unclean shutdown. + if err := db.repairHistory(); err != nil { + log.Crit("Failed to repair pathdb", "err", err) } // Disable database in case node is still in the initial state sync stage. 
if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly { @@ -211,6 +176,55 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { return db } +// repairHistory truncates leftover state history objects, which may occur due +// to an unclean shutdown or other unexpected reasons. +func (db *Database) repairHistory() error { + // Open the freezer for state history. This mechanism ensures that + // only one database instance can be opened at a time to prevent + // accidental mutation. + ancient, err := db.diskdb.AncientDatadir() + if err != nil { + // TODO error out if ancient store is disabled. A tons of unit tests + // disable the ancient store thus the error here will immediately fail + // all of them. Fix the tests first. + return nil + } + freezer, err := rawdb.NewStateFreezer(ancient, false) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + db.freezer = freezer + + // Reset the entire state histories if the trie database is not initialized + // yet. This action is necessary because these state histories are not + // expected to exist without an initialized trie database. + id := db.tree.bottom().stateID() + if id == 0 { + frozen, err := db.freezer.Ancients() + if err != nil { + log.Crit("Failed to retrieve head of state history", "err", err) + } + if frozen != 0 { + err := db.freezer.Reset() + if err != nil { + log.Crit("Failed to reset state histories", "err", err) + } + log.Info("Truncated extraneous state history") + } + return nil + } + // Truncate the extra state histories above in freezer in case it's not + // aligned with the disk layer. It might happen after a unclean shutdown. + pruned, err := truncateFromHead(db.diskdb, db.freezer, id) + if err != nil { + log.Crit("Failed to truncate extra state histories", "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra state histories", "number", pruned) + } + return nil +} + // Update adds a new layer into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). Apart // from that this function will flatten the extra diff layers at bottom into disk diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 7099b2b381..3663cbbdb9 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -472,8 +472,8 @@ func (h *history) decode(accountData, storageData, accountIndexes, storageIndexe } // readHistory reads and decodes the state history object by the given id. 
-func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) { - blob := rawdb.ReadStateHistoryMeta(freezer, id) +func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) { + blob := rawdb.ReadStateHistoryMeta(reader, id) if len(blob) == 0 { return nil, fmt.Errorf("state history not found %d", id) } @@ -483,10 +483,10 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } var ( dec = history{meta: &m} - accountData = rawdb.ReadStateAccountHistory(freezer, id) - storageData = rawdb.ReadStateStorageHistory(freezer, id) - accountIndexes = rawdb.ReadStateAccountIndex(freezer, id) - storageIndexes = rawdb.ReadStateStorageIndex(freezer, id) + accountData = rawdb.ReadStateAccountHistory(reader, id) + storageData = rawdb.ReadStateStorageHistory(reader, id) + accountIndexes = rawdb.ReadStateAccountIndex(reader, id) + storageIndexes = rawdb.ReadStateStorageIndex(reader, id) ) if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { return nil, err @@ -495,7 +495,7 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } // writeHistory persists the state history with the provided state set. -func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { +func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { // Short circuit if state set is not available. if dl.states == nil { return errors.New("state change set is not available") @@ -509,7 +509,7 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) // Write history data into five freezer table respectively. - rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) historyDataBytesMeter.Mark(int64(dataSize)) historyIndexBytesMeter.Mark(int64(indexSize)) @@ -521,13 +521,13 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { // checkHistories retrieves a batch of meta objects with the specified range // and performs the callback on each item. -func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error { +func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error { for count > 0 { number := count if number > 10000 { number = 10000 // split the big read into small chunks } - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number) + blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number) if err != nil { return err } @@ -548,12 +548,12 @@ func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check // truncateFromHead removes the extra state histories from the head with the given // parameters. It returns the number of items removed from the head. 
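truncateFromHead (and its tail counterpart below) ultimately defer to the ancient store's TruncateHead and TruncateTail, whose retention semantics can be sanity-checked against any implementation, for instance the in-memory one. A sketch with a made-up "test" table and errors elided:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	f := rawdb.NewMemoryFreezer(false, map[string]bool{"test": true})
	defer f.Close()

	// Fill items 0..9.
	f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i := uint64(0); i < 10; i++ {
			if err := op.AppendRaw("test", i, []byte{byte(i)}); err != nil {
				return err
			}
		}
		return nil
	})
	f.TruncateTail(3) // items 0,1,2 are gone
	f.TruncateHead(7) // items 7,8,9 are gone

	head, _ := f.Ancients()
	tail, _ := f.Tail()
	fmt.Println("retained items:", tail, "through", head-1) // 3 through 6
}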
-func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -566,7 +566,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead return 0, nil } // Load the meta objects in range [nhead+1, ohead] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead) + blobs, err := rawdb.ReadStateHistoryMetaList(store, nhead+1, ohead-nhead) if err != nil { return 0, err } @@ -581,7 +581,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead if err := batch.Write(); err != nil { return 0, err } - ohead, err = freezer.TruncateHead(nhead) + ohead, err = store.TruncateHead(nhead) if err != nil { return 0, err } @@ -590,12 +590,12 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead // truncateFromTail removes the extra state histories from the tail with the given // parameters. It returns the number of items removed from the tail. -func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -608,7 +608,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail return 0, nil } // Load the meta objects in range [otail+1, ntail] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail) + blobs, err := rawdb.ReadStateHistoryMetaList(store, otail+1, ntail-otail) if err != nil { return 0, err } @@ -623,7 +623,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail if err := batch.Write(); err != nil { return 0, err } - otail, err = freezer.TruncateTail(ntail) + otail, err = store.TruncateTail(ntail) if err != nil { return 0, err } diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index d8a761b916..240474da37 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -21,7 +21,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -34,7 +34,7 @@ type HistoryStats struct { } // sanitizeRange limits the given range to fit within the local history store. -func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. 
tail, err := freezer.Tail() if err != nil { @@ -60,7 +60,7 @@ func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, return first, last, nil } -func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { +func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { var ( stats = &HistoryStats{} init = time.Now() @@ -96,7 +96,7 @@ func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHisto } // accountHistory inspects the account history within the range. -func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, start, end uint64) (*HistoryStats, error) { +func accountHistory(freezer ethdb.AncientReader, address common.Address, start, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { blob, exists := h.accounts[address] if !exists { @@ -108,7 +108,7 @@ func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, st } // storageHistory inspects the storage history within the range. -func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { +func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { slots, exists := h.storages[address] if !exists { @@ -124,7 +124,7 @@ func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, sl } // historyRange returns the block number range of local state histories. -func historyRange(freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. 
tail, err := freezer.Tail() if err != nil { diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go index 81ac768acd..4114aa1185 100644 --- a/triedb/pathdb/history_test.go +++ b/triedb/pathdb/history_test.go @@ -102,7 +102,7 @@ func TestEncodeDecodeHistory(t *testing.T) { } } -func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) { +func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) { blob := rawdb.ReadStateHistoryMeta(freezer, id) if exist && len(blob) == 0 { t.Fatalf("Failed to load trie history, %d", id) @@ -118,7 +118,7 @@ func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.Resettab } } -func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) { +func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) { for i, j := from, 0; i <= to; i, j = i+1, j+1 { checkHistory(t, db, freezer, i, roots[j], exist) } @@ -129,7 +129,7 @@ func TestTruncateHeadHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -157,7 +157,7 @@ func TestTruncateTailHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -200,7 +200,7 @@ func TestTruncateTailHistories(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) ) defer freezer.Close() @@ -228,7 +228,7 @@ func TestTruncateOutOfRange(t *testing.T) { var ( hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -268,11 +268,6 @@ func TestTruncateOutOfRange(t *testing.T) { } } -// openFreezer initializes the freezer instance for storing state histories. 
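The openFreezer indirection is being dropped here; tests and tooling now open the state-history freezer straight from rawdb and only ever see it behind the ethdb interfaces. A minimal sketch using a scratch directory:

package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	dir, err := os.MkdirTemp("", "statefreezer")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// The concrete resettable freezer type is no longer exported; callers
	// simply get back an ancient store rooted in the given directory.
	freezer, err := rawdb.NewStateFreezer(dir, false)
	if err != nil {
		panic(err)
	}
	defer freezer.Close()

	head, _ := freezer.Ancients()
	tail, _ := freezer.Tail()
	fmt.Println("state history range:", tail, "-", head)
}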
-func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) { - return rawdb.NewStateFreezer(datadir, readOnly) -} - func compareSet[k comparable](a, b map[k][]byte) bool { if len(a) != len(b) { return false From 242b24af9f21cd4b35e2e609b12371f41528da3d Mon Sep 17 00:00:00 2001 From: Martin HS Date: Tue, 30 Apr 2024 13:51:04 +0200 Subject: [PATCH 21/64] trie/trienode: minor speedup in nodeset merging (#29683) --- trie/trienode/node.go | 7 ++++- trie/trienode/node_test.go | 61 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 trie/trienode/node_test.go diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 055db8822e..bc93e3ca88 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -114,7 +114,12 @@ func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error { set.updates -= 1 } } - set.AddNode([]byte(path), node) + if node.IsDeleted() { + set.deletes += 1 + } else { + set.updates += 1 + } + set.Nodes[path] = node } return nil } diff --git a/trie/trienode/node_test.go b/trie/trienode/node_test.go new file mode 100644 index 0000000000..bcb3a2202b --- /dev/null +++ b/trie/trienode/node_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see + +package trienode + +import ( + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func BenchmarkMerge(b *testing.B) { + b.Run("1K", func(b *testing.B) { + benchmarkMerge(b, 1000) + }) + b.Run("10K", func(b *testing.B) { + benchmarkMerge(b, 10_000) + }) +} + +func benchmarkMerge(b *testing.B, count int) { + x := NewNodeSet(common.Hash{}) + y := NewNodeSet(common.Hash{}) + addNode := func(s *NodeSet) { + path := make([]byte, 4) + rand.Read(path) + blob := make([]byte, 32) + rand.Read(blob) + hash := crypto.Keccak256Hash(blob) + s.AddNode(path, New(hash, blob)) + } + for i := 0; i < count; i++ { + // Random path of 4 nibbles + addNode(x) + addNode(y) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Store set x into a backup + z := NewNodeSet(common.Hash{}) + z.Merge(common.Hash{}, x.Nodes) + // Merge y into x + x.Merge(common.Hash{}, y.Nodes) + x = z + } +} From ea89f9adf0ace09c46e790bf8c38414f7b90af69 Mon Sep 17 00:00:00 2001 From: Aaron Chen Date: Tue, 30 Apr 2024 20:08:13 +0800 Subject: [PATCH 22/64] core/vm: remove a redundant zero check in opAddmod (#29672) --- core/vm/instructions.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index a062bb15ff..f37ee004dc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -173,11 +173,7 @@ func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() - if z.IsZero() { - z.Clear() - } else { - z.AddMod(&x, &y, z) - } + z.AddMod(&x, &y, z) return nil, nil } From 7c7e3a77fced21b098841aa3cb3663de0c85f951 Mon Sep 17 00:00:00 2001 From: Dragan Milic Date: Tue, 30 Apr 2024 14:33:22 +0200 Subject: [PATCH 23/64] eth/tracers/native: fix flatCallTracer Stop() bug (#29623) Co-authored-by: Sina Mahmoodi --- eth/tracers/native/call_flat.go | 18 +++++++- eth/tracers/native/call_flat_test.go | 64 ++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 eth/tracers/native/call_flat_test.go diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index f8d38ddd2d..ce0fb08114 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -23,6 +23,7 @@ import ( "math/big" "slices" "strings" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -114,7 +115,7 @@ type flatCallTracer struct { tracer *callTracer config flatCallTracerConfig ctx *tracers.Context // Holds tracer context data - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption activePrecompiles []common.Address // Updated on tx start based on given rules } @@ -154,6 +155,9 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trac // OnEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
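The opAddmod simplification above leans on uint256's AddMod already mapping a zero modulus to a zero result, which is the EVM's ADDMOD convention, so the explicit IsZero branch was redundant. A tiny stand-alone check of that assumption:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	x := uint256.NewInt(7)
	y := uint256.NewInt(5)

	// A zero modulus yields a zero result directly, matching the EVM's
	// ADDMOD definition, so no pre-check is needed in the opcode handler.
	res := new(uint256.Int).AddMod(x, y, uint256.NewInt(0))
	fmt.Println(res.IsZero()) // true

	// Regular case: (7 + 5) mod 5 == 2.
	fmt.Println(new(uint256.Int).AddMod(x, y, uint256.NewInt(5)).Uint64())
}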
func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if t.interrupt.Load() { + return + } t.tracer.OnEnter(depth, typ, from, to, input, gas, value) if depth == 0 { @@ -169,6 +173,9 @@ func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to co // OnExit is called when EVM exits a scope, even if the scope didn't // execute any code. func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if t.interrupt.Load() { + return + } t.tracer.OnExit(depth, output, gasUsed, err, reverted) if depth == 0 { @@ -194,6 +201,9 @@ func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err er } func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + if t.interrupt.Load() { + return + } t.tracer.OnTxStart(env, tx, from) // Update list of precompiles based on current block rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time) @@ -201,6 +211,9 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction } func (t *flatCallTracer) OnTxEnd(receipt *types.Receipt, err error) { + if t.interrupt.Load() { + return + } t.tracer.OnTxEnd(receipt, err) } @@ -219,12 +232,13 @@ func (t *flatCallTracer) GetResult() (json.RawMessage, error) { if err != nil { return nil, err } - return res, t.reason + return res, t.tracer.reason } // Stop terminates execution of the tracer at the first opportune moment. func (t *flatCallTracer) Stop(err error) { t.tracer.Stop(err) + t.interrupt.Store(true) } // isPrecompiled returns whether the addr is a precompile. diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go new file mode 100644 index 0000000000..d5481b868b --- /dev/null +++ b/eth/tracers/native/call_flat_test.go @@ -0,0 +1,64 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package native_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestCallFlatStop(t *testing.T) { + tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil) + require.NoError(t, err) + + // this error should be returned by GetResult + stopError := errors.New("stop error") + + // simulate a transaction + tx := types.NewTx(&types.LegacyTx{ + Nonce: 0, + To: &common.Address{}, + Value: big.NewInt(0), + Gas: 0, + GasPrice: big.NewInt(0), + Data: nil, + }) + + tracer.OnTxStart(&tracing.VMContext{ + ChainConfig: params.MainnetChainConfig, + }, tx, common.Address{}) + + tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0)) + + // stop before the transaction is finished + tracer.Stop(stopError) + + tracer.OnTxEnd(&types.Receipt{GasUsed: 0}, nil) + + // check that the error is returned by GetResult + _, tracerError := tracer.GetResult() + require.Equal(t, stopError, tracerError) +} From bd6bc37eec15542b8c27004a8018ef27be71931d Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 30 Apr 2024 14:35:48 +0200 Subject: [PATCH 24/64] core/vm: add subgroup checks for mul/mulexp for G1/G2 (#29637) --- core/vm/contracts.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 5f7de8007b..527d9f4f47 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -705,6 +705,8 @@ func (c *bls12381G1Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 p0.Add(p0, p1) @@ -734,6 +736,11 @@ func (c *bls12381G1Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG1(input[:128]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[128:]) @@ -787,6 +794,11 @@ func (c *bls12381G1MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) @@ -827,6 +839,8 @@ func (c *bls12381G2Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 r := new(bls12381.G2Affine) r.Add(p0, p1) @@ -857,6 +871,11 @@ func (c *bls12381G2Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG2(input[:256]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[256:]) @@ -910,6 +929,11 @@ func (c *bls12381G2MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. 
+ if !p.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) From 5e070545891961a353694682f9fa3f095e1b7d73 Mon Sep 17 00:00:00 2001 From: Martin HS Date: Tue, 30 Apr 2024 14:48:54 +0200 Subject: [PATCH 25/64] internal/ethapi: listen to ctx cancellation in access list (#29686) --- internal/ethapi/api.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 4b5145f5de..d308cead62 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1524,6 +1524,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH prevTracer = logger.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles) } for { + if err := ctx.Err(); err != nil { + return nil, 0, nil, err + } // Retrieve the current access list to expand accessList := prevTracer.AccessList() log.Trace("Creating access list", "input", accessList) From 2e8e35f2ada60e8e90aff8f6374ea7cd7da16116 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 30 Apr 2024 06:55:08 -0600 Subject: [PATCH 26/64] all: refactor so `NewBlock`, `WithBody` take `types.Body` (#29482) * all: refactor so NewBlock(..) and WithBody(..) take a types.Body * core: fixup comments, remove txs != receipts panic * core/types: add empty withdrawls to body if len == 0 --- beacon/engine/types.go | 2 +- beacon/types/exec_payload.go | 4 +- cmd/evm/internal/t8ntool/block.go | 2 +- consensus/beacon/consensus.go | 2 +- consensus/clique/clique.go | 2 +- consensus/ethash/consensus.go | 2 +- core/genesis.go | 2 +- core/rawdb/accessors_chain.go | 14 ++++- core/rawdb/accessors_chain_test.go | 2 +- core/rawdb/accessors_indexes_test.go | 2 +- core/rawdb/chain_iterator_test.go | 8 +-- core/state_processor_test.go | 5 +- core/txpool/legacypool/legacypool_test.go | 2 +- core/types/block.go | 68 +++++++++-------------- core/types/block_test.go | 2 +- eth/catalyst/api_test.go | 6 +- eth/downloader/downloader.go | 6 +- eth/downloader/queue.go | 9 +++ ethclient/ethclient.go | 7 ++- internal/era/era.go | 2 +- internal/era/iterator.go | 2 +- internal/ethapi/api_test.go | 4 +- miner/miner_test.go | 2 +- 23 files changed, 81 insertions(+), 76 deletions(-) diff --git a/beacon/engine/types.go b/beacon/engine/types.go index fc77c13af7..a73691ca05 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -250,7 +250,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, BlobGasUsed: params.BlobGasUsed, ParentBeaconRoot: beaconRoot, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: params.Withdrawals}) if block.Hash() != params.BlockHash { return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash()) } diff --git a/beacon/types/exec_payload.go b/beacon/types/exec_payload.go index 718f98f529..4448f854ad 100644 --- a/beacon/types/exec_payload.go +++ b/beacon/types/exec_payload.go @@ -63,9 +63,7 @@ func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*typ panic("unsupported block type") } - block := types.NewBlockWithHeader(&header) - block = block.WithBody(transactions, nil) - block = block.WithWithdrawals(withdrawals) + block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: 
transactions, Withdrawals: withdrawals}) if hash := block.Hash(); hash != expectedHash { return nil, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash) } diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 62c8593a1d..37a6db9ffc 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -160,7 +160,7 @@ func (i *bbInput) ToBlock() *types.Block { if i.Header.Difficulty != nil { header.Difficulty = i.Header.Difficulty } - return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) + return types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: i.Txs, Uncles: i.Ommers, Withdrawals: i.Withdrawals}) } // SealBlock seals the given block using the configured engine. diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 4e3fbeb09a..b8946e0c71 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -388,7 +388,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(true) // Assemble and return the final block. - return types.NewBlockWithWithdrawals(header, body.Transactions, body.Uncles, receipts, body.Withdrawals, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil } // Seal generates a new sealing request for the given input block and pushes diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index b5727fc666..c9e9484002 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -597,7 +597,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header * header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Assemble and return the final block for sealing. - return types.NewBlock(header, body.Transactions, nil, receipts, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 9800bf9288..b5e2754c2d 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -520,7 +520,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, body.Transactions, body.Uncles, receipts, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil } // SealHash returns the hash of a block prior to it being sealed. diff --git a/core/genesis.go b/core/genesis.go index f05e84199a..42836e0269 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -476,7 +476,7 @@ func (g *Genesis) ToBlock() *types.Block { } } } - return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals) + return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil)) } // Commit writes the block and state of a genesis specification to the database. 
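
As an aside on the consolidated constructor used throughout this patch: the sketch below assembles a post-Shanghai block through the new NewBlock(header, body, receipts, hasher) signature. It is a minimal illustration only; the buildBlock helper, the header values and the empty withdrawal list are invented here, while the nil-versus-empty withdrawals behaviour matches the constructor changes shown in core/types/block.go below.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// buildBlock shows the consolidated constructor: every body element now
// travels inside types.Body instead of separate txs/uncles/withdrawals
// arguments.
func buildBlock(header *types.Header, txs []*types.Transaction, wds []*types.Withdrawal, receipts []*types.Receipt) *types.Block {
	body := &types.Body{
		Transactions: txs,
		// nil leaves WithdrawalsHash unset (pre-Shanghai), an empty slice
		// sets it to EmptyWithdrawalsHash.
		Withdrawals: wds,
	}
	return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
}

func main() {
	header := &types.Header{Number: big.NewInt(1)}
	block := buildBlock(header, nil, []*types.Withdrawal{}, nil)
	fmt.Println(block.Hash())
}
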
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 5a4af5bb87..025be7ade7 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -753,7 +753,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block { if body == nil { return nil } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithWithdrawals(body.Withdrawals) + return types.NewBlockWithHeader(header).WithBody(*body) } // WriteBlock serializes a block into the database, header and body separately. @@ -843,7 +843,11 @@ func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block { } for _, bad := range badBlocks { if bad.Header.Hash() == hash { - return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals) + block := types.NewBlockWithHeader(bad.Header) + if bad.Body != nil { + block = block.WithBody(*bad.Body) + } + return block } } return nil @@ -862,7 +866,11 @@ func ReadAllBadBlocks(db ethdb.Reader) []*types.Block { } var blocks []*types.Block for _, bad := range badBlocks { - blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals)) + block := types.NewBlockWithHeader(bad.Header) + if bad.Body != nil { + block = block.WithBody(*bad.Body) + } + blocks = append(blocks, block) } return blocks } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index a7ceb72998..fdc940b57e 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -640,7 +640,7 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block { Number: big.NewInt(int64(i)), Extra: []byte("test block"), } - blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil) + blocks[i] = types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) blocks[i].Hash() // pre-cache the block hash } return blocks diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 124389ba7a..78dba000fc 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -76,7 +76,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, &types.Body{Transactions: txs}, nil, newTestHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 78b0a82e10..390424f673 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) { }) } txs = 
append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -111,7 +111,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -138,7 +138,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index dc9cb203bc..e98d27eb92 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -417,10 +417,11 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr header.ParentBeaconRoot = &beaconRoot } // Assemble and return the final block for sealing + body := &types.Body{Transactions: txs} if config.IsShanghai(header.Number, header.Time) { - return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil)) + body.Withdrawals = []*types.Withdrawal{} } - return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) + return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)) } var ( diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 68d7b6f411..c86991c942 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -87,7 +87,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { diff --git a/core/types/block.go b/core/types/block.go index 53054f52d3..4857cd6e50 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -23,6 +23,7 @@ import ( "io" "math/big" "reflect" + "slices" "sync/atomic" "time" @@ -217,13 +218,19 @@ type extblock struct { // NewBlock creates a new block. The input data is copied, changes to header and to the // field values will not affect the block. // -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header -// are ignored and set to values derived from the given txs, uncles -// and receipts. -func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block { - b := &Block{header: CopyHeader(header)} +// The body elements and the receipts are used to recompute and overwrite the +// relevant portions of the header. 
+func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher) *Block { + if body == nil { + body = &Body{} + } + var ( + b = NewBlockWithHeader(header) + txs = body.Transactions + uncles = body.Uncles + withdrawals = body.Withdrawals + ) - // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { b.header.TxHash = EmptyTxsHash } else { @@ -249,27 +256,18 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* } } - return b -} - -// NewBlockWithWithdrawals creates a new block with withdrawals. The input data is copied, -// changes to header and to the field values will not affect the block. -// -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header are ignored and set to -// values derived from the given txs, uncles and receipts. -func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, hasher TrieHasher) *Block { - b := NewBlock(header, txs, uncles, receipts, hasher) - if withdrawals == nil { b.header.WithdrawalsHash = nil } else if len(withdrawals) == 0 { b.header.WithdrawalsHash = &EmptyWithdrawalsHash + b.withdrawals = Withdrawals{} } else { - h := DeriveSha(Withdrawals(withdrawals), hasher) - b.header.WithdrawalsHash = &h + hash := DeriveSha(Withdrawals(withdrawals), hasher) + b.header.WithdrawalsHash = &hash + b.withdrawals = slices.Clone(withdrawals) } - return b.WithWithdrawals(withdrawals) + return b } // CopyHeader creates a deep copy of a block header. @@ -453,31 +451,17 @@ func (b *Block) WithSeal(header *Header) *Block { } } -// WithBody returns a copy of the block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { - block := &Block{ - header: b.header, - transactions: make([]*Transaction, len(transactions)), - uncles: make([]*Header, len(uncles)), - withdrawals: b.withdrawals, - } - copy(block.transactions, transactions) - for i := range uncles { - block.uncles[i] = CopyHeader(uncles[i]) - } - return block -} - -// WithWithdrawals returns a copy of the block containing the given withdrawals. -func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { +// WithBody returns a new block with the original header and a deep copy of the +// provided body. 
+func (b *Block) WithBody(body Body) *Block { block := &Block{ header: b.header, - transactions: b.transactions, - uncles: b.uncles, + transactions: slices.Clone(body.Transactions), + uncles: make([]*Header, len(body.Uncles)), + withdrawals: slices.Clone(body.Withdrawals), } - if withdrawals != nil { - block.withdrawals = make([]*Withdrawal, len(withdrawals)) - copy(block.withdrawals, withdrawals) + for i := range body.Uncles { + block.uncles[i] = CopyHeader(body.Uncles[i]) } return block } diff --git a/core/types/block_test.go b/core/types/block_test.go index 982d002242..1af5b9d7bf 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -254,7 +254,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) + return NewBlock(header, &Body{Transactions: txs, Uncles: uncles}, receipts, blocktest.NewHasher()) } func TestRlpDecodeParentHash(t *testing.T) { diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index b8645d6be4..0586959f06 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -779,7 +779,7 @@ func setBlockhash(data *engine.ExecutableData) *engine.ExecutableData { Extra: data.ExtraData, MixDigest: data.Random, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() return data } @@ -935,7 +935,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) { Extra: data.ExtraData, MixDigest: data.Random, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() // Send the new payload resp2, err := api.NewPayloadV1(data) @@ -1554,7 +1554,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) { }, } - block := types.NewBlock(&header, txs, nil, nil, trie.NewStackTrie(nil)) + block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil)) envelope := engine.BlockToExecutableData(block, nil, sidecars) var want int for _, tx := range txs { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 941f575aa8..c5b56e91dd 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1504,7 +1504,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { ) blocks := make([]*types.Block, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) } // Downloaded blocks are always regarded as trusted after the // transition. 
Because the downloaded chain is guided by the @@ -1726,7 +1726,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state blocks := make([]*types.Block, len(results)) receipts := make([]types.Receipts, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) receipts[i] = result.Receipts } if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil { @@ -1737,7 +1737,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state } func (d *Downloader) commitPivotBlock(result *fetchResult) error { - block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + block := types.NewBlockWithHeader(result.Header).WithBody(result.body()) log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) // Commit the pivot block as the new head, will require full sync from here on diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 6ff858d755..267c23407f 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -87,6 +87,15 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult { return item } +// body returns a representation of the fetch result as a types.Body object. +func (f *fetchResult) body() types.Body { + return types.Body{ + Transactions: f.Transactions, + Uncles: f.Uncles, + Withdrawals: f.Withdrawals, + } +} + // SetBodyDone flags the body as finished. func (f *fetchResult) SetBodyDone() { if v := f.pending.Load(); (v & (1 << bodyType)) != 0 { diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 5c3cb79dd6..390f085677 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -191,7 +191,12 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface } txs[i] = tx.tx } - return types.NewBlockWithHeader(head).WithBody(txs, uncles).WithWithdrawals(body.Withdrawals), nil + return types.NewBlockWithHeader(head).WithBody( + types.Body{ + Transactions: txs, + Uncles: uncles, + Withdrawals: body.Withdrawals, + }), nil } // HeaderByHash returns the block header with the given hash. diff --git a/internal/era/era.go b/internal/era/era.go index 2b9e622901..6ad7339b36 100644 --- a/internal/era/era.go +++ b/internal/era/era.go @@ -151,7 +151,7 @@ func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) { if err := rlp.Decode(r, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Accumulator reads the accumulator entry in the Era1 file. diff --git a/internal/era/iterator.go b/internal/era/iterator.go index cc4f27c201..f48aab46b4 100644 --- a/internal/era/iterator.go +++ b/internal/era/iterator.go @@ -73,7 +73,7 @@ func (it *Iterator) Block() (*types.Block, error) { if err := rlp.Decode(it.inner.Body, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Receipts returns the receipts for the iterator's current position. 
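
The era and rawdb hunks above all reduce to the same pattern: decode a types.Body and hand it to WithBody in a single call instead of chaining WithBody and WithWithdrawals. A minimal standalone sketch of that round trip follows; the reassemble helper and the empty-body example are illustrative, not part of the patch.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// reassemble rebuilds a block from a stored header and an RLP-encoded body,
// mirroring the single WithBody call now used by rawdb.ReadBlock and the era
// reader.
func reassemble(header *types.Header, bodyRLP []byte) (*types.Block, error) {
	var body types.Body
	if err := rlp.Decode(bytes.NewReader(bodyRLP), &body); err != nil {
		return nil, err
	}
	// One call carries transactions, uncles and withdrawals together.
	return types.NewBlockWithHeader(header).WithBody(body), nil
}

func main() {
	// Round-trip an empty body just to exercise the helper.
	enc, _ := rlp.EncodeToBytes(&types.Body{})
	block, err := reassemble(&types.Header{}, enc)
	fmt.Println(block.Hash(), err)
}
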
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 1f62d0c6bd..a717ebdfae 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -1348,7 +1348,7 @@ func TestRPCMarshalBlock(t *testing.T) { } txs = append(txs, tx) } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, &types.Body{Transactions: txs}, nil, blocktest.NewHasher()) var testSuite = []struct { inclTx bool @@ -1559,7 +1559,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) { Address: common.Address{0x12, 0x34}, Amount: 10, } - pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher()) + pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, &types.Body{Transactions: types.Transactions{tx}, Withdrawals: types.Withdrawals{withdrawal}}, nil, blocktest.NewHasher()) ) backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] diff --git a/miner/miner_test.go b/miner/miner_test.go index 7c39564240..da133ad8d0 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -78,7 +78,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { From 45baf21111c03d2954c81fdf828e630a8d7b05c1 Mon Sep 17 00:00:00 2001 From: jwasinger Date: Tue, 30 Apr 2024 06:46:53 -0700 Subject: [PATCH 27/64] eth/downloader: purge pre-merge sync code (#29281) This PR removes pre-merge sync logic from the downloader. Now-irrelevant tests are removed and others have been updated. --- eth/downloader/beaconsync.go | 6 +- eth/downloader/downloader.go | 872 ++---------------- eth/downloader/downloader_test.go | 836 +++-------------- eth/downloader/fetchers.go | 45 - eth/downloader/fetchers_concurrent.go | 34 +- eth/downloader/fetchers_concurrent_bodies.go | 1 - eth/downloader/fetchers_concurrent_headers.go | 97 -- eth/downloader/testchain_test.go | 1 - 8 files changed, 182 insertions(+), 1710 deletions(-) delete mode 100644 eth/downloader/fetchers_concurrent_headers.go diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 7dfc419f4e..8088f16af9 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -106,7 +106,7 @@ func (b *beaconBackfiller) resume() { }() // If the downloader fails, report an error as in beacon chain mode there // should be no errors as long as the chain we're syncing to is valid. - if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil { + if err := b.downloader.synchronise(mode, b.started); err != nil { log.Error("Beacon backfilling failed", "err", err) return } @@ -268,9 +268,9 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { return start, nil } -// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling +// fetchHeaders feeds skeleton headers to the downloader queue for scheduling // until sync errors or is finished. 
-func (d *Downloader) fetchBeaconHeaders(from uint64) error { +func (d *Downloader) fetchHeaders(from uint64) error { var head *types.Header _, tail, _, err := d.skeleton.Bounds() if err != nil { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index c5b56e91dd..bb083260e4 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -41,17 +41,14 @@ import ( var ( MaxBlockFetch = 128 // Number of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Number of block headers to be fetched per retrieval request - MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 2048 // Number of content download results to import at once into the chain - fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) + maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxHeadersProcess = 2048 // Number of header download results to import at once into the chain + maxResultsProcess = 2048 // Number of content download results to import at once into the chain + fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection - reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs + reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download @@ -59,24 +56,16 @@ var ( ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errUnsyncedPeer = errors.New("unsynced peer") - errNoPeers = errors.New("no peers to keep download active") + errBusy = errors.New("busy") + errBadPeer = errors.New("action from bad peer ignored") + errTimeout = errors.New("timeout") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") errInvalidChain = errors.New("retrieved hash chain is invalid") errInvalidBody = errors.New("retrieved block body is invalid") errInvalidReceipt = errors.New("retrieved receipt is invalid") errCancelStateFetch = errors.New("state data download canceled (requested)") errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") - errTooOld = errors.New("peer's protocol version too old") - errNoAncestorFound = errors.New("no common ancestor found") errNoPivotHeader = errors.New("pivot header is not found") ErrMergeTransition = errors.New("legacy 
sync reached the merge") ) @@ -99,9 +88,8 @@ type Downloader struct { mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events - genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) - queue *queue // Scheduler for selecting the hashes to download - peers *peerSet // Set of active peers from which download can proceed + queue *queue // Scheduler for selecting the hashes to download + peers *peerSet // Set of active peers from which download can proceed stateDB ethdb.Database // Database to state sync into (and deduplicate via) @@ -118,11 +106,10 @@ type Downloader struct { badBlock badBlockFn // Reports a block as rejected by the chain // Status - synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising atomic.Bool - notified atomic.Bool - committed atomic.Bool - ancientLimit uint64 // The maximum block number which can be regarded as ancient data. + synchronising atomic.Bool + notified atomic.Bool + committed atomic.Bool + ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels headerProcCh chan *headerTask // Channel to feed the header processor new tasks @@ -138,7 +125,6 @@ type Downloader struct { stateSyncStart chan *stateSync // Cancellation and termination - cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. @@ -147,7 +133,6 @@ type Downloader struct { quitLock sync.Mutex // Lock to prevent double closes // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) @@ -326,39 +311,10 @@ func (d *Downloader) UnregisterPeer(id string) error { return nil } -// LegacySync tries to sync up our local block chain with a remote peer, both -// adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { - err := d.synchronise(id, head, td, ttd, mode, false, nil) - - switch err { - case nil, errBusy, errCanceled: - return err - } - if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || - errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || - errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { - log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) - } else { - d.dropPeer(id) - } - return err - } - if errors.Is(err, ErrMergeTransition) { - return err // This is an expected fault, don't keep printing it in a spin-loop - } - log.Warn("Synchronisation failed, retrying", "err", err) - return err -} - // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { +func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error { // The beacon header syncer is async. It will start this synchronization and // will continue doing other tasks. However, if synchronization needs to be // cancelled, the syncer needs to know if we reached the startup point (and @@ -373,10 +329,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, } }() } - // Mock out the synchronisation if testing - if d.synchroniseMock != nil { - return d.synchroniseMock(id, hash) - } // Make sure only one goroutine is ever allowed past this point at once if !d.synchronising.CompareAndSwap(false, true) { return errBusy @@ -424,7 +376,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Create cancel channel for aborting mid-flight and mark the master peer d.cancelLock.Lock() d.cancelCh = make(chan struct{}) - d.cancelPeer = id d.cancelLock.Unlock() defer d.Cancel() // No matter what, we can't leave the cancel channel open @@ -432,27 +383,19 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Atomically set the requested sync mode d.mode.Store(uint32(mode)) - // Retrieve the origin peer and initiate the downloading process - var p *peerConnection - if !beaconMode { // Beacon mode doesn't need a peer to sync from - p = d.peers.Peer(id) - if p == nil { - return errUnknownPeer - } - } if beaconPing != nil { close(beaconPing) } - return d.syncWithPeer(p, hash, td, ttd, beaconMode) + return d.syncToHead() } func (d *Downloader) getMode() SyncMode { return SyncMode(d.mode.Load()) } -// syncWithPeer starts a block synchronization based on the hash chain from the -// specified peer and head hash. -func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { +// syncToHead starts a block synchronization based on the hash chain from +// the specified head hash. 
+func (d *Downloader) syncToHead() (err error) { d.mux.Post(StartEvent{}) defer func() { // reset on error @@ -465,52 +408,39 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * }() mode := d.getMode() - if !beaconMode { - log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) - } else { - log.Debug("Backfilling with the network", "mode", mode) - } + log.Debug("Backfilling with the network", "mode", mode) defer func(start time.Time) { log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) }(time.Now()) // Look up the sync boundaries: the common ancestor and the target block var latest, pivot, final *types.Header - if !beaconMode { - // In legacy mode, use the master peer to retrieve the headers from - latest, pivot, err = d.fetchHead(p) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain to retrieve the headers from - latest, _, final, err = d.skeleton.Bounds() - if err != nil { - return err - } - if latest.Number.Uint64() > uint64(fsMinFullBlocks) { - number := latest.Number.Uint64() - uint64(fsMinFullBlocks) - - // Retrieve the pivot header from the skeleton chain segment but - // fallback to local chain if it's not found in skeleton space. - if pivot = d.skeleton.Header(number); pivot == nil { - _, oldest, _, _ := d.skeleton.Bounds() // error is already checked - if number < oldest.Number.Uint64() { - count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks - headers := d.readHeaderRange(oldest, count) - if len(headers) == count { - pivot = headers[len(headers)-1] - log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) - } + latest, _, final, err = d.skeleton.Bounds() + if err != nil { + return err + } + if latest.Number.Uint64() > uint64(fsMinFullBlocks) { + number := latest.Number.Uint64() - uint64(fsMinFullBlocks) + + // Retrieve the pivot header from the skeleton chain segment but + // fallback to local chain if it's not found in skeleton space. + if pivot = d.skeleton.Header(number); pivot == nil { + _, oldest, _, _ := d.skeleton.Bounds() // error is already checked + if number < oldest.Number.Uint64() { + count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks + headers := d.readHeaderRange(oldest, count) + if len(headers) == count { + pivot = headers[len(headers)-1] + log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) } } - // Print an error log and return directly in case the pivot header - // is still not found. It means the skeleton chain is not linked - // correctly with local chain. - if pivot == nil { - log.Error("Pivot header is not found", "number", number) - return errNoPivotHeader - } + } + // Print an error log and return directly in case the pivot header + // is still not found. It means the skeleton chain is not linked + // correctly with local chain. 
+ if pivot == nil { + log.Error("Pivot header is not found", "number", number) + return errNoPivotHeader } } // If no pivot block was returned, the head is below the min full block @@ -522,19 +452,10 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } height := latest.Number.Uint64() - var origin uint64 - if !beaconMode { - // In legacy mode, reach out to the network and find the ancestor - origin, err = d.findAncestor(p, latest) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain for the ancestor lookup - origin, err = d.findBeaconAncestor() - if err != nil { - return err - } + // In beacon mode, use the skeleton chain for the ancestor lookup + origin, err := d.findBeaconAncestor() + if err != nil { + return err } d.syncStatsLock.Lock() if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { @@ -577,24 +498,15 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * // the ancientLimit through that. Otherwise calculate the ancient limit through // the advertised height of the remote peer. This most is mostly a fallback for // legacy networks, but should eventually be dropped. TODO(karalabe). - if beaconMode { - // Beacon sync, use the latest finalized block as the ancient limit - // or a reasonable height if no finalized block is yet announced. - if final != nil { - d.ancientLimit = final.Number.Uint64() - } else if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + // + // Beacon sync, use the latest finalized block as the ancient limit + // or a reasonable height if no finalized block is yet announced. + if final != nil { + d.ancientLimit = final.Number.Uint64() + } else if height > fullMaxForkAncestry+1 { + d.ancientLimit = height - fullMaxForkAncestry - 1 } else { - // Legacy sync, use the best announcement we have from the remote peer. - // TODO(karalabe): Drop this pathway. - if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + d.ancientLimit = 0 } frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. 
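
For clarity, the ancient-limit rule that the hunk above keeps for beacon sync can be read in isolation as follows. This is a sketch only: the free ancientLimit function and the sample numbers are invented, while the finalized-block and FullImmutabilityThreshold fallbacks mirror the diff.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// ancientLimit returns the highest block number regarded as ancient data:
// the finalized block if one is known, otherwise everything more than
// FullImmutabilityThreshold blocks behind the head, otherwise nothing.
func ancientLimit(head uint64, final *types.Header) uint64 {
	if final != nil {
		return final.Number.Uint64()
	}
	if head > params.FullImmutabilityThreshold+1 {
		return head - params.FullImmutabilityThreshold - 1
	}
	return 0
}

func main() {
	fmt.Println(ancientLimit(100_000, nil))                                      // falls back to the ancestry bound
	fmt.Println(ancientLimit(100_000, &types.Header{Number: big.NewInt(99_500)})) // uses the finalized block
}
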
@@ -616,22 +528,13 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } // Initiate the sync using a concurrent header and content retrieval algorithm d.queue.Prepare(origin+1, mode) - if d.syncInitHook != nil { - d.syncInitHook(origin, height) - } - var headerFetcher func() error - if !beaconMode { - // In legacy mode, headers are retrieved from the network - headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } - } else { - // In beacon mode, headers are served by the skeleton syncer - headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } - } + + // In beacon mode, headers are served by the skeleton syncer fetchers := []func() error{ - headerFetcher, // Headers are always retrieved - func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync - func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync - func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, + func() error { return d.fetchHeaders(origin + 1) }, // Headers are always retrieved + func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync + func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync + func() error { return d.processHeaders(origin + 1) }, } if mode == SnapSync { d.pivotLock.Lock() @@ -640,7 +543,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) } else if mode == FullSync { - fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) + fetchers = append(fetchers, func() error { return d.processFullSyncContent() }) } return d.spawnSync(fetchers) } @@ -719,540 +622,12 @@ func (d *Downloader) Terminate() { d.Cancel() } -// fetchHead retrieves the head header and prior pivot block (if available) from -// a remote peer. -func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { - p.log.Debug("Retrieving remote chain head") - mode := d.getMode() - - // Request the advertised remote head block and wait for the response - latest, _ := p.peer.Head() - fetch := 1 - if mode == SnapSync { - fetch = 2 // head + pivot headers - } - headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) - if err != nil { - return nil, nil, err - } - // Make sure the peer gave us at least one and at most the requested headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the request. If - // only 1 header was returned, make sure there's no pivot or there was not - // one requested. - head = headers[0] - if len(headers) == 1 { - if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. 
Check the pivot number and return, - pivot = headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil -} - -// calculateRequestSpan calculates what headers to request from a peer when trying to determine the -// common ancestor. -// It returns parameters to be used for peer.RequestHeadersByNumber: -// -// from - starting block number -// count - number of headers to request -// skip - number of headers to skip -// -// and also returns 'max', the last block which is expected to be returned by the remote peers, -// given the (from,count,skip) -func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { - var ( - from int - count int - MaxCount = MaxHeaderFetch / 16 - ) - // requestHead is the highest block that we will ask for. If requestHead is not offset, - // the highest block that we will get is 16 blocks back from head, which means we - // will fetch 14 or 15 blocks unnecessarily in the case the height difference - // between us and the peer is 1-2 blocks, which is most common - requestHead := int(remoteHeight) - 1 - if requestHead < 0 { - requestHead = 0 - } - // requestBottom is the lowest block we want included in the query - // Ideally, we want to include the one just below our own head - requestBottom := int(localHeight - 1) - if requestBottom < 0 { - requestBottom = 0 - } - totalSpan := requestHead - requestBottom - span := 1 + totalSpan/MaxCount - if span < 2 { - span = 2 - } - if span > 16 { - span = 16 - } - - count = 1 + totalSpan/span - if count > MaxCount { - count = MaxCount - } - if count < 2 { - count = 2 - } - from = requestHead - (count-1)*span - if from < 0 { - from = 0 - } - max := from + (count-1)*span - return int64(from), count, span - 1, uint64(max) -} - -// findAncestor tries to locate the common ancestor link of the local chain and -// a remote peers blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N links should already get us a match. -// In the rare scenario when we ended up on a long reorganisation (i.e. none of -// the head links match), we do a binary search to find the common ancestor. -func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { - // Figure out the valid ancestor range to prevent rewrite attacks - var ( - floor = int64(-1) - localHeight uint64 - remoteHeight = remoteHeader.Number.Uint64() - ) - mode := d.getMode() - switch mode { - case FullSync: - localHeight = d.blockchain.CurrentBlock().Number.Uint64() - case SnapSync: - localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64() - default: - localHeight = d.lightchain.CurrentHeader().Number.Uint64() - } - p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) - - // Recap floor value for binary search - maxForkAncestry := fullMaxForkAncestry - if d.getMode() == LightSync { - maxForkAncestry = lightMaxForkAncestry - } - if localHeight >= maxForkAncestry { - // We're above the max reorg threshold, find the earliest fork point - floor = int64(localHeight - maxForkAncestry) - } - // If we're doing a light sync, ensure the floor doesn't go below the CHT, as - // all headers before that point will be missing. 
- if mode == LightSync { - // If we don't know the current CHT position, find it - if d.genesis == 0 { - header := d.lightchain.CurrentHeader() - for header != nil { - d.genesis = header.Number.Uint64() - if floor >= int64(d.genesis)-1 { - break - } - header = d.lightchain.GetHeaderByHash(header.ParentHash) - } - } - // We already know the "genesis" block number, cap floor to that - if floor < int64(d.genesis)-1 { - floor = int64(d.genesis) - 1 - } - } - - ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) - if err == nil { - return ancestor, nil - } - // The returned error was not nil. - // If the error returned does not reflect that a common ancestor was not found, return it. - // If the error reflects that a common ancestor was not found, continue to binary search, - // where the error value will be reassigned. - if !errors.Is(err, errNoAncestorFound) { - return 0, err - } - - ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) - if err != nil { - return 0, err - } - return ancestor, nil -} - -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { - from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) - - p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) - if err != nil { - return 0, err - } - // Wait for the remote response to the head fetch - number, hash := uint64(0), common.Hash{} - - // Make sure the peer actually gave something valid - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := hashes[i] - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - // If the head fetch already found an ancestor, return - if hash != (common.Hash{}) { - if int64(number) <= floor { - p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", number, "hash", hash) - return number, nil - } - return 0, errNoAncestorFound -} - -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { - hash := common.Hash{} - - // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), remoteHeight - if floor > 0 { - start = uint64(floor) - } - p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) - - for 
start+1 < end { - // Split our chain interval in two, and request the hash to cross check - check := (start + end) / 2 - - headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) - if err != nil { - return 0, err - } - // Make sure the peer actually gave something valid - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - // Modify the search interval based on the response - h := hashes[0] - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - continue - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - } - // Ensure valid ancestry and return - if int64(start) <= floor { - p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", start, "hash", hash) - return start, nil -} - -// fetchHeaders keeps retrieving headers concurrently from the number -// requested, until no more are returned, potentially throttling on the way. To -// facilitate concurrency but still protect against malicious nodes sending bad -// headers, we construct a header chain skeleton using the "origin" peer we are -// syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If no one -// can fill in the skeleton - not even the origin peer - it's assumed invalid and -// the origin is dropped. 
-func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { - p.log.Debug("Directing header downloads", "origin", from) - defer p.log.Debug("Header download terminated") - - // Start pulling the header chain skeleton until all is done - var ( - skeleton = true // Skeleton assembly phase or finishing up - pivoting = false // Whether the next request is pivot verification - ancestor = from - mode = d.getMode() - ) - for { - // Pull the next batch of headers, it either: - // - Pivot check to see if the chain moved too far - // - Skeleton retrieval to permit concurrent header fetches - // - Full header retrieval if we're near the chain head - var ( - headers []*types.Header - hashes []common.Hash - err error - ) - switch { - case pivoting: - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - - case skeleton: - p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - - default: - p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) - } - switch err { - case nil: - // Headers retrieved, continue with processing - - case errCanceled: - // Sync cancelled, no issue, propagate up - return err - - default: - // Header retrieval either timed out, or the peer failed in some strange way - // (e.g. disconnect). Consider the master peer bad and drop - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - select { - case d.headerProcCh <- nil: - case <-d.cancelCh: - } - return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) - } - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - - d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() - } - d.pivotLock.RUnlock() - - if pivoting { - if len(headers) == 2 { - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() - - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() - - // Write out the pivot into the database so a rollback beyond - // it will reenable snap sync and update the state root that - // the state syncer will be downloading. 
- rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - // Disable the pivot check and fetch the next batch of headers - pivoting = false - continue - } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && len(headers) == 0 { - // A malicious node might withhold advertised headers indefinitely - if from+uint64(MaxHeaderFetch)-1 <= head { - p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) - return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) - } - p.log.Debug("No skeleton, fetching headers directly") - skeleton = false - continue - } - // If no more headers are inbound, notify the content fetchers and return - if len(headers) == 0 { - // Don't abort header fetches while the pivot is downloading - if !d.committed.Load() && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in snap sync) and no more headers, terminate the process - p.log.Debug("No more headers available") - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCanceled - } - } - // If we received a skeleton batch, resolve internals concurrently - var progressed bool - if skeleton { - filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - hashes = hashset[proced:] - - progressed = proced > 0 - from += uint64(proced) - } else { - // A malicious node might withhold advertised headers indefinitely - if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { - p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) - return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) - } - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. - if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentSnapBlock().Number.Uint64() - if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise, we might end up delaying header deliveries pointlessly. - if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - hashes = hashes[:n-delay] - } - } - } - // If no headers have been delivered, or all of them have been delayed, - // sleep a bit and retry. 
Take care with headers already consumed during - // skeleton filling - if len(headers) == 0 && !progressed { - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Insert any remaining new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- &headerTask{ - headers: headers, - hashes: hashes, - }: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) - } - // If we're still skeleton filling snap sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - pivoting = true - } - } -} - -// fillHeaderSkeleton concurrently retrieves headers from all our available peers -// and maps them to the provided skeleton header chain. -// -// Any partial results from the beginning of the skeleton is (if possible) forwarded -// immediately to the header processor to keep the rest of the pipeline full even -// in the case of header stalls. -// -// The method returns the entire filled skeleton and also the number of headers -// already forwarded for processing. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { - log.Debug("Filling up skeleton", "from", from) - d.queue.ScheduleSkeleton(from, skeleton) - - err := d.concurrentFetch((*headerQueue)(d), false) - if err != nil { - log.Debug("Skeleton fill failed", "err", err) - } - filled, hashes, proced := d.queue.RetrieveHeaders() - if err == nil { - log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) - } - return filled, hashes, proced, err -} - // fetchBodies iteratively downloads the scheduled block bodies, taking any // available peers, reserving a chunk of blocks for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { +func (d *Downloader) fetchBodies(from uint64) error { log.Debug("Downloading block bodies", "origin", from) - err := d.concurrentFetch((*bodyQueue)(d), beaconMode) + err := d.concurrentFetch((*bodyQueue)(d)) log.Debug("Block body download terminated", "err", err) return err @@ -1261,9 +636,9 @@ func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { // fetchReceipts iteratively downloads the scheduled block receipts, taking any // available peers, reserving a chunk of receipts for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { +func (d *Downloader) fetchReceipts(from uint64) error { log.Debug("Downloading receipts", "origin", from) - err := d.concurrentFetch((*receiptQueue)(d), beaconMode) + err := d.concurrentFetch((*receiptQueue)(d)) log.Debug("Receipt download terminated", "err", err) return err @@ -1272,11 +647,10 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { // processHeaders takes batches of retrieved headers from an input channel and // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. 
-func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processHeaders(origin uint64) error { var ( - mode = d.getMode() - gotHeaders = false // Wait for batches of headers to process - timer = time.NewTimer(time.Second) + mode = d.getMode() + timer = time.NewTimer(time.Second) ) defer timer.Stop() @@ -1295,48 +669,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode case <-d.cancelCh: } } - // If we're in legacy sync mode, we need to check total difficulty - // violations from malicious peers. That is not needed in beacon - // mode and we can skip to terminating sync. - if !beaconMode { - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if mode != LightSync { - head := d.blockchain.CurrentBlock() - if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - // If snap or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if mode == SnapSync || mode == LightSync { - head := d.lightchain.CurrentHeader() - if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - } return nil } // Otherwise split the chunk of headers into batches and process them headers, hashes := task.headers, task.hashes - gotHeaders = true for len(headers) > 0 { // Terminate if something failed in between processing chunks select { @@ -1357,44 +694,12 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode // Although the received headers might be all valid, a legacy // PoW/PoA sync must not accept post-merge headers. Make sure // that any transition is rejected at this point. 
- var ( - rejected []*types.Header - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, header := range chunkHeaders { - td = new(big.Int).Add(td, header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last header in - if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { - chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Difficulty) - } - } else { - chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] - } - break - } - } - } if len(chunkHeaders) > 0 { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } // Unless we're doing light chains, schedule the headers for associated content retrieval if mode == FullSync || mode == SnapSync { @@ -1436,7 +741,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } // processFullSyncContent takes fetch results from the queue and imports them into the chain. -func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processFullSyncContent() error { for { results := d.queue.Results(true) if len(results) == 0 { @@ -1445,44 +750,9 @@ func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error if d.chainInsertHook != nil { d.chainInsertHook(results) } - // Although the received blocks might be all valid, a legacy PoW/PoA sync - // must not accept post-merge blocks. Make sure that pre-merge blocks are - // imported, but post-merge ones are rejected. 
- var ( - rejected []*fetchResult - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, result := range results { - td = new(big.Int).Add(td, result.Header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last block in - if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 { - results, rejected = results[:i+1], results[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Header.Difficulty) - } - } else { - results, rejected = results[:i], results[i:] - } - break - } - } - } if err := d.importBlockResults(results); err != nil { return err } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c810518d56..92403277fd 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -20,7 +20,6 @@ import ( "fmt" "math/big" "os" - "strings" "sync" "sync/atomic" "testing" @@ -94,35 +93,16 @@ func (dl *downloadTester) terminate() { os.RemoveAll(dl.freezer) } -// sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - head := dl.peers[id].chain.CurrentBlock() - if td == nil { - // If no particular TD was requested, load from the peer's blockchain - td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64()) - } - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err -} - // newPeer registers a new block download source into the downloader. 
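The reworked test peer below withholds block bodies rather than headers, using a plain hash set. A minimal stand-alone sketch of that set idiom follows; the local hash and peer types are stand-ins for common.Hash and the tester's peer, introduced only for illustration.

package main

import "fmt"

// hash is a stand-in for common.Hash.
type hash [32]byte

// peer withholds the bodies of any block whose hash is in the set.
type peer struct {
	withholdBodies map[hash]struct{}
}

func (p *peer) serveBody(h hash) bool {
	if _, ok := p.withholdBodies[h]; ok {
		return false // simulate a malicious peer dropping this body
	}
	return true
}

func main() {
	p := &peer{withholdBodies: make(map[hash]struct{})}
	var h hash
	h[0] = 0xde
	p.withholdBodies[h] = struct{}{}
	fmt.Println(p.serveBody(h), p.serveBody(hash{}))
}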
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer { dl.lock.Lock() defer dl.lock.Unlock() peer := &downloadTesterPeer{ - dl: dl, - id: id, - chain: newTestBlockchain(blocks), - withholdHeaders: make(map[common.Hash]struct{}), + dl: dl, + id: id, + chain: newTestBlockchain(blocks), + withholdBodies: make(map[common.Hash]struct{}), } dl.peers[id] = peer @@ -146,11 +126,10 @@ func (dl *downloadTester) dropPeer(id string) { } type downloadTesterPeer struct { - dl *downloadTester - id string - chain *core.BlockChain - - withholdHeaders map[common.Hash]struct{} + dl *downloadTester + withholdBodies map[common.Hash]struct{} + id string + chain *core.BlockChain } // Head constructs a function to retrieve a peer's current head hash @@ -186,15 +165,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -230,15 +200,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -278,7 +239,13 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et ) hasher := trie.NewStackTrie(nil) for i, body := range bodies { - txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + hash := types.DeriveSha(types.Transactions(body.Transactions), hasher) + if _, ok := dlp.withholdBodies[hash]; ok { + txsHashes = append(txsHashes[:i], txsHashes[i+1:]...) + uncleHashes = append(uncleHashes[:i], uncleHashes[i+1:]...) 
+ continue + } + txsHashes[i] = hash uncleHashes[i] = types.CalcUncleHash(body.Uncles) } req := ð.Request{ @@ -442,7 +409,10 @@ func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ET func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a small enough block chain to download @@ -450,10 +420,15 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + select { + case <-success: + assertOwnChain(t, tester, len(chain.blocks)) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") } - assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if a large batch of blocks are being downloaded, it is throttled @@ -479,7 +454,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Start a synchronisation concurrently errc := make(chan error, 1) go func() { - errc <- tester.sync("peer", nil, mode) + errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil) }() // Iteratively take some blocks, always checking the retrieval count for { @@ -535,132 +510,17 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. -func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } - -func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that synchronising against a much shorter but much heavier fork works -// currently and is not dropped. 
-func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } - -func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) - tester.newPeer("light", protocol, chainA.blocks[1:]) - tester.newPeer("heavy", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("light", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("heavy", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head, ensuring that malicious peers cannot waste resources by feeding -// long dead chains. -func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } - -func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA - chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA.blocks[1:]) - tester.newPeer("rewriter", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head for short but heavy forks too. These are a bit special because they -// take different ancestor lookup paths. 
-func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) -} -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) -} -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) -} - -func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a long enough forked chain - chainA := testChainForkLightA - chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - // Tests that a canceled download wipes all previously accumulated state. func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) @@ -672,38 +532,16 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil, mode); err != nil { + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + <-complete tester.downloader.Cancel() if !tester.downloader.queue.Idle() { t.Errorf("download queue not idle") } } -// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). -func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create various peers with various parts of the chain - targetPeers := 8 - chain := testChainBase.shorten(targetPeers * 100) - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. 
func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } @@ -711,7 +549,11 @@ func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() // Create a small enough block chain to download @@ -720,9 +562,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Create peers of every type tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to start beacon sync: #{err}") + } + select { + case <-complete: + break + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") } assertOwnChain(t, tester, len(chain.blocks)) @@ -742,7 +589,10 @@ func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.E func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a block chain to download @@ -757,10 +607,19 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.receiptFetchHook = func(headers []*types.Header) { receiptsHave.Add(int32(len(headers))) } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { + + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + select { + case <-success: + checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ + HighestBlock: uint64(len(chain.blocks) - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + }) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") + } assertOwnChain(t, tester, len(chain.blocks)) // Validate the number of block bodies that should have been requested @@ -783,195 +642,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains. 
-func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. -func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Attempt a full sync with an attacker feeding shifted headers - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. -func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) -} -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) -} -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) -} - -func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain.blocks[1:]) - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) - } -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not. 
-func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester(t) - defer tester.terminate() - chain := testChainBase.shorten(1) - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - tester.newPeer(id, protocol, chain.blocks[1:]) - if _, ok := tester.peers[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) - if _, ok := tester.peers[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } - -func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks)/2 - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks)/2 - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) { // Mark this method as a helper to report errors at callsite, not in here t.Helper() @@ -982,296 +652,12 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). 
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainA.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainB.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Attempt a full sync with a faulty peer - missing := len(chain.blocks)/2 - 1 - - faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) - faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - panic("succeeded faulty synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress origin remind the same - // after a failure - tester.newPeer("valid", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", afterFailedSync) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Create and sync with an attacker that promises a higher chain than available. 
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - numMissing := 5 - for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { - attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - panic("succeeded attacker synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress height has been reduced to - // the true value. - validChain := chain.shorten(len(chain.blocks) - numMissing) - tester.newPeer("valid", protocol, validChain.blocks[1:]) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(len(validChain.blocks) - 1), - }) - // Check final progress after successful sync. - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(validChain.blocks) - 1), - HighestBlock: uint64(len(validChain.blocks) - 1), - }) -} - -func TestRemoteHeaderRequestSpan(t *testing.T) { - testCases := []struct { - remoteHeight uint64 - localHeight uint64 - expected []int - }{ - // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, - []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, - }, - {15000, 13006, - []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, - }, - // Remote is pretty close to us. We don't have to fetch as many - {1200, 1150, - []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, - }, - // Remote is equal to us (so on a fork with higher td) - // We should get the closest couple of ancestors - {1500, 1500, - []int{1497, 1499}, - }, - // We're higher than the remote! 
Odd - {1000, 1500, - []int{997, 999}, - }, - // Check some weird edgecases that it behaves somewhat rationally - {0, 1500, - []int{0, 2}, - }, - {6000000, 0, - []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, - }, - {0, 0, - []int{0, 2}, - }, - } - reqs := func(from, count, span int) []int { - var r []int - num := from - for len(r) < count { - r = append(r, num) - num += span + 1 - } - return r - } - for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) - - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break - } - } - } - if failed { - res := strings.ReplaceAll(fmt.Sprint(data), " ", ",") - exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",") - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } - } -} - // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - var cases = []struct { name string // The name of testing scenario local int // The length of local chain(canonical chain assumed), 0 means genesis is the head @@ -1312,81 +698,67 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of manual head reversion -func TestBeaconForkedSyncProgress68Full(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, FullSync) -} -func TestBeaconForkedSyncProgress68Snap(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, SnapSync) -} -func TestBeaconForkedSyncProgress68Light(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, LightSync) -} +// Tests that synchronisation progress (origin block number, current block number +// and highest block number) is tracked and updated correctly. 
+func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } +func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } -func testBeaconForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { +func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) tester := newTesterWithNotification(t, func() { success <- struct{}{} }) defer tester.terminate() + checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) + chain := testChainBase.shorten(blockCacheMaxItems - 15) + shortChain := chain.shorten(len(chain.blocks) / 2).blocks[1:] - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress + // Connect to peer that provides all headers and part of the bodies + faultyPeer := tester.newPeer("peer-half", protocol, shortChain) + for _, header := range shortChain { + faultyPeer.withholdBodies[header.Hash()] = struct{}{} } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainA.blocks[len(chainA.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - <-starting - progress <- struct{}{} + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } select { case <-success: - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - CurrentBlock: uint64(len(chainA.blocks) - 1), + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-half", ethereum.SyncProgress{ + CurrentBlock: uint64(len(chain.blocks)/2 - 1), + HighestBlock: uint64(len(chain.blocks)/2 - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") } - // Set the head to a second fork - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainB.blocks[len(chainB.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - - <-starting - progress <- struct{}{} + // Synchronise all the blocks and check continuation progress + tester.newPeer("peer-full", protocol, chain.blocks[1:]) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + var startingBlock uint64 + if mode == LightSync { + // in light-sync mode: + // * the starting block is 0 on the second sync cycle because blocks + // are never downloaded. + // * The current/highest blocks reported in the progress reflect the + // current/highest header. 
+ startingBlock = 0 + } else { + startingBlock = uint64(len(chain.blocks)/2 - 1) + } - // reorg below available state causes the state sync to rewind to genesis select { case <-success: - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainB.blocks) - 1), - CurrentBlock: uint64(len(chainB.blocks) - 1), - StartingBlock: 0, + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-full", ethereum.SyncProgress{ + StartingBlock: startingBlock, + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index cc4279b0da..4ebb9bbc98 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -68,48 +68,3 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } - -// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which -// handles all the cancellation, interruption and timeout mechanisms of a data -// retrieval to allow blocking API calls. -func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { - // Create the response sink and send the network request - start := time.Now() - resCh := make(chan *eth.Response) - - req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh) - if err != nil { - return nil, nil, err - } - defer req.Close() - - // Wait until the response arrives, the request is cancelled or times out - ttl := d.peers.rates.TargetTimeout() - - timeoutTimer := time.NewTimer(ttl) - defer timeoutTimer.Stop() - - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case <-timeoutTimer.C: - // Header retrieval timed out, update the metrics - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) - - return nil, nil, errTimeout - - case res := <-resCh: - // Headers successfully retrieved, update the metrics - headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) - - // Don't reject the packet even if it turns out to be bad, downloader will - // disconnect the peer on its own terms. Simply delivery the headers to - // be processed by the caller - res.Done <- nil - - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil - } -} diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 649aa27615..9d8cd114c1 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -76,7 +76,7 @@ type typedQueue interface { // concurrentFetch iteratively downloads scheduled block parts, taking available // peers, reserving a chunk of fetch requests for each and waiting for delivery // or timeouts. 
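The concurrentFetch comment above describes the general shape: while tasks remain, reserve a chunk for each idle peer, fire the requests, then block on a shared response/timeout select. Below is a much simplified, hedged sketch of that shape only; the response struct, fetch function and fixed chunk split are assumptions for illustration and not the downloader's real reservation or capacity logic.

package main

import (
	"fmt"
	"time"
)

type response struct {
	peer  string
	items int
}

// fetch hands out equal chunks of `pending` work to the peers and collects the
// deliveries on a single channel, roughly mirroring the loop described above.
func fetch(peers []string, pending int) int {
	responses := make(chan response, len(peers))
	chunk := pending / len(peers)
	for _, p := range peers {
		go func(p string) {
			time.Sleep(10 * time.Millisecond) // simulate a network round trip
			responses <- response{peer: p, items: chunk}
		}(p)
	}
	done := 0
	timeout := time.NewTimer(time.Second)
	defer timeout.Stop()
	for done < pending {
		select {
		case res := <-responses:
			fmt.Println("delivery from", res.peer, "items", res.items)
			done += res.items
		case <-timeout.C:
			fmt.Println("fetch timed out")
			return done
		}
	}
	return done
}

func main() {
	fmt.Println("fetched:", fetch([]string{"peer-1", "peer-2"}, 10))
}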
-func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { +func (d *Downloader) concurrentFetch(queue typedQueue) error { // Create a delivery channel to accept responses from all peers responses := make(chan *eth.Response) @@ -126,10 +126,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { // Prepare the queue and fetch block parts until the block header fetcher's done finished := false for { - // Short circuit if we lost all our peers - if d.peers.Len() == 0 && !beaconMode { - return errNoPeers - } // If there's nothing more to fetch, wait or terminate if queue.pending() == 0 { if len(pending) == 0 && finished { @@ -158,27 +154,20 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { } sort.Sort(&peerCapacitySort{idles, caps}) - var ( - progressed bool - throttled bool - queued = queue.pending() - ) + var throttled bool for _, peer := range idles { // Short circuit if throttling activated or there are no more // queued tasks to be retrieved if throttled { break } - if queued = queue.pending(); queued == 0 { + if queued := queue.pending(); queued == 0 { break } // Reserve a chunk of fetches for a peer. A nil can mean either that // no more headers are available, or that the peer is known not to // have them. - request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) - if progress { - progressed = true - } + request, _, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) if throttle { throttled = true throttleCounter.Inc(1) @@ -207,11 +196,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { timeout.Reset(ttl) } } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode { - return errPeersUnavailable - } } // Wait for something to happen select { @@ -315,16 +299,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { queue.updateCapacity(peer, 0, 0) } else { d.dropPeer(peer.id) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := peer.id == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } } case res := <-responses: diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 5105fda66b..56359b33c9 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -78,7 +78,6 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan if q.bodyFetchHook != nil { q.bodyFetchHook(req.Headers) } - hashes := make([]common.Hash, 0, len(req.Headers)) for _, header := range req.Headers { hashes = append(hashes, header.Hash()) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go deleted file mode 100644 index 8201f4ca74..0000000000 --- a/eth/downloader/fetchers_concurrent_headers.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/log" -) - -// headerQueue implements typedQueue and is a type adapter between the generic -// concurrent fetcher and the downloader. -type headerQueue Downloader - -// waker returns a notification channel that gets pinged in case more header -// fetches have been queued up, so the fetcher might assign it to idle peers. -func (q *headerQueue) waker() chan bool { - return q.queue.headerContCh -} - -// pending returns the number of headers that are currently queued for fetching -// by the concurrent downloader. -func (q *headerQueue) pending() int { - return q.queue.PendingHeaders() -} - -// capacity is responsible for calculating how many headers a particular peer is -// estimated to be able to retrieve within the allotted round trip time. -func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { - return peer.HeaderCapacity(rtt) -} - -// updateCapacity is responsible for updating how many headers a particular peer -// is estimated to be able to retrieve in a unit time. -func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) { - peer.UpdateHeaderRate(items, span) -} - -// reserve is responsible for allocating a requested number of pending headers -// from the download queue to the specified peer. -func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) { - return q.queue.ReserveHeaders(peer, items), false, false -} - -// unreserve is responsible for removing the current header retrieval allocation -// assigned to a specific peer and placing it back into the pool to allow -// reassigning to some other peer. -func (q *headerQueue) unreserve(peer string) int { - fails := q.queue.ExpireHeaders(peer) - if fails > 2 { - log.Trace("Header delivery timed out", "peer", peer) - } else { - log.Debug("Header delivery stalling", "peer", peer) - } - return fails -} - -// request is responsible for converting a generic fetch request into a header -// one and sending it to the remote peer for fulfillment. -func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) { - peer.log.Trace("Requesting new batch of headers", "from", req.From) - return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh) -} - -// deliver is responsible for taking a generic response packet from the concurrent -// fetcher, unpacking the header data and delivering it to the downloader's queue. 
-func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersRequest) - hashes := packet.Meta.([]common.Hash) - - accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) - switch { - case err == nil && len(headers) == 0: - peer.log.Trace("Requested headers delivered") - case err == nil: - peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted) - default: - peer.log.Debug("Failed to deliver retrieved headers", "err", err) - } - return accepted, err -} diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index 46f3febd8b..6043f51372 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -58,7 +58,6 @@ var pregenerated bool func init() { // Reduce some of the parameters to make the tester faster fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 blockCacheMaxItems = 1024 fsHeaderSafetyNet = 256 fsHeaderContCheck = 500 * time.Millisecond From f8820f170cba2744198a781fd92ddb1f91f1440f Mon Sep 17 00:00:00 2001 From: Bin <49082129+songzhibin97@users.noreply.github.com> Date: Tue, 30 Apr 2024 21:47:21 +0800 Subject: [PATCH 28/64] accounts, cmd/geth, core: close opened files (#29598) * fix: open file used up but not closed * feat: more same case * feat: accept conversation --- accounts/keystore/keystore.go | 7 +++---- accounts/scwallet/hub.go | 1 + cmd/geth/logging_test.go | 2 ++ core/mkalloc.go | 1 + 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 5c978cf0b4..df3dda60b6 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -312,11 +312,10 @@ func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error { // Lock removes the private key with the given address from memory. 
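The keystore change that follows copies the unlocked entry while ks.mu is held, releases the mutex, and only then calls expire, since expire itself needs the lock. A generic, hedged sketch of that lock-scoping idiom (stand-in store/lock/expire names, not the keystore's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type store struct {
	mu       sync.Mutex
	unlocked map[string]int
}

// lock looks up the entry under the mutex, releases the mutex, and only then
// performs the follow-up work that re-acquires the same lock.
func (s *store) lock(addr string) {
	s.mu.Lock()
	val, found := s.unlocked[addr]
	s.mu.Unlock()
	if found {
		s.expire(addr, val, 0) // safe: mutex no longer held here
	}
}

func (s *store) expire(addr string, val int, d time.Duration) {
	time.Sleep(d)
	s.mu.Lock() // would deadlock if the caller still held s.mu
	delete(s.unlocked, addr)
	s.mu.Unlock()
	fmt.Println("expired", addr, "value", val)
}

func main() {
	s := &store{unlocked: map[string]int{"0xabc": 1}}
	s.lock("0xabc")
}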
func (ks *KeyStore) Lock(addr common.Address) error { ks.mu.Lock() - if unl, found := ks.unlocked[addr]; found { - ks.mu.Unlock() + unl, found := ks.unlocked[addr] + ks.mu.Unlock() + if found { ks.expire(addr, unl, time.Duration(0)*time.Nanosecond) - } else { - ks.mu.Unlock() } return nil } diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index 5f1f369ca2..1b1899dc8e 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -95,6 +95,7 @@ func (hub *Hub) readPairings() error { } return err } + defer pairingFile.Close() pairingData, err := io.ReadAll(pairingFile) if err != nil { diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go index b5ce03f4b8..f426b138bb 100644 --- a/cmd/geth/logging_test.go +++ b/cmd/geth/logging_test.go @@ -73,6 +73,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) { if err != nil { t.Fatal(err) } + defer readFile.Close() wantLines := split(readFile) haveLines := split(bytes.NewBuffer(haveB)) for i, want := range wantLines { @@ -109,6 +110,7 @@ func TestJsonLogging(t *testing.T) { if err != nil { t.Fatal(err) } + defer readFile.Close() wantLines := split(readFile) haveLines := split(bytes.NewBuffer(haveB)) for i, wantLine := range wantLines { diff --git a/core/mkalloc.go b/core/mkalloc.go index 201c2fe7de..cc4955f038 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -101,6 +101,7 @@ func main() { if err != nil { panic(err) } + defer file.Close() if err := json.NewDecoder(file).Decode(g); err != nil { panic(err) } From 9f96e07c1cf87fdd4d044f95de9c1b5e0b85b47f Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 30 Apr 2024 22:25:35 +0800 Subject: [PATCH 29/64] core/rawdb, trie: improve db APIs for accessing trie nodes (#29362) * core/rawdb, trie: improve db APIs for accessing trie nodes * triedb/pathdb: fix --- cmd/devp2p/internal/ethtest/snap.go | 5 +- cmd/geth/dbcmd.go | 10 ++- core/genesis_test.go | 2 +- core/rawdb/accessors_trie.go | 125 +++++++++------------------- core/rawdb/ancient_utils.go | 7 +- eth/handler.go | 3 +- eth/protocols/snap/gentrie.go | 4 +- eth/protocols/snap/sync.go | 7 +- eth/protocols/snap/sync_test.go | 4 +- trie/hasher.go | 3 +- trie/stacktrie_fuzzer_test.go | 5 +- trie/sync.go | 34 ++++++-- trie/trie_test.go | 4 +- trie/trienode/node.go | 10 --- trie/triestate/state.go | 3 +- triedb/database.go | 9 +- triedb/database/database.go | 3 + triedb/hashdb/database.go | 5 -- triedb/pathdb/database.go | 12 ++- triedb/pathdb/database_test.go | 4 +- triedb/pathdb/difflayer_test.go | 8 +- triedb/pathdb/disklayer.go | 27 +++--- triedb/pathdb/journal.go | 18 ++-- triedb/pathdb/nodebuffer.go | 9 +- 24 files changed, 141 insertions(+), 180 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go index 8ff3f1f71a..4f1b6f8656 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) func (c *Conn) snapRequest(code uint64, msg any) (any, error) { @@ -905,7 +904,7 @@ func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error { // that the serving node is missing var ( bytecodes = res.Codes - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) codes = make([][]byte, len(req.Hashes)) ) @@ -964,7 +963,7 @@ func (s *Suite) snapGetTrieNodes(t 
*utesting.T, tc *trieNodesTest) error { // Cross reference the requested trienodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) trienodes := res.Nodes if got, want := len(trienodes), len(tc.expHashes); got != want { diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 4e91a4ff25..742eadd5f3 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -246,11 +246,17 @@ func removeDB(ctx *cli.Context) error { ancientDir = config.Node.ResolvePath(ancientDir) } // Delete state data - statePaths := []string{rootDir, filepath.Join(ancientDir, rawdb.StateFreezerName)} + statePaths := []string{ + rootDir, + filepath.Join(ancientDir, rawdb.StateFreezerName), + } confirmAndRemoveDB(statePaths, "state data", ctx, removeStateDataFlag.Name) // Delete ancient chain - chainPaths := []string{filepath.Join(ancientDir, rawdb.ChainFreezerName)} + chainPaths := []string{filepath.Join( + ancientDir, + rawdb.ChainFreezerName, + )} confirmAndRemoveDB(chainPaths, "ancient chain", ctx, removeChainDataFlag.Name) return nil } diff --git a/core/genesis_test.go b/core/genesis_test.go index 61be0bd252..31401e214c 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -322,7 +322,7 @@ func TestVerkleGenesisCommit(t *testing.T) { t.Fatalf("expected trie to be verkle") } - if !rawdb.ExistsAccountTrieNode(db, nil) { + if !rawdb.HasAccountTrieNode(db, nil) { t.Fatal("could not find node") } } diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index e34b24fd76..44eb715d04 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "golang.org/x/crypto/sha3" ) // HashScheme is the legacy hash-based state scheme with which trie nodes are @@ -50,7 +49,7 @@ const PathScheme = "path" type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { @@ -65,33 +64,15 @@ func (h *hasher) release() { hasherPool.Put(h) } -// ReadAccountTrieNode retrieves the account trie node and the associated node -// hash with the specified node path. -func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { - data, err := db.Get(accountTrieNodeKey(path)) - if err != nil { - return nil, common.Hash{} - } - h := newHasher() - defer h.release() - return data, h.hash(data) -} - -// HasAccountTrieNode checks the account trie node presence with the specified -// node path and the associated node hash. -func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { - data, err := db.Get(accountTrieNodeKey(path)) - if err != nil { - return false - } - h := newHasher() - defer h.release() - return h.hash(data) == hash +// ReadAccountTrieNode retrieves the account trie node with the specified node path. +func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) []byte { + data, _ := db.Get(accountTrieNodeKey(path)) + return data } -// ExistsAccountTrieNode checks the presence of the account trie node with the +// HasAccountTrieNode checks the presence of the account trie node with the // specified node path, regardless of the node hash. 
-func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { +func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { has, err := db.Has(accountTrieNodeKey(path)) if err != nil { return false @@ -113,33 +94,15 @@ func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { } } -// ReadStorageTrieNode retrieves the storage trie node and the associated node -// hash with the specified node path. -func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { - data, err := db.Get(storageTrieNodeKey(accountHash, path)) - if err != nil { - return nil, common.Hash{} - } - h := newHasher() - defer h.release() - return data, h.hash(data) -} - -// HasStorageTrieNode checks the storage trie node presence with the provided -// node path and the associated node hash. -func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { - data, err := db.Get(storageTrieNodeKey(accountHash, path)) - if err != nil { - return false - } - h := newHasher() - defer h.release() - return h.hash(data) == hash +// ReadStorageTrieNode retrieves the storage trie node with the specified node path. +func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) []byte { + data, _ := db.Get(storageTrieNodeKey(accountHash, path)) + return data } -// ExistsStorageTrieNode checks the presence of the storage trie node with the +// HasStorageTrieNode checks the presence of the storage trie node with the // specified account hash and node path, regardless of the node hash. -func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { +func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { has, err := db.Has(storageTrieNodeKey(accountHash, path)) if err != nil { return false @@ -198,10 +161,18 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c case HashScheme: return HasLegacyTrieNode(db, hash) case PathScheme: + var blob []byte if owner == (common.Hash{}) { - return HasAccountTrieNode(db, path, hash) + blob = ReadAccountTrieNode(db, path) + } else { + blob = ReadStorageTrieNode(db, owner, path) } - return HasStorageTrieNode(db, owner, path, hash) + if len(blob) == 0 { + return false + } + h := newHasher() + defer h.release() + return h.hash(blob) == hash // exists but not match default: panic(fmt.Sprintf("Unknown scheme %v", scheme)) } @@ -209,43 +180,35 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c // ReadTrieNode retrieves the trie node from database with the provided node info // and associated node hash. 
-// hashScheme-based lookup requires the following: -// - hash -// -// pathScheme-based lookup requires the following: -// - owner -// - path func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { switch scheme { case HashScheme: return ReadLegacyTrieNode(db, hash) case PathScheme: - var ( - blob []byte - nHash common.Hash - ) + var blob []byte if owner == (common.Hash{}) { - blob, nHash = ReadAccountTrieNode(db, path) + blob = ReadAccountTrieNode(db, path) } else { - blob, nHash = ReadStorageTrieNode(db, owner, path) + blob = ReadStorageTrieNode(db, owner, path) } - if nHash != hash { + if len(blob) == 0 { return nil } + h := newHasher() + defer h.release() + if h.hash(blob) != hash { + return nil // exists but not match + } return blob default: panic(fmt.Sprintf("Unknown scheme %v", scheme)) } } -// WriteTrieNode writes the trie node into database with the provided node info -// and associated node hash. -// hashScheme-based lookup requires the following: -// - hash +// WriteTrieNode writes the trie node into database with the provided node info. // -// pathScheme-based lookup requires the following: -// - owner -// - path +// hash-scheme requires the node hash as the identifier. +// path-scheme requires the node owner and path as the identifier. func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { switch scheme { case HashScheme: @@ -261,14 +224,10 @@ func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash } } -// DeleteTrieNode deletes the trie node from database with the provided node info -// and associated node hash. -// hashScheme-based lookup requires the following: -// - hash +// DeleteTrieNode deletes the trie node from database with the provided node info. // -// pathScheme-based lookup requires the following: -// - owner -// - path +// hash-scheme requires the node hash as the identifier. +// path-scheme requires the node owner and path as the identifier. func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { switch scheme { case HashScheme: @@ -287,9 +246,8 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has // ReadStateScheme reads the state scheme of persistent state, or none // if the state is not present in database. func ReadStateScheme(db ethdb.Reader) string { - // Check if state in path-based scheme is present - blob, _ := ReadAccountTrieNode(db, nil) - if len(blob) != 0 { + // Check if state in path-based scheme is present. 
+ if HasAccountTrieNode(db, nil) { return PathScheme } // The root node might be deleted during the initial snap sync, check @@ -304,8 +262,7 @@ func ReadStateScheme(db ethdb.Reader) string { if header == nil { return "" // empty datadir } - blob = ReadLegacyTrieNode(db, header.Root) - if len(blob) == 0 { + if !HasLegacyTrieNode(db, header.Root) { return "" // no state in disk } return HashScheme diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 428cda544b..1c69639c9d 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -89,20 +89,17 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case StateFreezerName: - if ReadStateScheme(db) != PathScheme { - continue - } datadir, err := db.AncientDatadir() if err != nil { return nil, err } f, err := NewStateFreezer(datadir, true) if err != nil { - return nil, err + continue // might be possible the state freezer is not existent } defer f.Close() - info, err := inspect(StateFreezerName, stateFreezerNoSnappy, f) + info, err := inspect(freezer, stateFreezerNoSnappy, f) if err != nil { return nil, err } diff --git a/eth/handler.go b/eth/handler.go index c7c582af40..143ac2a8a5 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/triedb/pathdb" - "golang.org/x/crypto/sha3" ) const ( @@ -480,7 +479,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) ) for _, tx := range txs { diff --git a/eth/protocols/snap/gentrie.go b/eth/protocols/snap/gentrie.go index 81c2640b62..6255fb221d 100644 --- a/eth/protocols/snap/gentrie.go +++ b/eth/protocols/snap/gentrie.go @@ -164,7 +164,7 @@ func (t *pathTrie) deleteAccountNode(path []byte, inner bool) { } else { accountOuterLookupGauge.Inc(1) } - if !rawdb.ExistsAccountTrieNode(t.db, path) { + if !rawdb.HasAccountTrieNode(t.db, path) { return } if inner { @@ -181,7 +181,7 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) { } else { storageOuterLookupGauge.Inc(1) } - if !rawdb.ExistsStorageTrieNode(t.db, t.owner, path) { + if !rawdb.HasStorageTrieNode(t.db, t.owner, path) { return } if inner { diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index b0ddb8e403..ffda718700 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) const ( @@ -2653,7 +2652,7 @@ func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) @@ -2901,7 +2900,7 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error // Cross reference the requested trienodes with the response to find gaps // that the serving node is missing var ( - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = 
crypto.NewKeccakState() hash = make([]byte, 32) nodes = make([][]byte, len(req.hashes)) fills uint64 @@ -3007,7 +3006,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index f35babb731..5f6826373a 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -64,7 +64,7 @@ func TestHashing(t *testing.T) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() @@ -96,7 +96,7 @@ func BenchmarkHashing(b *testing.B) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() diff --git a/trie/hasher.go b/trie/hasher.go index 1e063d8020..abf654c709 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // hasher is a type used for the trie Hash operation. A hasher has some @@ -38,7 +37,7 @@ var hasherPool = sync.Pool{ New: func() interface{} { return &hasher{ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode. - sha: sha3.NewLegacyKeccak256().(crypto.KeccakState), + sha: crypto.NewKeccakState(), encbuf: rlp.NewEncoderBuffer(nil), } }, diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go index 5126e0bd07..418b941d94 100644 --- a/trie/stacktrie_fuzzer_test.go +++ b/trie/stacktrie_fuzzer_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) func FuzzStackTrie(f *testing.F) { @@ -41,10 +40,10 @@ func fuzz(data []byte, debugging bool) { // This spongeDb is used to check the sequence of disk-db-writes var ( input = bytes.NewReader(data) - spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeA = &spongeDb{sponge: crypto.NewKeccakState()} dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme) trieA = NewEmpty(dbA) - spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeB = &spongeDb{sponge: crypto.NewKeccakState()} dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme) trieB = NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme()) diff --git a/trie/sync.go b/trie/sync.go index 589d28364b..f6b20b2240 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -546,9 +547,9 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { // the performance impact negligible. 
var exists bool if owner == (common.Hash{}) { - exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...)) + exists = rawdb.HasAccountTrieNode(s.database, append(inner, key[:i]...)) } else { - exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...)) + exists = rawdb.HasStorageTrieNode(s.database, owner, append(inner, key[:i]...)) } if exists { s.membatch.delNode(owner, append(inner, key[:i]...)) @@ -691,13 +692,14 @@ func (s *Sync) hasNode(owner common.Hash, path []byte, hash common.Hash) (exists } // If node is running with path scheme, check the presence with node path. var blob []byte - var dbHash common.Hash if owner == (common.Hash{}) { - blob, dbHash = rawdb.ReadAccountTrieNode(s.database, path) + blob = rawdb.ReadAccountTrieNode(s.database, path) } else { - blob, dbHash = rawdb.ReadStorageTrieNode(s.database, owner, path) + blob = rawdb.ReadStorageTrieNode(s.database, owner, path) } - exists = hash == dbHash + h := newBlobHasher() + defer h.release() + exists = hash == h.hash(blob) inconsistent = !exists && len(blob) != 0 return exists, inconsistent } @@ -712,3 +714,23 @@ func ResolvePath(path []byte) (common.Hash, []byte) { } return owner, path } + +// blobHasher is used to compute the sha256 hash of the provided data. +type blobHasher struct{ state crypto.KeccakState } + +// blobHasherPool is the pool for reusing pre-allocated hash state. +var blobHasherPool = sync.Pool{ + New: func() interface{} { return &blobHasher{state: crypto.NewKeccakState()} }, +} + +func newBlobHasher() *blobHasher { + return blobHasherPool.Get().(*blobHasher) +} + +func (h *blobHasher) hash(data []byte) common.Hash { + return crypto.HashData(h.state, data) +} + +func (h *blobHasher) release() { + blobHasherPool.Put(h) +} diff --git a/trie/trie_test.go b/trie/trie_test.go index 6ecd20c218..da60a7423d 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -886,7 +886,7 @@ func TestCommitSequence(t *testing.T) { } { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements @@ -917,7 +917,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { } { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements diff --git a/trie/trienode/node.go b/trie/trienode/node.go index bc93e3ca88..aa8a0f6d99 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -135,16 +135,6 @@ func (set *NodeSet) Size() (int, int) { return set.updates, set.deletes } -// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can -// we get rid of it? -func (set *NodeSet) Hashes() []common.Hash { - ret := make([]common.Hash, 0, len(set.Nodes)) - for _, node := range set.Nodes { - ret = append(ret, node.Hash) - } - return ret -} - // Summary returns a string-representation of the NodeSet. 
func (set *NodeSet) Summary() string { var out = new(strings.Builder) diff --git a/trie/triestate/state.go b/trie/triestate/state.go index aa4d32f852..9db9211e8c 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) // Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia @@ -257,7 +256,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/triedb/database.go b/triedb/database.go index 261a47dcc2..10f77982f3 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -20,6 +20,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" @@ -48,9 +49,6 @@ var HashDefaults = &Config{ // backend defines the methods needed to access/update trie nodes in different // state scheme. type backend interface { - // Scheme returns the identifier of used storage scheme. - Scheme() string - // Initialized returns an indicator if the state data is already initialized // according to the state scheme. Initialized(genesisRoot common.Hash) bool @@ -181,7 +179,10 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Scheme returns the node scheme used in the database. func (db *Database) Scheme() string { - return db.backend.Scheme() + if db.config.PathDB != nil { + return rawdb.PathScheme + } + return rawdb.HashScheme } // Close flushes the dangling preimages to disk and closes the trie database. diff --git a/triedb/database/database.go b/triedb/database/database.go index 18a8f454e2..f11c7e9bbd 100644 --- a/triedb/database/database.go +++ b/triedb/database/database.go @@ -25,6 +25,9 @@ type Reader interface { // Node retrieves the trie node blob with the provided trie identifier, // node path and the corresponding node hash. No error will be returned // if the node is not found. + // + // Don't modify the returned byte slice since it's not deep-copied and + // still be referenced by database. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 7d5499eb69..ebb5d72057 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -623,11 +623,6 @@ func (db *Database) Close() error { return nil } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.HashScheme -} - // Reader retrieves a node reader belonging to the given state root. // An error will be returned if the requested state is not available. 
func (db *Database) Reader(root common.Hash) (*reader, error) { diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 50beebced1..05a28aa1ef 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -306,8 +307,10 @@ func (db *Database) Enable(root common.Hash) error { } // Ensure the provided state root matches the stored one. root = types.TrieRootHash(root) - _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil) - stored = types.TrieRootHash(stored) + stored := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + stored = crypto.Keccak256Hash(blob) + } if stored != root { return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root) } @@ -480,11 +483,6 @@ func (db *Database) SetBufferSize(size int) error { return db.tree.bottom().setBufferSize(db.bufferSize) } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.PathScheme -} - // modifyAllowed returns the indicator if mutation is allowed. This function // assumes the db.lock is already held. func (db *Database) modifyAllowed() error { diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 29de534589..7b24082315 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -474,7 +474,7 @@ func TestDisable(t *testing.T) { tester := newTester(t, 0) defer tester.release() - _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) if err := tester.db.Disable(); err != nil { t.Fatalf("Failed to deactivate database: %v", err) } @@ -580,7 +580,7 @@ func TestCorruptedJournal(t *testing.T) { t.Errorf("Failed to journal, err: %v", err) } tester.db.Close() - _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + root := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) // Mutate the journal in disk, it should be regarded as invalid blob := rawdb.ReadTrieJournal(tester.db.diskdb) diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go index bf4c6502ef..1e93a3f892 100644 --- a/triedb/pathdb/difflayer_test.go +++ b/triedb/pathdb/difflayer_test.go @@ -70,10 +70,10 @@ func benchmarkSearch(b *testing.B, depth int, total int) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node if npath == nil && depth == index { npath = common.CopyBytes(path) - nblob = common.CopyBytes(node.Blob) + nblob = common.CopyBytes(blob) } } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) @@ -116,7 +116,7 @@ func BenchmarkPersist(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) } @@ -154,7 +154,7 @@ func BenchmarkJournal(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - 
nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node } // TODO(rjl493456442) a non-nil state set is expected. return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index ec7c91bcac..964ad2ef77 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" - "golang.org/x/crypto/sha3" ) // diskLayer is a low level persistent layer built on top of a key-value store. @@ -117,12 +116,12 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co dirtyMissMeter.Mark(1) // Try to retrieve the trie node from the clean memory cache + h := newHasher() + defer h.release() + key := cacheKey(owner, path) if dl.cleans != nil { if blob := dl.cleans.Get(nil, key); len(blob) > 0 { - h := newHasher() - defer h.release() - cleanHitMeter.Mark(1) cleanReadMeter.Mark(int64(len(blob))) return blob, h.hash(blob), &nodeLoc{loc: locCleanCache, depth: depth}, nil @@ -130,20 +129,18 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co cleanMissMeter.Mark(1) } // Try to retrieve the trie node from the disk. - var ( - nBlob []byte - nHash common.Hash - ) + var blob []byte if owner == (common.Hash{}) { - nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) + blob = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) } else { - nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) + blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) } - if dl.cleans != nil && len(nBlob) > 0 { - dl.cleans.Set(key, nBlob) - cleanWriteMeter.Mark(int64(len(nBlob))) + if dl.cleans != nil && len(blob) > 0 { + dl.cleans.Set(key, blob) + cleanWriteMeter.Mark(int64(len(blob))) } - return nBlob, nHash, &nodeLoc{loc: locDiskLayer, depth: depth}, nil + + return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil } // update implements the layer interface, returning a new diff layer on top @@ -303,7 +300,7 @@ func (dl *diskLayer) resetCache() { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 3a0b7ebae2..1740ec5935 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -120,9 +120,10 @@ func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { // loadLayers loads a pre-existing state layer backed by a key-value store. func (db *Database) loadLayers() layer { // Retrieve the root node of persistent state. - _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil) - root = types.TrieRootHash(root) - + var root = types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + root = crypto.Keccak256Hash(blob) + } // Load the layers by resolving the journal head, err := db.loadJournal(root) if err == nil { @@ -361,14 +362,13 @@ func (db *Database) Journal(root common.Hash) error { if err := rlp.Encode(journal, journalVersion); err != nil { return err } - // The stored state in disk might be empty, convert the - // root to emptyRoot in this case. 
- _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil) - diskroot = types.TrieRootHash(diskroot) - // Secondly write out the state root in disk, ensure all layers // on top are continuous with disk. - if err := rlp.Encode(journal, diskroot); err != nil { + diskRoot := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + diskRoot = crypto.Keccak256Hash(blob) + } + if err := rlp.Encode(journal, diskRoot); err != nil { return err } // Finally write out the journal of each layer in reverse order. diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go index 4a13fcc44e..5675f14123 100644 --- a/triedb/pathdb/nodebuffer.go +++ b/triedb/pathdb/nodebuffer.go @@ -17,6 +17,7 @@ package pathdb import ( + "bytes" "fmt" "time" @@ -148,14 +149,14 @@ func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[s // // In case of database rollback, don't panic if this "clean" // node occurs which is not present in buffer. - var nhash common.Hash + var blob []byte if owner == (common.Hash{}) { - _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + blob = rawdb.ReadAccountTrieNode(db, []byte(path)) } else { - _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) } // Ignore the clean node in the case described above. - if nhash == n.Hash { + if bytes.Equal(blob, n.Blob) { continue } panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) From 682ee820fa116b8a081d269c9d155ec19411959b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 2 May 2024 11:18:27 +0300 Subject: [PATCH 30/64] core/state: parallelise parts of state commit (#29681) * core/state, internal/workerpool: parallelize parts of state commit * core, internal: move workerpool into syncx * core/state: use errgroups, commit accounts concurrently * core: resurrect detailed commit timers to almost-accuracy --- core/blockchain.go | 2 +- core/state/state_object.go | 3 + core/state/statedb.go | 126 +++++++++++++++++++++++++------------ 3 files changed, 89 insertions(+), 42 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 9de4baccca..654b4fbdca 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1963,7 +1963,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them - blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) + blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) blockInsertTimer.UpdateSince(start) return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil diff --git a/core/state/state_object.go b/core/state/state_object.go index 1454f7a459..d75ba01376 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -403,6 +403,9 @@ func (s *stateObject) updateRoot() { // commit obtains a set of dirty storage trie nodes and updates the account data. // The returned set can be nil if nothing to commit. This function assumes all // storage mutations have already been flushed into trie by updateRoot. 
+// +// Note, commit may run concurrently across all the state objects. Do not assume +// thread-safe access to the statedb. func (s *stateObject) commit() (*trienode.NodeSet, error) { // Short circuit if trie is not even loaded, don't bother with committing anything if s.trie == nil { diff --git a/core/state/statedb.go b/core/state/statedb.go index 6d9cc907e0..66cfc8f05a 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -23,6 +23,7 @@ import ( "math/big" "slices" "sort" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -37,6 +38,7 @@ import ( "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" "github.com/holiman/uint256" + "golang.org/x/sync/errgroup" ) type revision struct { @@ -1146,66 +1148,108 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er storageTrieNodesUpdated int storageTrieNodesDeleted int nodes = trienode.NewMergedNodeSet() - codeWriter = s.db.DiskDB().NewBatch() ) // Handle all state deletions first if err := s.handleDestruction(nodes); err != nil { return common.Hash{}, err } - // Handle all state updates afterwards + // Handle all state updates afterwards, concurrently to one another to shave + // off some milliseconds from the commit operation. Also accumulate the code + // writes to run in parallel with the computations. start := time.Now() + var ( + code = s.db.DiskDB().NewBatch() + lock sync.Mutex + root common.Hash + workers errgroup.Group + ) + // Schedule the account trie first since that will be the biggest, so give + // it the most time to crunch. + // + // TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain + // heads, which seems excessive given that it doesn't do hashing, it just + // shuffles some data. For comparison, the *hashing* at chain head is 2-3ms. + // We need to investigate what's happening as it seems something's wonky. + // Obviously it's not an end of the world issue, just something the original + // code didn't anticipate for. + workers.Go(func() error { + // Write the account trie changes, measuring the amount of wasted time + newroot, set, err := s.trie.Commit(true) + if err != nil { + return err + } + root = newroot + + // Merge the dirty nodes of account trie into global set + lock.Lock() + defer lock.Unlock() + + if set != nil { + if err = nodes.Merge(set); err != nil { + return err + } + accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() + } + s.AccountCommits = time.Since(start) + return nil + }) + // Schedule each of the storage tries that need to be updated, so they can + // run concurrently to one another. + // + // TODO(karalabe): Experimentally, the account commit takes approximately the + // same time as all the storage commits combined, so we could maybe only have + // 2 threads in total. But that kind of depends on the account commit being + // more expensive than it should be, so let's fix that and revisit this todo. 
for addr, op := range s.mutations { if op.isDelete() { continue } - obj := s.stateObjects[addr] - // Write any contract code associated with the state object + obj := s.stateObjects[addr] if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + rawdb.WriteCode(code, common.BytesToHash(obj.CodeHash()), obj.code) obj.dirtyCode = false } - // Write any storage changes in the state object to its storage trie - set, err := obj.commit() - if err != nil { - return common.Hash{}, err - } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err + // Run the storage updates concurrently to one another + workers.Go(func() error { + // Write any storage changes in the state object to its storage trie + set, err := obj.commit() + if err != nil { + return err } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted - } + // Merge the dirty nodes of storage trie into global set. It is possible + // that the account was destructed and then resurrected in the same block. + // In this case, the node set is shared by both accounts. + lock.Lock() + defer lock.Unlock() + + if set != nil { + if err = nodes.Merge(set); err != nil { + return err + } + updates, deleted := set.Size() + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deleted + } + s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime + return nil + }) } - s.StorageCommits += time.Since(start) - - if codeWriter.ValueSize() > 0 { - if err := codeWriter.Write(); err != nil { - log.Crit("Failed to commit dirty codes", "error", err) + // Schedule the code commits to run concurrently too. This shouldn't really + // take much since we don't often commit code, but since it's disk access, + // it's always yolo. 
+ workers.Go(func() error { + if code.ValueSize() > 0 { + if err := code.Write(); err != nil { + log.Crit("Failed to commit dirty codes", "error", err) + } } - } - // Write the account trie changes, measuring the amount of wasted time - start = time.Now() - - root, set, err := s.trie.Commit(true) - if err != nil { + return nil + }) + // Wait for everything to finish and update the metrics + if err := workers.Wait(); err != nil { return common.Hash{}, err } - // Merge the dirty nodes of account trie into global set - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err - } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() - } - // Report the commit metrics - s.AccountCommits += time.Since(start) - accountUpdatedMeter.Mark(int64(s.AccountUpdated)) storageUpdatedMeter.Mark(int64(s.StorageUpdated)) accountDeletedMeter.Mark(int64(s.AccountDeleted)) From bc609e852afd1c10b0dc9e677d8c37cfde17fc04 Mon Sep 17 00:00:00 2001 From: Aaron Chen Date: Thu, 2 May 2024 16:18:59 +0800 Subject: [PATCH 31/64] core/vm: remove redundant error checks (#29692) --- core/vm/contracts.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 527d9f4f47..8b648062e9 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -1123,9 +1123,6 @@ func (c *bls12381MapG1) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG1(fe) - if err != nil { - return nil, err - } // Encode the G1 point to 128 bytes return encodePointG1(&r), nil @@ -1159,9 +1156,6 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1}) - if err != nil { - return nil, err - } // Encode the G2 point to 256 bytes return encodePointG2(&r), nil From fbf6238ae9c4ee61d1a5f60b523763e75e11d07b Mon Sep 17 00:00:00 2001 From: Nathan Date: Thu, 2 May 2024 16:21:11 +0800 Subject: [PATCH 32/64] params: fix misleading comments (#29684) --- params/config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/params/config.go b/params/config.go index 62acaddaad..534e57831a 100644 --- a/params/config.go +++ b/params/config.go @@ -566,17 +566,17 @@ func (c *ChainConfig) IsShanghai(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.ShanghaiTime, time) } -// IsCancun returns whether num is either equal to the Cancun fork time or greater. +// IsCancun returns whether time is either equal to the Cancun fork time or greater. func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.CancunTime, time) } -// IsPrague returns whether num is either equal to the Prague fork time or greater. +// IsPrague returns whether time is either equal to the Prague fork time or greater. func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } -// IsVerkle returns whether num is either equal to the Verkle fork time or greater. +// IsVerkle returns whether time is either equal to the Verkle fork time or greater. 
func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) } From 2c67fab0d7cb95319c111e150a7ac857819e2a74 Mon Sep 17 00:00:00 2001 From: maskpp Date: Thu, 2 May 2024 17:35:45 +0800 Subject: [PATCH 33/64] trie/pathdb: preallocate map capacity (#29690) * preallocated capacity for map's certain usege of memory * preallocated capacity for map's certain usege of memory --- triedb/pathdb/nodebuffer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go index 5675f14123..ff09484100 100644 --- a/triedb/pathdb/nodebuffer.go +++ b/triedb/pathdb/nodebuffer.go @@ -90,7 +90,7 @@ func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *no // The nodes belong to original diff layer are still accessible even // after merging, thus the ownership of nodes map should still belong // to original layer and any mutation on it should be prevented. - current = make(map[string]*trienode.Node) + current = make(map[string]*trienode.Node, len(subset)) for path, n := range subset { current[path] = n delta += int64(len(n.Blob) + len(path)) From 86a1f0c39494c8f5caddf6bd9fbddd4bdfa944fd Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 2 May 2024 18:26:07 +0800 Subject: [PATCH 34/64] core/rawdb: fix ancient root folder (#29697) --- core/rawdb/database.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 0a9f6f73c7..3436958de7 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -197,10 +197,11 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // Create the idle freezer instance. If the given ancient directory is empty, // in-memory chain freezer is used (e.g. dev mode); otherwise the regular // file-based freezer is created. 
- if ancient != "" { - ancient = resolveChainFreezerDir(ancient) + chainFreezerDir := ancient + if chainFreezerDir != "" { + chainFreezerDir = resolveChainFreezerDir(chainFreezerDir) } - frdb, err := newChainFreezer(ancient, namespace, readonly) + frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly) if err != nil { printChainMetadata(db) return nil, err From 905e325cd85c55d6408373f7bf20ea7c525f6900 Mon Sep 17 00:00:00 2001 From: Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com> Date: Mon, 6 May 2024 07:17:19 -0400 Subject: [PATCH 35/64] p2p/discover/v5wire: add tests for invalid handshake and auth data size (#29708) --- p2p/discover/v5wire/encoding_test.go | 36 +++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index a5387311a5..27966f2afc 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -30,6 +30,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" @@ -283,9 +284,38 @@ func TestDecodeErrorsV5(t *testing.T) { b = make([]byte, 63) net.nodeA.expectDecodeErr(t, errInvalidHeader, b) - // TODO some more tests would be nice :) - // - check invalid authdata sizes - // - check invalid handshake data sizes + t.Run("invalid-handshake-datasize", func(t *testing.T) { + requiredNumber := 108 + + testDataFile := filepath.Join("testdata", "v5.1-ping-handshake"+".txt") + enc := hexFile(testDataFile) + //delete some byte from handshake to make it invalid + enc = enc[:len(enc)-requiredNumber] + net.nodeB.expectDecodeErr(t, errMsgTooShort, enc) + }) + + t.Run("invalid-auth-datasize", func(t *testing.T) { + testPacket := []byte{} + testDataFiles := []string{"v5.1-whoareyou", "v5.1-ping-handshake"} + for counter, name := range testDataFiles { + file := filepath.Join("testdata", name+".txt") + enc := hexFile(file) + if counter == 0 { + //make whoareyou header + testPacket = enc[:sizeofStaticPacketData-1] + testPacket = append(testPacket, 255) + } + if counter == 1 { + //append invalid auth size + testPacket = append(testPacket, enc[sizeofStaticPacketData:]...) + } + } + + wantErr := "invalid auth size" + if _, err := net.nodeB.decode(testPacket); strings.HasSuffix(err.Error(), wantErr) { + t.Fatal(fmt.Errorf("(%s) got err %q, want %q", net.nodeB.ln.ID().TerminalString(), err, wantErr)) + } + }) } // This test checks that all test vectors can be decoded. From a09a6103846544a20dfccbe16532d2843f277c5f Mon Sep 17 00:00:00 2001 From: Matthieu Vachon Date: Mon, 6 May 2024 07:21:55 -0400 Subject: [PATCH 36/64] core/tracing: add system call callback when performing `ProcessBeaconBlockRoot` (#29355) Added a start/end system where tracer can be notified that processing of some Ethereum system calls is starting processing and also notifies it when the processing has completed. Doing a start/end for system call will enable tracers to "route" incoming next tracing events to go to a separate bucket than other EVM calls. Those not interested by this fact can simply avoid registering the hooks. The EVM call is going to be traced normally afterward between the signals provided by those 2 new hooks but outside of a transaction context OnTxStart/End. That something implementors of live tracers will need to be aware of (since only "trx tracers" are not concerned by ProcessBeaconRoot). 
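For illustration, a minimal sketch of how a live tracer might consume the two new hooks to route system-call frames into their own bucket. The tracer type and its routing flag are hypothetical; only the hook names and signatures come from this change.

package livetracer

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/tracing"
)

// systemCallAwareTracer is a hypothetical live tracer that separates frames
// executed during a system call (e.g. the EIP-4788 beacon-root update) from
// frames executed inside regular transactions.
type systemCallAwareTracer struct {
	inSystemCall bool
}

func (t *systemCallAwareTracer) Hooks() *tracing.Hooks {
	return &tracing.Hooks{
		// Flip the flag around the system call so subsequent events can be routed.
		OnSystemCallStart: func() { t.inSystemCall = true },
		OnSystemCallEnd:   func() { t.inSystemCall = false },
		// OnEnter also fires for system-call frames, outside OnTxStart/OnTxEnd.
		OnEnter: func(depth int, typ byte, from, to common.Address, input []byte, gas uint64, value *big.Int) {
			if t.inSystemCall {
				return // attribute this frame to the system-call bucket
			}
			// regular, transaction-scoped call frame
		},
	}
}

Tracers not interested in this distinction can simply leave both hooks unset.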
--------- Co-authored-by: Sina Mahmoodi --- core/state_processor.go | 7 +++++++ core/tracing/CHANGELOG.md | 12 +++++++++++- core/tracing/hooks.go | 38 ++++++++++++++++++++++++++++++++------ 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index b1a8938f67..7166ed8bd8 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -186,6 +186,13 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root // contract. This method is exported to be used in tests. func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) { + if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallStart != nil { + vmenv.Config.Tracer.OnSystemCallStart() + } + if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallEnd != nil { + defer vmenv.Config.Tracer.OnSystemCallEnd() + } + // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with // the new root msg := &Message{ diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md index 77eda4ad76..93b91cf479 100644 --- a/core/tracing/CHANGELOG.md +++ b/core/tracing/CHANGELOG.md @@ -4,6 +4,15 @@ All notable changes to the tracing interface will be documented in this file. ## [Unreleased] +There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for the future hardfork. + +### New methods + +- `OnSystemCallStart()`: This hook is called when EVM starts processing a system call. Note system calls happen outside the scope of a transaction. This event will be followed by normal EVM execution events. +- `OnSystemCallEnd()`: This hook is called when EVM finishes processing a system call. + +## [v1.14.0] + There has been a major breaking change in the tracing interface for custom native tracers. JS and built-in tracers are not affected by this change and tracing API methods may be used as before. This overhaul has been done as part of the new live tracing feature ([#29189](https://github.com/ethereum/go-ethereum/pull/29189)). To learn more about live tracing please refer to the [docs](https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing). **The `EVMLogger` interface which the tracers implemented has been removed.** It has been replaced by a new struct `tracing.Hooks`. `Hooks` keeps pointers to event listening functions. Internally the EVM will use these function pointers to emit events and can skip an event if the tracer has opted not to implement it. In fact this is the main reason for this change of approach. Another benefit is the ease of adding new hooks in future, and dynamically assigning event receivers. @@ -66,4 +75,5 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale - `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract. - `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. 
Similar to above. -[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.13.14...master \ No newline at end of file +[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master +[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0 \ No newline at end of file diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 9ca6ee39fb..41bae63d9f 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -81,6 +81,10 @@ type ( TxEndHook = func(receipt *types.Receipt, err error) // EnterHook is invoked when the processing of a message starts. + // + // Take note that EnterHook, when in the context of a live tracer, can be invoked + // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls, + // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information. EnterHook = func(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) // ExitHook is invoked when the processing of a message ends. @@ -89,6 +93,10 @@ type ( // ran out of gas when attempting to persist the code to database did not // count as a call failure and did not cause a revert of the call. This will // be indicated by `reverted == false` and `err == ErrCodeStoreOutOfGas`. + // + // Take note that ExitHook, when in the context of a live tracer, can be invoked + // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls, + // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information. ExitHook = func(depth int, output []byte, gasUsed uint64, err error, reverted bool) // OpcodeHook is invoked just prior to the execution of an opcode. @@ -125,6 +133,22 @@ type ( // GenesisBlockHook is called when the genesis block is being processed. GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc) + // OnSystemCallStartHook is called when a system call is about to be executed. Today, + // this hook is invoked when the EIP-4788 system call is about to be executed to set the + // beacon block root. + // + // After this hook, the EVM call tracing will happened as usual so you will receive a `OnEnter/OnExit` + // as well as state hooks between this hook and the `OnSystemCallEndHook`. + // + // Note that system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks + // will not be invoked. + OnSystemCallStartHook = func() + + // OnSystemCallEndHook is called when a system call has finished executing. Today, + // this hook is invoked when the EIP-4788 system call is about to be executed to set the + // beacon block root. 
+ OnSystemCallEndHook = func() + /* - State events - */ @@ -155,12 +179,14 @@ type Hooks struct { OnFault FaultHook OnGasChange GasChangeHook // Chain events - OnBlockchainInit BlockchainInitHook - OnClose CloseHook - OnBlockStart BlockStartHook - OnBlockEnd BlockEndHook - OnSkippedBlock SkippedBlockHook - OnGenesisBlock GenesisBlockHook + OnBlockchainInit BlockchainInitHook + OnClose CloseHook + OnBlockStart BlockStartHook + OnBlockEnd BlockEndHook + OnSkippedBlock SkippedBlockHook + OnGenesisBlock GenesisBlockHook + OnSystemCallStart OnSystemCallStartHook + OnSystemCallEnd OnSystemCallEndHook // State events OnBalanceChange BalanceChangeHook OnNonceChange NonceChangeHook From 43cbcd78ea78a690cf8604cfa3035a3d1e67a794 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 6 May 2024 13:28:53 +0200 Subject: [PATCH 37/64] core, core/state: move TriesInMemory to state package (#29701) --- core/blockchain.go | 17 ++++++++--------- core/blockchain_test.go | 36 ++++++++++++++++++------------------ core/state/statedb.go | 9 ++++++--- 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 654b4fbdca..56e00e85b6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -100,7 +100,6 @@ const ( blockCacheLimit = 256 receiptsCacheLimit = 32 txLookupCacheLimit = 1024 - TriesInMemory = 128 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. // @@ -1128,7 +1127,7 @@ func (bc *BlockChain) Stop() { if !bc.cacheConfig.TrieDirtyDisabled { triedb := bc.triedb - for _, offset := range []uint64{0, 1, TriesInMemory - 1} { + for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} { if number := bc.CurrentBlock().Number.Uint64(); number > offset { recent := bc.GetBlockByNumber(number - offset) @@ -1452,7 +1451,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error { // writeBlockWithState writes block, metadata and corresponding state data to the // database. -func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error { +func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error { // Calculate the total difficulty of the block ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) if ptd == nil { @@ -1469,12 +1468,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) rawdb.WriteBlock(blockBatch, block) rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) - rawdb.WritePreimages(blockBatch, state.Preimages()) + rawdb.WritePreimages(blockBatch, statedb.Preimages()) if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } // Commit all cached state changes into underlying memory database. - root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) + root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) if err != nil { return err } @@ -1493,7 +1492,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Flush limits are not considered for the first TriesInMemory blocks. 
current := block.NumberU64() - if current <= TriesInMemory { + if current <= state.TriesInMemory { return nil } // If we exceeded our memory allowance, flush matured singleton nodes to disk @@ -1505,7 +1504,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.triedb.Cap(limit - ethdb.IdealBatchSize) } // Find the next state trie we need to commit - chosen := current - TriesInMemory + chosen := current - state.TriesInMemory flushInterval := time.Duration(bc.flushInterval.Load()) // If we exceeded time allowance, flush an entire trie to disk if bc.gcproc > flushInterval { @@ -1517,8 +1516,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } else { // If we're exceeding limits but haven't reached a large enough memory gap, // warn the user that the system is becoming unstable. - if chosen < bc.lastWrite+TriesInMemory && bc.gcproc >= 2*flushInterval { - log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory) + if chosen < bc.lastWrite+state.TriesInMemory && bc.gcproc >= 2*flushInterval { + log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/state.TriesInMemory) } // Flush an entire trie and restart the counters bc.triedb.Commit(header.Root, true) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index f20252da8c..ea8ea7e242 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1712,7 +1712,7 @@ func TestTrieForkGC(t *testing.T) { Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee), } - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) // Generate a bunch of fork blocks, each side forking from the canonical chain forks := make([]*types.Block, len(blocks)) @@ -1740,7 +1740,7 @@ func TestTrieForkGC(t *testing.T) { } } // Dereference all the recent tries and ensure no past trie is left in - for i := 0; i < TriesInMemory; i++ { + for i := 0; i < state.TriesInMemory; i++ { chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } @@ -1764,8 +1764,8 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) + original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) + competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original canonical one db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) @@ 
-1804,7 +1804,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := TriesInMemory + states := state.TriesInMemory if scheme == rawdb.PathScheme { states = states + 1 } @@ -1972,7 +1972,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // We must use a pretty long chain to ensure that the fork doesn't overtake us // until after at least 128 blocks post tip - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) b.OffsetTime(-9) }) @@ -1992,7 +1992,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // Generate fork chain, starting from an early block parent := blocks[10] - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) @@ -2055,7 +2055,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Set the terminal total difficulty in the config gspec.Config.TerminalTotalDifficulty = big.NewInt(0) } - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key) if err != nil { t.Fatalf("failed to create tx: %v", err) @@ -2070,9 +2070,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - lastPrunedIndex := len(blocks) - TriesInMemory - 1 + lastPrunedIndex := len(blocks) - state.TriesInMemory - 1 lastPrunedBlock := blocks[lastPrunedIndex] - firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory] + firstNonPrunedBlock := blocks[len(blocks)-state.TriesInMemory] // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { @@ -2099,7 +2099,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Generate fork chain, make it longer than canon parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock parent := blocks[parentIndex] - fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) if int(b.header.Number.Uint64()) >= mergeBlock { b.SetPoS() @@ -2742,7 +2742,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } // Generate and import the canonical chain - _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) + _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { @@ -2755,9 +2755,9 @@ func 
testSideImportPrunedBlocks(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := TriesInMemory + states := state.TriesInMemory if scheme == rawdb.PathScheme { - states = TriesInMemory + 1 + states = state.TriesInMemory + 1 } lastPrunedIndex := len(blocks) - states - 1 lastPrunedBlock := blocks[lastPrunedIndex] @@ -3638,7 +3638,7 @@ func testSetCanonical(t *testing.T, scheme string) { engine = ethash.NewFaker() ) // Generate and import the canonical chain - _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) @@ -3659,7 +3659,7 @@ func testSetCanonical(t *testing.T, scheme string) { } // Generate the side chain and import them - _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) @@ -3698,8 +3698,8 @@ func testSetCanonical(t *testing.T, scheme string) { verify(side[len(side)-1]) // Reset the chain head to original chain - chain.SetCanonical(canon[TriesInMemory-1]) - verify(canon[TriesInMemory-1]) + chain.SetCanonical(canon[state.TriesInMemory-1]) + verify(canon[state.TriesInMemory-1]) } // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted diff --git a/core/state/statedb.go b/core/state/statedb.go index 66cfc8f05a..ac37d4ceeb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -41,6 +41,9 @@ import ( "golang.org/x/sync/errgroup" ) +// TriesInMemory represents the number of layers that are kept in RAM. +const TriesInMemory = 128 + type revision struct { id int journalIndex int @@ -1269,12 +1272,12 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) } - // Keep 128 diff layers in the memory, persistent layer is 129th. + // Keep TriesInMemory diff layers in the memory, persistent layer is 129th. 
// - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, 128); err != nil { - log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err) + if err := s.snaps.Cap(root, TriesInMemory); err != nil { + log.Warn("Failed to cap snapshot tree", "root", root, "layers", TriesInMemory, "err", err) } } s.SnapshotCommits += time.Since(start) From 3e896c875a372b4d09dc82f986b4fb4bf7fe1041 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com> Date: Mon, 6 May 2024 13:42:22 +0200 Subject: [PATCH 38/64] ethdb/pebble: fix pebble metrics registration (#29699) ethdb/pebble: use GetOrRegister instead of NewRegistered when creating metrics --- ethdb/pebble/pebble.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 01bfb4be3d..ee4e5dd75a 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -240,19 +240,19 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e } db.db = innerDB - db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil) - db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil) - db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil) - db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil) - db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil) - db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil) - db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil) - db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil) - db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil) - db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) - db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) - db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) - db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) + db.compTimeMeter = metrics.GetOrRegisterMeter(namespace+"compact/time", nil) + db.compReadMeter = metrics.GetOrRegisterMeter(namespace+"compact/input", nil) + db.compWriteMeter = metrics.GetOrRegisterMeter(namespace+"compact/output", nil) + db.diskSizeGauge = metrics.GetOrRegisterGauge(namespace+"disk/size", nil) + db.diskReadMeter = metrics.GetOrRegisterMeter(namespace+"disk/read", nil) + db.diskWriteMeter = metrics.GetOrRegisterMeter(namespace+"disk/write", nil) + db.writeDelayMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/duration", nil) + db.writeDelayNMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/counter", nil) + db.memCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/memory", nil) + db.level0CompGauge = metrics.GetOrRegisterGauge(namespace+"compact/level0", nil) + db.nonlevel0CompGauge = metrics.GetOrRegisterGauge(namespace+"compact/nonlevel0", nil) + db.seekCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/seek", nil) + db.manualMemAllocGauge = metrics.GetOrRegisterGauge(namespace+"memory/manualalloc", nil) // Start up the metrics gathering and return go db.meter(metricsGatheringInterval, namespace) @@ -543,7 +543,7 @@ func (d *Database) meter(refresh time.Duration, namespace 
string) { for i, level := range stats.Levels { // Append metrics for additional layers if i >= len(d.levelsGauge) { - d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + d.levelsGauge = append(d.levelsGauge, metrics.GetOrRegisterGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) } d.levelsGauge[i].Update(level.NumFiles) } From e4b8058d5a5832cdebdac7da385cf6d829c0d433 Mon Sep 17 00:00:00 2001 From: Nathan Date: Tue, 7 May 2024 15:25:15 +0800 Subject: [PATCH 39/64] eth/gasprice: add query limit for FeeHistory to defend DDOS attack (#29644) * eth/gasprice: add query limit for FeeHistory to defend DDOS attack * fix return values after cherry-pick --------- Co-authored-by: Eric <45141191+zlacfzy@users.noreply.github.com> --- eth/gasprice/feehistory.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 0410ae6b2d..d039bcb401 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -44,6 +44,7 @@ const ( // maxBlockFetchers is the max number of goroutines to spin up to pull blocks // for the fee history calculation (mostly relevant for LES). maxBlockFetchers = 4 + maxQueryLimit = 100 ) // blockFees represents a single block for processing @@ -240,6 +241,9 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL if len(rewardPercentiles) != 0 { maxFeeHistory = oracle.maxBlockHistory } + if len(rewardPercentiles) > maxQueryLimit { + return common.Big0, nil, nil, nil, nil, nil, fmt.Errorf("%w: over the query limit %d", errInvalidPercentile, maxQueryLimit) + } if blocks > maxFeeHistory { log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) blocks = maxFeeHistory From d6e91e2e05f52271e4d5e2bd6eeddf7a8426b86d Mon Sep 17 00:00:00 2001 From: nand2 Date: Tue, 7 May 2024 14:27:14 +0200 Subject: [PATCH 40/64] eth/gasestimator: include blobs in virtual balance computation (#29703) Fixes #29702 Co-authored-by: Felix Lange --- eth/gasestimator/gasestimator.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go index f07f98956e..ac3b59e97e 100644 --- a/eth/gasestimator/gasestimator.go +++ b/eth/gasestimator/gasestimator.go @@ -80,6 +80,16 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin } available.Sub(available, call.Value) } + if opts.Config.IsCancun(opts.Header.Number, opts.Header.Time) && len(call.BlobHashes) > 0 { + blobGasPerBlob := new(big.Int).SetInt64(params.BlobTxBlobGasPerBlob) + blobBalanceUsage := new(big.Int).SetInt64(int64(len(call.BlobHashes))) + blobBalanceUsage.Mul(blobBalanceUsage, blobGasPerBlob) + blobBalanceUsage.Mul(blobBalanceUsage, call.BlobGasFeeCap) + if blobBalanceUsage.Cmp(available) >= 0 { + return 0, nil, core.ErrInsufficientFunds + } + available.Sub(available, blobBalanceUsage) + } allowance := new(big.Int).Div(available, feeCap) // If the allowance is larger than maximum uint64, skip checking From 71aa15c98f88ee03097e5b30ccbb564734180ca3 Mon Sep 17 00:00:00 2001 From: Martin HS Date: Tue, 7 May 2024 21:24:58 +0200 Subject: [PATCH 41/64] travis: use ubuntu noble (24.04) instead of bionic (18.04) (#29723) --- .travis.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8c0af291a3..722a3a9256 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,7 +15,7 @@ jobs: if: type = push os: linux arch: 
amd64 - dist: bionic + dist: noble go: 1.22.x env: - docker @@ -32,7 +32,7 @@ jobs: if: type = push os: linux arch: arm64 - dist: bionic + dist: noble go: 1.22.x env: - docker @@ -49,7 +49,7 @@ jobs: - stage: build if: type = push os: linux - dist: bionic + dist: noble sudo: required go: 1.22.x env: @@ -100,7 +100,7 @@ jobs: - stage: build os: linux arch: amd64 - dist: bionic + dist: noble go: 1.22.x script: - travis_wait 30 go run build/ci.go test $TEST_PACKAGES @@ -109,14 +109,14 @@ jobs: if: type = pull_request os: linux arch: arm64 - dist: bionic + dist: noble go: 1.21.x script: - travis_wait 30 go run build/ci.go test $TEST_PACKAGES - stage: build os: linux - dist: bionic + dist: noble go: 1.21.x script: - travis_wait 30 go run build/ci.go test $TEST_PACKAGES @@ -125,7 +125,7 @@ jobs: - stage: build if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux - dist: bionic + dist: noble go: 1.22.x env: - ubuntu-ppa @@ -148,7 +148,7 @@ jobs: - stage: build if: type = cron os: linux - dist: bionic + dist: noble go: 1.22.x env: - azure-purge @@ -161,7 +161,7 @@ jobs: - stage: build if: type = cron os: linux - dist: bionic + dist: noble go: 1.22.x script: - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES From e96de6489cfa7431592fe14cbfd4eb563331362b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 7 May 2024 22:08:29 +0200 Subject: [PATCH 42/64] build: upgrade to go 1.22.3 (#29725) --- build/checksums.txt | 56 +++++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 767fc88ce5..da2988452a 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -5,22 +5,48 @@ # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz -# version:golang 1.22.2 +# version:golang 1.22.3 # https://go.dev/dl/ -374ea82b289ec738e968267cac59c7d5ff180f9492250254784b2044e90df5a9 go1.22.2.src.tar.gz -33e7f63077b1c5bce4f1ecadd4d990cf229667c40bfb00686990c950911b7ab7 go1.22.2.darwin-amd64.tar.gz -660298be38648723e783ba0398e90431de1cb288c637880cdb124f39bd977f0d go1.22.2.darwin-arm64.tar.gz -efc7162b0cad2f918ac566a923d4701feb29dc9c0ab625157d49b1cbcbba39da go1.22.2.freebsd-386.tar.gz -d753428296e6709527e291fd204700a587ffef2c0a472b21aebea11618245929 go1.22.2.freebsd-amd64.tar.gz -586d9eb7fe0489ab297ad80dd06414997df487c5cf536c490ffeaa8d8f1807a7 go1.22.2.linux-386.tar.gz -5901c52b7a78002aeff14a21f93e0f064f74ce1360fce51c6ee68cd471216a17 go1.22.2.linux-amd64.tar.gz -36e720b2d564980c162a48c7e97da2e407dfcc4239e1e58d98082dfa2486a0c1 go1.22.2.linux-arm64.tar.gz -9243dfafde06e1efe24d59df6701818e6786b4adfdf1191098050d6d023c5369 go1.22.2.linux-armv6l.tar.gz -251a8886c5113be6490bdbb955ddee98763b49c9b1bf4c8364c02d3b482dab00 go1.22.2.linux-ppc64le.tar.gz -2b39019481c28c560d65e9811a478ae10e3ef765e0f59af362031d386a71bfef go1.22.2.linux-s390x.tar.gz -651753c06df037020ef4d162c5b273452e9ba976ed17ae39e66ef7ee89d8147e go1.22.2.windows-386.zip -8e581cf330f49d3266e936521a2d8263679ef7e2fc2cbbceb85659122d883596 go1.22.2.windows-amd64.zip -ddfca5beb9a0c62254266c3090c2555d899bf3e7aa26243e7de3621108f06875 go1.22.2.windows-arm64.zip +80648ef34f903193d72a59c0dff019f5f98ae0c9aa13ade0b0ecbff991a76f68 go1.22.3.src.tar.gz +adc9f5fee89cd53d907eb542d3b269d9d8a08a66bf1ab42175450ffbb58733fb go1.22.3.aix-ppc64.tar.gz +610e48c1df4d2f852de8bc2e7fd2dc1521aac216f0c0026625db12f67f192024 go1.22.3.darwin-amd64.tar.gz 
+02abeab3f4b8981232237ebd88f0a9bad933bc9621791cd7720a9ca29eacbe9d go1.22.3.darwin-arm64.tar.gz +a5b3d54905f17af2ceaf7fcfe92edee67a5bd4eccd962dd89df719ace3e0894d go1.22.3.dragonfly-amd64.tar.gz +b9989ca87695ae93bacde6f3aa7b13cde5f3825515eb9ed9bbef014273739889 go1.22.3.freebsd-386.tar.gz +7483961fae29d7d768afd5c9c0f229354ca3263ab7119c20bc182761f87cbc74 go1.22.3.freebsd-amd64.tar.gz +edf1f0b8ecf68b14faeedb4f5d868a58c4777a0282bd85e5115c39c010cd0130 go1.22.3.freebsd-arm.tar.gz +572eb70e5e835fbff7d53ebf473f611d7eb458c428f8dbd98a49196883c3309e go1.22.3.freebsd-arm64.tar.gz +ef94eb2b74402e436dce970584222c4e454eb3093908591149bd2ded6862b8af go1.22.3.freebsd-riscv64.tar.gz +3c3f498c68334cbd11f72aadfb6bcb507eb8436cebc50f437a0523cd4c5e03d1 go1.22.3.illumos-amd64.tar.gz +fefba30bb0d3dd1909823ee38c9f1930c3dc5337a2ac4701c2277a329a386b57 go1.22.3.linux-386.tar.gz +8920ea521bad8f6b7bc377b4824982e011c19af27df88a815e3586ea895f1b36 go1.22.3.linux-amd64.tar.gz +6c33e52a5b26e7aa021b94475587fce80043a727a54ceb0eee2f9fc160646434 go1.22.3.linux-arm64.tar.gz +f2bacad20cd2b96f23a86d4826525d42b229fd431cc6d0dec61ff3bc448ef46e go1.22.3.linux-armv6l.tar.gz +41e9328340544893482b2928ae18a9a88ba18b2fdd29ac77f4d33cf1815bbdc2 go1.22.3.linux-loong64.tar.gz +cf4d5faff52e642492729eaf396968f43af179518be769075b90bc1bf650abf6 go1.22.3.linux-mips.tar.gz +3bd009fe2e3d2bfd52433a11cb210d1dfa50b11b4c347a293951efd9e36de945 go1.22.3.linux-mips64.tar.gz +5913b82a042188ef698f7f2dfd0cd0c71f0508a4739de9e41fceff3f4dc769b4 go1.22.3.linux-mips64le.tar.gz +441afebca555be5313867b4577f237c7b5c0fff4386e22e47875b9f805abbec5 go1.22.3.linux-mipsle.tar.gz +f3b53190a76f4a35283501ba6d94cbb72093be0c62ff735c6f9e586a1c983381 go1.22.3.linux-ppc64.tar.gz +04b7b05283de30dd2da20bf3114b2e22cc727938aed3148babaf35cc951051ac go1.22.3.linux-ppc64le.tar.gz +d4992d4a85696e3f1de06cefbfc2fd840c9c6695d77a0f35cfdc4e28b2121c20 go1.22.3.linux-riscv64.tar.gz +2aba796417a69be5f3ed489076bac79c1c02b36e29422712f9f3bf51da9cf2d4 go1.22.3.linux-s390x.tar.gz +d6e6113542dd9f23db899e177fe23772bac114a5ea5e8ee436b9da68628335a8 go1.22.3.netbsd-386.tar.gz +c33cee3075bd18ceefddd75bafa8efb51fbdc17b5ee74275122e7a927a237a4c go1.22.3.netbsd-amd64.tar.gz +1ab251df3c85f3b391a09565ca52fb6e1306527d72852d553e9ab74eabb4ecf8 go1.22.3.netbsd-arm.tar.gz +1d194fe53f5d82f9a612f848950d8af8cab7cb40ccc03f10c4eb1c9808ff1a0c go1.22.3.netbsd-arm64.tar.gz +91d6601727f08506e938640885d3ded784925045e3a4444fd9b4b936efe1b1e0 go1.22.3.openbsd-386.tar.gz +09d0c91ae35a4eea92615426992062ca236cc2f66444fb0b0a24cd3b13bd5297 go1.22.3.openbsd-amd64.tar.gz +338da30cc2c97b9458e0b4caa2509f67bba55d3de16fb7d31775baca82d2e3dc go1.22.3.openbsd-arm.tar.gz +53eadfabd2b7dd09a64941421afee2a2888e2a4f94f353b27919b1dad1171a21 go1.22.3.openbsd-arm64.tar.gz +8a1a2842ae8dcf2374bb05dff58074b368bb698dc9c211c794c1ff119cd9fdc7 go1.22.3.plan9-386.tar.gz +f9816d3dd9e730cad55085ea08c1f0c925720728f9c945fff59cd24d2ac2db7b go1.22.3.plan9-amd64.tar.gz +f4d3d7b17c9e1b1635fcb287b5b5ab5b60acc9db3ba6a27f2b2f5d6537a2ef95 go1.22.3.plan9-arm.tar.gz +46b7999ee94d91b21ad6940b5a3131ff6fe53ef97be9a34e582e2a3ad7263e95 go1.22.3.solaris-amd64.tar.gz +f60f63b8a0885e0d924f39fd284aee5438fe87d8c3d8545a312adf43e0d9edac go1.22.3.windows-386.zip +cab2af6951a6e2115824263f6df13ff069c47270f5788714fa1d776f7f60cb39 go1.22.3.windows-amd64.zip +40b37f4b068fc759f3a0dd61176a0f7570a4ba48bed8561c31d3967a3583981a go1.22.3.windows-arm.zip +59b76ee22b9b1c3afbf7f50e3cb4edb954d6c0d25e5e029ab5483a6804d61e71 go1.22.3.windows-arm64.zip # version:golangci 1.55.2 # 
https://github.com/golangci/golangci-lint/releases/ From 9ec50080eb26d0cea6c5cea3ec4049a5dfb48ae8 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 8 May 2024 14:43:33 +0800 Subject: [PATCH 43/64] core: use in-memory freezer for tests (#29720) * core: simplify chain tests * core, eth, cmd: use in-memory freezer for tests * core: restore tests --- cmd/utils/history_test.go | 3 +-- core/block_validator_test.go | 2 -- core/blockchain_test.go | 33 ++++++++++++++++--------------- core/txindexer_test.go | 5 +---- eth/downloader/downloader_test.go | 12 +++-------- 5 files changed, 22 insertions(+), 33 deletions(-) diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go index b6703c59ed..a631eaf490 100644 --- a/cmd/utils/history_test.go +++ b/cmd/utils/history_test.go @@ -162,8 +162,7 @@ func TestHistoryImportAndExport(t *testing.T) { } // Now import Era. - freezer := t.TempDir() - db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { panic(err) } diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 2f86b2d751..c573ef91fa 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -154,12 +154,10 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) { preHeaders := make([]*types.Header, len(preBlocks)) for i, block := range preBlocks { preHeaders[i] = block.Header() - t.Logf("Pre-merge header: %d", block.NumberU64()) } postHeaders := make([]*types.Header, len(postBlocks)) for i, block := range postBlocks { postHeaders[i] = block.Header() - t.Logf("Post-merge header: %d", block.NumberU64()) } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index ea8ea7e242..e4bc3e09a6 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -785,7 +785,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { t.Fatalf("failed to insert receipt %d: %v", n, err) } // Freezer style fast import the chain. - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -875,12 +875,12 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } ) - height := uint64(1024) + height := uint64(64) _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) // makeDb creates a db instance for testing. 
makeDb := func() ethdb.Database { - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1768,7 +1768,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original canonical one - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer db.Close() chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) @@ -1833,7 +1833,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { funds = big.NewInt(1000000000) gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}} ) - height := uint64(1024) + height := uint64(64) _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) // Import the chain as a ancient-first node and ensure all pointers are updated @@ -1908,7 +1908,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { } // Set up a BlockChain that uses the ancient store. - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1978,7 +1978,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { }) // Import the canonical chain - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer diskdb.Close() chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) @@ -2190,7 +2190,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) { b.OffsetTime(-9) // A higher difficulty }) // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2361,7 +2361,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i } }) // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -3634,18 +3634,19 @@ func testSetCanonical(t *testing.T, scheme string) { Alloc: types.GenesisAlloc{address: {Balance: funds}}, BaseFee: big.NewInt(params.InitialBaseFee), } - signer = types.LatestSigner(gspec.Config) - engine = ethash.NewFaker() + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + chainLength = 10 ) // Generate and import the canonical chain - _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { + _, 
canon, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) } gen.AddTx(tx) }) - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer diskdb.Close() chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) @@ -3659,7 +3660,7 @@ func testSetCanonical(t *testing.T, scheme string) { } // Generate the side chain and import them - _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { + _, side, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) @@ -3698,8 +3699,8 @@ func testSetCanonical(t *testing.T, scheme string) { verify(side[len(side)-1]) // Reset the chain head to original chain - chain.SetCanonical(canon[state.TriesInMemory-1]) - verify(canon[state.TriesInMemory-1]) + chain.SetCanonical(canon[chainLength-1]) + verify(canon[chainLength-1]) } // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted diff --git a/core/txindexer_test.go b/core/txindexer_test.go index 7b5ff1f206..0a606ed8fa 100644 --- a/core/txindexer_test.go +++ b/core/txindexer_test.go @@ -18,7 +18,6 @@ package core import ( "math/big" - "os" "testing" "github.com/ethereum/go-ethereum/common" @@ -211,8 +210,7 @@ func TestTxIndexer(t *testing.T) { }, } for _, c := range cases { - frdir := t.TempDir() - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) // Index the initial blocks from ancient store @@ -238,6 +236,5 @@ func TestTxIndexer(t *testing.T) { verify(db, 0, indexer) db.Close() - os.RemoveAll(frdir) } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 92403277fd..e5329b7b39 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -19,7 +19,6 @@ package downloader import ( "fmt" "math/big" - "os" "sync" "sync/atomic" "testing" @@ -43,7 +42,6 @@ import ( // downloadTester is a test simulator for mocking out local block chain. type downloadTester struct { - freezer string chain *core.BlockChain downloader *Downloader @@ -58,8 +56,7 @@ func newTester(t *testing.T) *downloadTester { // newTesterWithNotification creates a new downloader test mocker. 
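The change repeated across these tests is that an empty ancient directory now selects an in-memory freezer, so the temporary directory and the explicit cleanup both disappear. A minimal sketch of the resulting pattern, using a hypothetical helper name rather than code taken from the patch itself:

	import (
		"testing"

		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/ethdb"
	)

	// newTestChainDB returns a key-value store backed by an in-memory freezer,
	// so the test needs no TempDir and no manual RemoveAll afterwards.
	func newTestChainDB(t *testing.T) ethdb.Database {
		t.Helper()
		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
		if err != nil {
			t.Fatalf("failed to create in-memory freezer db: %v", err)
		}
		t.Cleanup(func() { db.Close() })
		return db
	}
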
func newTesterWithNotification(t *testing.T, success func()) *downloadTester { - freezer := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { panic(err) } @@ -76,9 +73,8 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { panic(err) } tester := &downloadTester{ - freezer: freezer, - chain: chain, - peers: make(map[string]*downloadTesterPeer), + chain: chain, + peers: make(map[string]*downloadTesterPeer), } tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) return tester @@ -89,8 +85,6 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { func (dl *downloadTester) terminate() { dl.downloader.Terminate() dl.chain.Stop() - - os.RemoveAll(dl.freezer) } // newPeer registers a new block download source into the downloader. From dd4afb9fecb266ae4317406ad5b14b177e5cd8a9 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 11:08:55 +0200 Subject: [PATCH 44/64] .travis.yml: fix install of gcc-multilib (#29733) --- .travis.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 722a3a9256..9d54ab82f3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,14 +56,13 @@ jobs: - azure-linux git: submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - gcc-multilib script: - # Build for the primary platforms that Trusty can manage + # build amd64 - go run build/ci.go install -dlgo - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + + # build 386 + - sudo -E apt-get install -yq --no-install-suggests --no-install-recommends --force-yes install gcc-multilib - go run build/ci.go install -dlgo -arch 386 - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds @@ -165,4 +164,3 @@ jobs: go: 1.22.x script: - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES - From 6154f87c33303698ad962427f619c6f129640c1e Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 11:33:07 +0200 Subject: [PATCH 45/64] .travis.yml: fix apt-get options (#29734) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9d54ab82f3..11ea43ef51 100644 --- a/.travis.yml +++ b/.travis.yml @@ -62,7 +62,7 @@ jobs: - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # build 386 - - sudo -E apt-get install -yq --no-install-suggests --no-install-recommends --force-yes install gcc-multilib + - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib - go run build/ci.go install -dlgo -arch 386 - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds From dd09f7e3facd973cdaa17b7403f82c8bbff28de3 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 14:28:40 +0200 Subject: [PATCH 46/64] params: release go-ethereum v1.14.1 stable --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 319f21b2a8..9e7e20b291 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - 
VersionMinor = 14 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 14 // Minor version component of the current release + VersionPatch = 1 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 14f4228472662c5da2b8fd4a11f5f8a3ac4f2aff Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 14:30:18 +0200 Subject: [PATCH 47/64] params: begin v1.14.2 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 9e7e20b291..02ab763544 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 14 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 14 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From eeb22089fd24ee1377a6a1f52deb41673c38419c Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 14:34:58 +0200 Subject: [PATCH 48/64] .travis.yml: fix package install on PPA builder --- .travis.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 11ea43ef51..29c6d6b521 100644 --- a/.travis.yml +++ b/.travis.yml @@ -130,15 +130,8 @@ jobs: - ubuntu-ppa git: submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - devscripts - - debhelper - - dput - - fakeroot - - python-bzrlib - - python-paramiko + before_install: + - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot python-bzrlib python-paramiko script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " From 35b2d07f4bf735958b41cd141a5feda5e61be73e Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 16:26:01 +0200 Subject: [PATCH 49/64] params: release go-ethereum v1.14.2 stable --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 02ab763544..fb40a0dd71 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 14 // Minor version component of the current release VersionPatch = 2 // Patch 
version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 1a79f8fe58f9c42db1dcfd8599f386c8158069e8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 16:27:44 +0200 Subject: [PATCH 50/64] params: begin v1.14.3 release cycle --- params/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/params/version.go b/params/version.go index fb40a0dd71..2eccf93384 100644 --- a/params/version.go +++ b/params/version.go @@ -23,8 +23,8 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 14 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From faff03c40329be2aa87017dc90ffebf0b7285388 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 20:28:05 +0200 Subject: [PATCH 51/64] .travis.yml: enable PPA upload on push and fix apt-get command (#29741) --- .travis.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 29c6d6b521..6dca0ea5f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -122,7 +122,8 @@ jobs: # This builder does the Ubuntu PPA nightly uploads - stage: build - if: type = cron || (type = push && tag ~= /^v[0-9]/) + # if: type = cron || (type = push && tag ~= /^v[0-9]/) + if: type = push os: linux dist: noble go: 1.22.x @@ -131,7 +132,7 @@ jobs: git: submodules: false # avoid cloning ethereum/tests before_install: - - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot python-bzrlib python-paramiko + - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " From 804afb8faa0e3796fb8e6f39beabce356bc98298 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 8 May 2024 20:46:54 +0200 Subject: [PATCH 52/64] .travis.yml: restore PPA condition and bump timeouts (#29742) --- .travis.yml | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6dca0ea5f6..488ec1e7d2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,28 +102,18 @@ jobs: dist: noble go: 1.22.x script: - - travis_wait 30 go run build/ci.go test $TEST_PACKAGES - - - stage: build - if: type = pull_request - os: linux - arch: arm64 - dist: noble - go: 1.21.x - script: - - travis_wait 30 go run build/ci.go test $TEST_PACKAGES + - travis_wait 45 go run build/ci.go test $TEST_PACKAGES - stage: build os: linux dist: noble go: 
1.21.x script: - - travis_wait 30 go run build/ci.go test $TEST_PACKAGES + - travis_wait 45 go run build/ci.go test $TEST_PACKAGES # This builder does the Ubuntu PPA nightly uploads - stage: build - # if: type = cron || (type = push && tag ~= /^v[0-9]/) - if: type = push + if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux dist: noble go: 1.22.x @@ -157,4 +147,4 @@ jobs: dist: noble go: 1.22.x script: - - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES + - travis_wait 50 go run build/ci.go test -race $TEST_PACKAGES From ab48ba42f4f34873d65fd1737fabac5c680baff6 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 9 May 2024 12:34:54 +0200 Subject: [PATCH 53/64] params: release go-ethereum v1.14.3 stable --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 2eccf93384..0220cb6a6b 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 14 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 14 // Minor version component of the current release + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 25b12e89986ee91c7a49cf3afb414b0e8b3b0653 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 5 Aug 2024 08:22:01 +0530 Subject: [PATCH 54/64] fix test --- core/vm/contracts.go | 6 ++++++ eth/tracers/internal/tracetest/calltrace_test.go | 6 +++--- .../internal/tracetest/testdata/call_tracer/blob_tx.json | 2 +- .../internal/tracetest/testdata/call_tracer/create.json | 2 +- .../internal/tracetest/testdata/call_tracer/deep_calls.json | 2 +- .../tracetest/testdata/call_tracer/delegatecall.json | 2 +- .../testdata/call_tracer/inner_create_oog_outer_throw.json | 2 +- .../tracetest/testdata/call_tracer/inner_instafail.json | 2 +- .../tracetest/testdata/call_tracer/inner_revert_reason.json | 2 +- .../testdata/call_tracer/inner_throw_outer_revert.json | 2 +- .../internal/tracetest/testdata/call_tracer/oog.json | 2 +- .../internal/tracetest/testdata/call_tracer/revert.json | 2 +- .../tracetest/testdata/call_tracer/revert_reason.json | 2 +- .../tracetest/testdata/call_tracer/selfdestruct.json | 2 +- .../internal/tracetest/testdata/call_tracer/simple.json | 2 +- .../tracetest/testdata/call_tracer/simple_onlytop.json | 2 +- .../internal/tracetest/testdata/call_tracer/throw.json | 2 +- .../tracetest/testdata/call_tracer_withLog/calldata.json | 2 +- .../testdata/call_tracer_withLog/delegatecall.json | 2 +- .../call_tracer_withLog/frontier_create_outofstorage.json | 2 +- .../testdata/call_tracer_withLog/multi_contracts.json | 2 +- .../tracetest/testdata/call_tracer_withLog/multilogs.json | 2 +- .../tracetest/testdata/call_tracer_withLog/notopic.json | 2 +- .../tracetest/testdata/call_tracer_withLog/simple.json | 2 +- .../tracetest/testdata/call_tracer_withLog/tx_failed.json | 2 +- .../testdata/call_tracer_withLog/tx_partial_failed.json | 2 +- .../testdata/call_tracer_withLog/with_onlyTopCall.json | 2 +- 27 files changed, 34 insertions(+), 28 deletions(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 
103c41856e..907dbb245c 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -112,6 +112,12 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{0xa}): &kzgPointEvaluation{}, } +// PrecompiledContractsP256Verify contains the precompiled Ethereum +// contract specified in EIP-7212. +var PrecompiledContractsP256Verify = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, +} + // PrecompiledContractsPrague contains the set of pre-compiled Ethereum // contracts used in the Prague release. var PrecompiledContractsPrague = map[common.Address]PrecompiledContract{ diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index a682cf06af..3a4f4f9ee7 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -301,13 +301,13 @@ func TestInternals(t *testing.T) { byte(vm.CALL), }, tracer: mkTracer("callTracer", nil), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xe3a8"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x54d8"}],"from":"%s","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), + want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xe3a8"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { name: "Stack depletion in LOG0", code: []byte{byte(vm.LOG3)}, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0x0"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x13880"}],"from":"%s","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), + want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0x0"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { name: "Mem 
expansion in LOG0", @@ -320,7 +320,7 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xdce2"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x5b9e"}],"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), + want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xdce2"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { // Leads to OOM on the prestate tracer diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json index 9b48434e11..a78775b53a 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json @@ -77,7 +77,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x200b20" + "value": "0x0" } ], "from": "0x0c2c51a0990aee1d73c1228de158688341557508", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json index 903ff57c57..39a82690c5 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json @@ -65,7 +65,7 @@ "purpose": "tip", "from": null, "to": "0xD049bfd667cB46Aa3Ef5Df0dA3e57DB3Be39E511", - "value": "0x2a0383e2a65c00" + "value": "0x0" } ], "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json 
b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json index a5cca5d434..0c39115b14 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json @@ -128,7 +128,7 @@ "purpose": "tip", "from": null, "to": "0x1977C248e1014Cc103929Dd7f154199C916E39Ec", - "value": "0x700fefccd9800" + "value": "0x0" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json index 26788a6ce4..04a22cf363 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json @@ -81,7 +81,7 @@ "purpose": "tip", "from": null, "to": "0x5659922cE141EedBC2733678f9806c77b4eEBEE8", - "value": "0x371a55e8d6800" + "value": "0x0" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json index 41d541761f..c449347bae 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json @@ -73,7 +73,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x2aa1efb94e0000" + "value": "0x0" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json index 460c3d91b8..bf52a05f3e 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0xc822ef32e6d26E170B70Cf761e204c1806265914", - "value": "0x216e3433f7200" + "value": "0x0" } ], "type": "CALL", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json index 2e06a33f5f..f792d62cf1 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x15bd3c0591000" + "value": "0x0" } ], "from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json index 9ebd222e29..6b5d6cac36 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json @@ -76,7 +76,7 @@ "purpose": "tip", "from": null, "to": "0x00d8Ae40D9a06d0E7a2877B62E32eB959Afbe16D", - "value": "0x25ed5d81941000" + "value": "0x0" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json index 8f11c63911..84e4ba7626 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json @@ -67,7 +67,7 @@ "purpose": "tip", "from": null, "to": 
"0xD049bfd667cB46Aa3Ef5Df0dA3e57DB3Be39E511", - "value": "0x49874422212000" + "value": "0x0" } ], "error": "out of gas", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json index 1ea1a8a55a..ebeab98fe3 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json @@ -65,7 +65,7 @@ "purpose": "tip", "from": null, "to": "0xF4D8e706CfB25c0DECBbDd4D2E2Cc10C66376a3F", - "value": "0x2954557199e00" + "value": "0x0" } ], "error": "execution reverted", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json index 610ecc9b4a..1a234038be 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json @@ -70,7 +70,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x198f3fb76000" + "value": "0x0" } ], "error": "execution reverted", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json index eb16322eae..67237b44a1 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json @@ -71,7 +71,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x30cdd67dd6400" + "value": "0x0" }, { "purpose": "selfDestruct", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json index 92f7a68a18..db8953b564 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json @@ -76,7 +76,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x420eed1bd6c00" + "value": "0x0" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json index deb7d5cddf..317913cdbc 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json @@ -79,7 +79,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x420eed1bd6c00" + "value": "0x0" } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json index 4bdfbd349b..4407bc69d0 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0x294e5d6c39A36Ce38aF1DCA70c1060f78Dee8070", - "value": "0x11c37937e08000" + "value": "0x0" } ], "error": "invalid jump destination", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json index 53d561f395..b4b248f0eb 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json @@ -95,7 +95,7 @@ "purpose": "tip", "from": null, "to": "0xf8b483DbA2c3B7176a3Da549ad41A48BB3121069", - "value": "0x119b54eb98e126" + "value": "0x0" } ], "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json index 288043d3da..fb089b01e4 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -152,7 +152,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0xb9e774e8d5800" + "value": "0x0" } ], "from": "0x3de712784baf97260455ae25fb74f574ec9c1add", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json index b2e1ea79cd..1e87fdae85 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json @@ -98,7 +98,7 @@ "purpose": "tip", "from": null, "to": "0xe48430c4E88A929bBa0eE3DCe284866a9937b609", - "value": "0x6a8fb3d104cc00" + "value": "0x0" } ], "from": "0x0047a8033cc6d6ca2ed5044674fd421f44884de8", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json index df7f7a8318..9594749fbc 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json @@ -317,7 +317,7 @@ "purpose": "tip", "from": null, "to": "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", - "value": "0xe25384e63aa600" + "value": "0x0" } ], "from": "0xbe3ae5cb97c253dda67181c6e34e43f5c275e08b", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json index 55848ae7ab..f1cad828a5 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json @@ -185,7 +185,7 @@ "purpose": "tip", "from": null, "to": "0x2a65Aca4D5fC5B5C859090a6c34d164135398226", - "value": "0x1b3dd214f1b8c00" + "value": "0x0" } ], "from": "0x3fcb0342353c541e210013aaddc2e740b9a33d08", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json index f2316eb1b9..8484e12bf4 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json @@ -120,7 +120,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0xb30d0148e0600" + "value": "0x0" } ], "from": "0x6412becf35cc7e2a9e7e47966e443f295e1e4f4a", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json index 82d0596304..5b031011f3 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json @@ -81,7 +81,7 @@ "purpose": "tip", "from": null, "to": "0xe2fE6B13287f28E193333fDfe7Fedf2f6Df6124A", - "value": "0x90886d609c400" + "value": "0x0" } ], "from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json index 730fea23e1..c0157602cd 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json @@ -155,7 +155,7 @@ "purpose": "tip", "from": null, "to": "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", - "value": "0x81fc72632f7340" + "value": "0x0" } ], "from": "0xe6002189a74b43e6868b20c1311bc108e38aac57", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json index 8a46028598..fbe1bfc2d4 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json @@ -93,7 +93,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0xbcd242cec0800" + "value": "0x0" } ], "from": "0x01115b41bd2731353dd3e6abf44818fdc035aaf1", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json index 50b3b0b276..e4c05184ed 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json @@ -96,7 +96,7 @@ "purpose": "tip", "from": null, "to": "0xf8b483DbA2c3B7176a3Da549ad41A48BB3121069", - "value": "0x119b54eb98e126" + "value": "0x0" } ], "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", From 8140b3de8ca06804bfd8c97c2e429e2b4dbc4b63 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 5 Aug 2024 10:54:14 -0500 Subject: [PATCH 55/64] Add a notion of zombies to preserve empty account behavior --- core/state/journal.go | 22 +++++++++++++++++++++ core/state/journal_arbitrum.go | 30 +++++++++++++++++++++++++++++ core/state/journal_arbitrum_test.go | 14 ++++++++++++++ core/state/statedb.go | 19 ++++++++++++++++-- 4 files changed, 83 insertions(+), 2 deletions(-) create mode 100644 core/state/journal_arbitrum_test.go diff --git a/core/state/journal.go b/core/state/journal.go index c0f5615c98..37775c602b 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -40,6 +40,8 @@ type journalEntry interface { // commit. These are tracked to be able to be reverted in the case of an execution // exception or request for reversal. type journal struct { + zombieEntries map[common.Address]int // Arbitrum: number of createZombieChange entries for each address + entries []journalEntry // Current changes tracked by the journal dirties map[common.Address]int // Dirty accounts and the number of changes } @@ -47,6 +49,8 @@ type journal struct { // newJournal creates a new initialized journal. 
func newJournal() *journal { return &journal{ + zombieEntries: make(map[common.Address]int), + dirties: make(map[common.Address]int), } } @@ -56,6 +60,10 @@ func (j *journal) append(entry journalEntry) { j.entries = append(j.entries, entry) if addr := entry.dirtied(); addr != nil { j.dirties[*addr]++ + // Arbitrum: also track the number of zombie changes + if isZombie(entry) { + j.zombieEntries[*addr]++ + } } } @@ -70,6 +78,13 @@ func (j *journal) revert(statedb *StateDB, snapshot int) { if addr := j.entries[i].dirtied(); addr != nil { if j.dirties[*addr]--; j.dirties[*addr] == 0 { delete(j.dirties, *addr) + + // Revert zombieEntries tracking + if isZombie(j.entries[i]) { + if j.zombieEntries[*addr]--; j.zombieEntries[*addr] == 0 { + delete(j.zombieEntries, *addr) + } + } } } } @@ -95,6 +110,8 @@ func (j *journal) copy() *journal { entries = append(entries, j.entries[i].copy()) } return &journal{ + zombieEntries: maps.Clone(j.zombieEntries), + entries: entries, dirties: maps.Clone(j.dirties), } @@ -106,6 +123,11 @@ type ( account *common.Address } + // Changes to the account trie without being marked as dirty. + createZombieChange struct { + account *common.Address + } + // createContractChange represents an account becoming a contract-account. // This event happens prior to executing initcode. The journal-event simply // manages the created-flag, in order to allow same-tx destruction. diff --git a/core/state/journal_arbitrum.go b/core/state/journal_arbitrum.go index b286303150..59767edae7 100644 --- a/core/state/journal_arbitrum.go +++ b/core/state/journal_arbitrum.go @@ -77,3 +77,33 @@ func (ch EvictWasm) copy() journalEntry { Debug: ch.Debug, } } + +// Arbitrum: only implemented by createZombieChange +type possibleZombie interface { + // Arbitrum: return true if this change should, on its own, create an empty account. + // If combined with another non-zombie change the empty account will be cleaned up. + isZombie() bool +} + +func isZombie(entry journalEntry) bool { + possiblyZombie, isPossiblyZombie := entry.(possibleZombie) + return isPossiblyZombie && possiblyZombie.isZombie() +} + +func (ch createZombieChange) revert(s *StateDB) { + delete(s.stateObjects, *ch.account) +} + +func (ch createZombieChange) dirtied() *common.Address { + return ch.account +} + +func (ch createZombieChange) copy() journalEntry { + return createZombieChange{ + account: ch.account, + } +} + +func (ch createZombieChange) isZombie() bool { + return true +} diff --git a/core/state/journal_arbitrum_test.go b/core/state/journal_arbitrum_test.go new file mode 100644 index 0000000000..9ef6d7539c --- /dev/null +++ b/core/state/journal_arbitrum_test.go @@ -0,0 +1,14 @@ +package state + +import "testing" + +func TestIsZombie(t *testing.T) { + var nonZombie journalEntry = createObjectChange{} + if isZombie(nonZombie) { + t.Error("createObjectChange should not be a zombie") + } + var zombie journalEntry = createZombieChange{} + if !isZombie(zombie) { + t.Error("createZombieChange should be a zombie") + } +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 5cf8e066dc..3908771de6 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -436,6 +436,11 @@ func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tr // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) { + // Arbitrum: this behavior created empty accounts in old geth versions. 
+ if amount.IsZero() && s.getStateObject(addr) == nil { + s.createZombie(addr) + return + } stateObject := s.getOrNewStateObject(addr) if stateObject != nil { s.arbExtraData.unexpectedBalanceDelta.Sub(s.arbExtraData.unexpectedBalanceDelta, amount.ToBig()) @@ -695,6 +700,15 @@ func (s *StateDB) createObject(addr common.Address) *stateObject { return obj } +// createObject creates a new state object. The assumption is held there is no +// existing account with the given address, otherwise it will be silently overwritten. +func (s *StateDB) createZombie(addr common.Address) *stateObject { + obj := newObject(s, addr, nil) + s.journal.append(createZombieChange{account: &addr}) + s.setStateObject(obj) + return obj +} + // CreateAccount explicitly creates a new state object, assuming that the // account did not previously exist in the state. If the account already // exists, this function will silently overwrite it which might lead to a @@ -842,7 +856,8 @@ func (s *StateDB) GetRefund() uint64 { // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) - for addr := range s.journal.dirties { + for addr, dirtyCount := range s.journal.dirties { + isZombie := s.journal.zombieEntries[addr] == dirtyCount obj, exist := s.stateObjects[addr] if !exist { // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 @@ -853,7 +868,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // Thus, we can safely ignore it here continue } - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { + if obj.selfDestructed || (deleteEmptyObjects && obj.empty() && !isZombie) { delete(s.stateObjects, obj.address) s.markDelete(addr) From cbe2004902414123cd85bf1343f1012fd667a633 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 6 Aug 2024 09:22:50 -0500 Subject: [PATCH 56/64] Fix zombie creation to only happen for destructed objects --- core/state/statedb.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 3908771de6..a4bc16de80 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -438,8 +438,10 @@ func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tr func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) { // Arbitrum: this behavior created empty accounts in old geth versions. 
if amount.IsZero() && s.getStateObject(addr) == nil { - s.createZombie(addr) - return + if _, destructed := s.stateObjectsDestruct[addr]; destructed { + s.createZombie(addr) + return + } } stateObject := s.getOrNewStateObject(addr) if stateObject != nil { From ee114edd89a52d98f1148cc55592a131bca8e230 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 6 Aug 2024 09:40:37 -0500 Subject: [PATCH 57/64] Fix tracing of subtracting zero balance --- core/state/statedb.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index a4bc16de80..e61ad16bc0 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -440,7 +440,6 @@ func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tr if amount.IsZero() && s.getStateObject(addr) == nil { if _, destructed := s.stateObjectsDestruct[addr]; destructed { s.createZombie(addr) - return } } stateObject := s.getOrNewStateObject(addr) From 0adf3d1a797e12f43b6d257827bd75a9d08e4ac5 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 9 Oct 2024 22:42:36 -0500 Subject: [PATCH 58/64] Make zombie creation explicit --- core/state/statedb.go | 6 ------ core/state/statedb_arbitrum.go | 9 +++++++++ core/vm/interface.go | 3 +++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index e61ad16bc0..273cee9b4e 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -436,12 +436,6 @@ func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int, reason tr // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) { - // Arbitrum: this behavior created empty accounts in old geth versions. - if amount.IsZero() && s.getStateObject(addr) == nil { - if _, destructed := s.stateObjectsDestruct[addr]; destructed { - s.createZombie(addr) - } - } stateObject := s.getOrNewStateObject(addr) if stateObject != nil { s.arbExtraData.unexpectedBalanceDelta.Sub(s.arbExtraData.unexpectedBalanceDelta, amount.ToBig()) diff --git a/core/state/statedb_arbitrum.go b/core/state/statedb_arbitrum.go index e459ad4570..73bb5c77df 100644 --- a/core/state/statedb_arbitrum.go +++ b/core/state/statedb_arbitrum.go @@ -134,6 +134,15 @@ func (s *StateDB) AddStylusPagesEver(new uint16) { s.arbExtraData.everWasmPages = common.SaturatingUAdd(s.arbExtraData.everWasmPages, new) } +// Arbitrum: certain behavior created empty accounts in old geth versions. 
+func (s *StateDB) CreateZombieIfDeleted(addr common.Address) { + if s.getStateObject(addr) == nil { + if _, destructed := s.stateObjectsDestruct[addr]; destructed { + s.createZombie(addr) + } + } +} + func NewDeterministic(root common.Hash, db Database) (*StateDB, error) { sdb, err := New(root, db, nil) if err != nil { diff --git a/core/vm/interface.go b/core/vm/interface.go index 0818f75b32..49ca835fba 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -44,6 +44,9 @@ type StateDB interface { AddStylusPages(new uint16) (uint16, uint16) AddStylusPagesEver(new uint16) + // Arbitrum: preserve old empty account behavior + CreateZombieIfDeleted(common.Address) + Deterministic() bool Database() state.Database From 5cbfbafeea2e13345236508154a34cadd66492e6 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 9 Oct 2024 22:46:00 -0500 Subject: [PATCH 59/64] Clarify CreateZombieIfDeleted comment --- core/state/statedb_arbitrum.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/statedb_arbitrum.go b/core/state/statedb_arbitrum.go index 73bb5c77df..de223605f4 100644 --- a/core/state/statedb_arbitrum.go +++ b/core/state/statedb_arbitrum.go @@ -134,7 +134,7 @@ func (s *StateDB) AddStylusPagesEver(new uint16) { s.arbExtraData.everWasmPages = common.SaturatingUAdd(s.arbExtraData.everWasmPages, new) } -// Arbitrum: certain behavior created empty accounts in old geth versions. +// Arbitrum: preserve empty account behavior from old geth and ArbOS versions. func (s *StateDB) CreateZombieIfDeleted(addr common.Address) { if s.getStateObject(addr) == nil { if _, destructed := s.stateObjectsDestruct[addr]; destructed { From 190c916ad8e04105847733d724422b9b923447b6 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 12 Nov 2024 15:51:42 +0530 Subject: [PATCH 60/64] Changes based on PR comments --- core/blockchain.go | 11 +++++------ core/state/statedb.go | 6 +++--- core/state_transition.go | 1 + 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 60e956b4d8..1524ace48b 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -97,11 +97,10 @@ var ( ) const ( - bodyCacheLimit = 256 - blockCacheLimit = 256 - receiptsCacheLimit = 32 - txLookupCacheLimit = 1024 - DefaultTriesInMemory = 128 + bodyCacheLimit = 256 + blockCacheLimit = 256 + receiptsCacheLimit = 32 + txLookupCacheLimit = 1024 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. // @@ -186,7 +185,7 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config { var defaultCacheConfig = &CacheConfig{ // Arbitrum Config Options - TriesInMemory: DefaultTriesInMemory, + TriesInMemory: state.DefaultTriesInMemory, TrieRetention: 30 * time.Minute, MaxNumberOfBlocksToSkipStateSaving: 0, MaxAmountOfGasToSkipStateSaving: 0, diff --git a/core/state/statedb.go b/core/state/statedb.go index 9a4f31c4d9..9a5a6ee8f2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -43,7 +43,7 @@ import ( ) // TriesInMemory represents the number of layers that are kept in RAM. 
-const TriesInMemory = 128 +const DefaultTriesInMemory = 128 type revision struct { id int @@ -1374,8 +1374,8 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, TriesInMemory); err != nil { - log.Warn("Failed to cap snapshot tree", "root", root, "layers", TriesInMemory, "err", err) + if err := s.snaps.Cap(root, DefaultTriesInMemory); err != nil { + log.Warn("Failed to cap snapshot tree", "root", root, "layers", DefaultTriesInMemory, "err", err) } } s.SnapshotCommits += time.Since(start) diff --git a/core/state_transition.go b/core/state_transition.go index 21d542ea8c..dec1f1f807 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -524,6 +524,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { fee := new(uint256.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTipU256) st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee) + tipAmount = fee.ToBig() } // Arbitrum: record the tip From 9cf4acc45a2b0cea723e8323d2b811e358b04ca0 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 12 Nov 2024 15:57:04 +0530 Subject: [PATCH 61/64] fix lint --- core/blockchain_test.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 16fd44407d..5ef864b9de 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1712,7 +1712,7 @@ func TestTrieForkGC(t *testing.T) { Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee), } - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) // Generate a bunch of fork blocks, each side forking from the canonical chain forks := make([]*types.Block, len(blocks)) @@ -1740,7 +1740,7 @@ func TestTrieForkGC(t *testing.T) { } } // Dereference all the recent tries and ensure no past trie is left in - for i := 0; i < state.TriesInMemory; i++ { + for i := 0; i < state.DefaultTriesInMemory; i++ { chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } @@ -1764,8 +1764,8 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) + original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) + competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.DefaultTriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original 
canonical one db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) @@ -1804,7 +1804,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := state.TriesInMemory + states := state.DefaultTriesInMemory if scheme == rawdb.PathScheme { states = states + 1 } @@ -1972,7 +1972,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // We must use a pretty long chain to ensure that the fork doesn't overtake us // until after at least 128 blocks post tip - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.TriesInMemory, func(i int, b *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) b.OffsetTime(-9) }) @@ -1992,7 +1992,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // Generate fork chain, starting from an early block parent := blocks[10] - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) @@ -2055,7 +2055,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Set the terminal total difficulty in the config gspec.Config.TerminalTotalDifficulty = big.NewInt(0) } - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.DefaultTriesInMemory, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key) if err != nil { t.Fatalf("failed to create tx: %v", err) @@ -2070,9 +2070,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - lastPrunedIndex := len(blocks) - state.TriesInMemory - 1 + lastPrunedIndex := len(blocks) - state.DefaultTriesInMemory - 1 lastPrunedBlock := blocks[lastPrunedIndex] - firstNonPrunedBlock := blocks[len(blocks)-state.TriesInMemory] + firstNonPrunedBlock := blocks[len(blocks)-state.DefaultTriesInMemory] // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { @@ -2099,7 +2099,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Generate fork chain, make it longer than canon parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock parent := blocks[parentIndex] - fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) if int(b.header.Number.Uint64()) >= mergeBlock { b.SetPoS() @@ -2742,7 +2742,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } // Generate and import the canonical chain - _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil) + _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 
2*state.DefaultTriesInMemory, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), nil, genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { @@ -2755,9 +2755,9 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := state.TriesInMemory + states := state.DefaultTriesInMemory if scheme == rawdb.PathScheme { - states = state.TriesInMemory + 1 + states = state.DefaultTriesInMemory + 1 } lastPrunedIndex := len(blocks) - states - 1 lastPrunedBlock := blocks[lastPrunedIndex] From d0aed8532ac68f505eff9c43d593cb403a2f9caf Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 12 Nov 2024 16:30:49 +0530 Subject: [PATCH 62/64] undo test changes --- eth/tracers/internal/tracetest/calltrace_test.go | 6 +++--- .../internal/tracetest/testdata/call_tracer/blob_tx.json | 2 +- .../internal/tracetest/testdata/call_tracer/create.json | 2 +- .../internal/tracetest/testdata/call_tracer/deep_calls.json | 2 +- .../tracetest/testdata/call_tracer/delegatecall.json | 2 +- .../testdata/call_tracer/inner_create_oog_outer_throw.json | 2 +- .../tracetest/testdata/call_tracer/inner_instafail.json | 2 +- .../tracetest/testdata/call_tracer/inner_revert_reason.json | 2 +- .../testdata/call_tracer/inner_throw_outer_revert.json | 2 +- .../internal/tracetest/testdata/call_tracer/oog.json | 2 +- .../internal/tracetest/testdata/call_tracer/revert.json | 2 +- .../tracetest/testdata/call_tracer/revert_reason.json | 2 +- .../tracetest/testdata/call_tracer/selfdestruct.json | 2 +- .../internal/tracetest/testdata/call_tracer/simple.json | 2 +- .../tracetest/testdata/call_tracer/simple_onlytop.json | 2 +- .../internal/tracetest/testdata/call_tracer/throw.json | 2 +- .../tracetest/testdata/call_tracer_withLog/calldata.json | 2 +- .../testdata/call_tracer_withLog/delegatecall.json | 2 +- .../call_tracer_withLog/frontier_create_outofstorage.json | 2 +- .../testdata/call_tracer_withLog/multi_contracts.json | 2 +- .../tracetest/testdata/call_tracer_withLog/multilogs.json | 2 +- .../tracetest/testdata/call_tracer_withLog/notopic.json | 2 +- .../tracetest/testdata/call_tracer_withLog/simple.json | 2 +- .../tracetest/testdata/call_tracer_withLog/tx_failed.json | 2 +- .../testdata/call_tracer_withLog/tx_partial_failed.json | 2 +- .../testdata/call_tracer_withLog/with_onlyTopCall.json | 2 +- 26 files changed, 28 insertions(+), 28 deletions(-) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index 3a4f4f9ee7..a682cf06af 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -301,13 +301,13 @@ func TestInternals(t *testing.T) { byte(vm.CALL), }, tracer: mkTracer("callTracer", nil), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xe3a8"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, 
origin.Hex(), origin.Hex(), originHex), + want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xe3a8"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x54d8"}],"from":"%s","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { name: "Stack depletion in LOG0", code: []byte{byte(vm.LOG3)}, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0x0"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), + want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0x0"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x13880"}],"from":"%s","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { name: "Mem expansion in LOG0", @@ -320,7 +320,7 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xdce2"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x0"}],"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), + want: 
fmt.Sprintf(`{"beforeEVMTransfers":[{"purpose":"feePayment","from":"%s","to":null,"value":"0x13880"}],"afterEVMTransfers":[{"purpose":"gasRefund","from":null,"to":"%s","value":"0xdce2"},{"purpose":"tip","from":null,"to":"0x0000000000000000000000000000000000000000","value":"0x5b9e"}],"from":"%s","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","position":"0x0"}],"value":"0x0","type":"CALL"}`, origin.Hex(), origin.Hex(), originHex), }, { // Leads to OOM on the prestate tracer diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json index a78775b53a..9b48434e11 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/blob_tx.json @@ -77,7 +77,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x0" + "value": "0x200b20" } ], "from": "0x0c2c51a0990aee1d73c1228de158688341557508", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json index 39a82690c5..903ff57c57 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json @@ -65,7 +65,7 @@ "purpose": "tip", "from": null, "to": "0xD049bfd667cB46Aa3Ef5Df0dA3e57DB3Be39E511", - "value": "0x0" + "value": "0x2a0383e2a65c00" } ], "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json index 0c39115b14..a5cca5d434 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json @@ -128,7 +128,7 @@ "purpose": "tip", "from": null, "to": "0x1977C248e1014Cc103929Dd7f154199C916E39Ec", - "value": "0x0" + "value": "0x700fefccd9800" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json index 04a22cf363..26788a6ce4 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json @@ -81,7 +81,7 @@ "purpose": "tip", "from": null, "to": "0x5659922cE141EedBC2733678f9806c77b4eEBEE8", - "value": "0x0" + "value": "0x371a55e8d6800" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json index c449347bae..41d541761f 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json @@ -73,7 +73,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x0" + "value": "0x2aa1efb94e0000" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json index bf52a05f3e..460c3d91b8 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0xc822ef32e6d26E170B70Cf761e204c1806265914", - "value": "0x0" + "value": "0x216e3433f7200" } ], "type": "CALL", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json index f792d62cf1..2e06a33f5f 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x0" + "value": "0x15bd3c0591000" } ], "from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json index 6b5d6cac36..9ebd222e29 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json @@ -76,7 +76,7 @@ "purpose": "tip", "from": null, "to": "0x00d8Ae40D9a06d0E7a2877B62E32eB959Afbe16D", - "value": "0x0" + "value": "0x25ed5d81941000" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json index 84e4ba7626..8f11c63911 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json @@ -67,7 +67,7 @@ "purpose": "tip", "from": null, "to": "0xD049bfd667cB46Aa3Ef5Df0dA3e57DB3Be39E511", - "value": "0x0" + "value": "0x49874422212000" } ], "error": "out of gas", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json index ebeab98fe3..1ea1a8a55a 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json @@ -65,7 +65,7 @@ "purpose": "tip", "from": null, "to": "0xF4D8e706CfB25c0DECBbDd4D2E2Cc10C66376a3F", - "value": "0x0" + "value": "0x2954557199e00" } ], "error": "execution reverted", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json index 1a234038be..610ecc9b4a 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json @@ -70,7 +70,7 @@ "purpose": "tip", "from": null, "to": "0x0000000000000000000000000000000000000000", - "value": "0x0" + "value": "0x198f3fb76000" } ], "error": "execution reverted", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json 
b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json index 67237b44a1..eb16322eae 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json @@ -71,7 +71,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x0" + "value": "0x30cdd67dd6400" }, { "purpose": "selfDestruct", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json index db8953b564..92f7a68a18 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json @@ -76,7 +76,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x0" + "value": "0x420eed1bd6c00" } ], "calls": [ diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json index 317913cdbc..deb7d5cddf 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json @@ -79,7 +79,7 @@ "purpose": "tip", "from": null, "to": "0x1585936b53834b021f68CC13eEeFdEc2EfC8e724", - "value": "0x0" + "value": "0x420eed1bd6c00" } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json index 4407bc69d0..4bdfbd349b 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json @@ -69,7 +69,7 @@ "purpose": "tip", "from": null, "to": "0x294e5d6c39A36Ce38aF1DCA70c1060f78Dee8070", - "value": "0x0" + "value": "0x11c37937e08000" } ], "error": "invalid jump destination", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json index b4b248f0eb..53d561f395 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json @@ -95,7 +95,7 @@ "purpose": "tip", "from": null, "to": "0xf8b483DbA2c3B7176a3Da549ad41A48BB3121069", - "value": "0x0" + "value": "0x119b54eb98e126" } ], "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json index fb089b01e4..288043d3da 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -152,7 +152,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0x0" + "value": "0xb9e774e8d5800" } ], "from": "0x3de712784baf97260455ae25fb74f574ec9c1add", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json index 1e87fdae85..b2e1ea79cd 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json @@ -98,7 +98,7 @@ "purpose": "tip", "from": null, "to": "0xe48430c4E88A929bBa0eE3DCe284866a9937b609", - "value": "0x0" + "value": "0x6a8fb3d104cc00" } ], "from": "0x0047a8033cc6d6ca2ed5044674fd421f44884de8", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json index 9594749fbc..df7f7a8318 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json @@ -317,7 +317,7 @@ "purpose": "tip", "from": null, "to": "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", - "value": "0x0" + "value": "0xe25384e63aa600" } ], "from": "0xbe3ae5cb97c253dda67181c6e34e43f5c275e08b", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json index f1cad828a5..55848ae7ab 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json @@ -185,7 +185,7 @@ "purpose": "tip", "from": null, "to": "0x2a65Aca4D5fC5B5C859090a6c34d164135398226", - "value": "0x0" + "value": "0x1b3dd214f1b8c00" } ], "from": "0x3fcb0342353c541e210013aaddc2e740b9a33d08", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json index 8484e12bf4..f2316eb1b9 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json @@ -120,7 +120,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0x0" + "value": "0xb30d0148e0600" } ], "from": "0x6412becf35cc7e2a9e7e47966e443f295e1e4f4a", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json index 5b031011f3..82d0596304 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json @@ -81,7 +81,7 @@ "purpose": "tip", "from": null, "to": "0xe2fE6B13287f28E193333fDfe7Fedf2f6Df6124A", - "value": "0x0" + "value": "0x90886d609c400" } ], "from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json index c0157602cd..730fea23e1 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json @@ -155,7 +155,7 @@ "purpose": "tip", "from": null, "to": "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", - "value": "0x0" + "value": "0x81fc72632f7340" } ], "from": "0xe6002189a74b43e6868b20c1311bc108e38aac57", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json index fbe1bfc2d4..8a46028598 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json +++ 
b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json @@ -93,7 +93,7 @@ "purpose": "tip", "from": null, "to": "0x61C808D82A3Ac53231750daDc13c777b59310bD9", - "value": "0x0" + "value": "0xbcd242cec0800" } ], "from": "0x01115b41bd2731353dd3e6abf44818fdc035aaf1", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json index e4c05184ed..50b3b0b276 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json @@ -96,7 +96,7 @@ "purpose": "tip", "from": null, "to": "0xf8b483DbA2c3B7176a3Da549ad41A48BB3121069", - "value": "0x0" + "value": "0x119b54eb98e126" } ], "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", From 2c34a6d3869bd72f74cd46263434f438de321f7c Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Tue, 19 Nov 2024 15:24:05 +0530 Subject: [PATCH 63/64] Changes based on PR comments --- core/state/statedb.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 9a5a6ee8f2..acdad20481 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1342,11 +1342,14 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er s.arbExtraData.activatedWasms = make(map[common.Hash]ActivatedWasm) } - if wasmCodeWriter.ValueSize() > 0 { - if err := wasmCodeWriter.Write(); err != nil { - log.Crit("Failed to commit dirty stylus codes", "error", err) + workers.Go(func() error { + if wasmCodeWriter.ValueSize() > 0 { + if err := wasmCodeWriter.Write(); err != nil { + log.Crit("Failed to commit dirty stylus codes", "error", err) + } } - } + return nil + }) // Wait for everything to finish and update the metrics if err := workers.Wait(); err != nil { return common.Hash{}, err From 0844684605ef6a151c5afd9e38b5ee3cafea0819 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 17 Dec 2024 13:12:36 -0700 Subject: [PATCH 64/64] Revert accidental change to tipReceipient --- core/state_transition.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_transition.go b/core/state_transition.go index dec1f1f807..8c01b2c285 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -523,7 +523,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } else { fee := new(uint256.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTipU256) - st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee) + st.state.AddBalance(tipReceipient, fee, tracing.BalanceIncreaseRewardTransactionFee) tipAmount = fee.ToBig() }
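To summarize the zombie-account mechanism introduced in patches 55-58 above: the journal counts, per address, how many of its entries are createZombieChange entries, and Finalise skips empty-account deletion for an address only when every dirtying entry recorded for it is such a zombie entry (zombieEntries[addr] == dirtyCount). The standalone Go sketch below models only that counting rule as it appears in the journal and Finalise hunks; the change struct, the apply helper, and the sample addresses are illustrative stand-ins, not code from the patches.

package main

import "fmt"

// change is a toy stand-in for a journal entry; zombie marks entries that
// play the role of createZombieChange in the patches above.
type change struct {
	addr   string
	zombie bool
}

func main() {
	dirties := map[string]int{}       // per-address count of all journal entries
	zombieEntries := map[string]int{} // per-address count of zombie entries only

	apply := func(c change) {
		dirties[c.addr]++
		if c.zombie {
			zombieEntries[c.addr]++
		}
	}

	// Account A was only ever zombie-created: it is preserved as an empty account.
	apply(change{addr: "A", zombie: true})
	// Account B was zombie-created but then modified again: normal empty-account
	// deletion applies to it during finalisation.
	apply(change{addr: "B", zombie: true})
	apply(change{addr: "B", zombie: false})

	for _, addr := range []string{"A", "B"} {
		isZombie := zombieEntries[addr] == dirties[addr]
		fmt.Printf("%s: preserved as empty account = %v\n", addr, isZombie)
	}
}

Running this prints true for A and false for B, mirroring the condition added to Finalise (obj.selfDestructed || (deleteEmptyObjects && obj.empty() && !isZombie)): only accounts whose sole journalled changes are zombie creations keep the old empty-account behavior.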