From cebea6fb233da8ba7b348c68525df7d7e3068757 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 02:24:52 +0000 Subject: [PATCH 01/16] add storage keys caching --- arbos/addressSet/addressSet.go | 4 +- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 +++++------ arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +-- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 ++--- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 63 ++++++++++++++++++---- util/containers/safe_lru.go | 82 +++++++++++++++++++++++++++++ 12 files changed, 171 insertions(+), 46 deletions(-) create mode 100644 util/containers/safe_lru.go diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index ae2e6a34c1..72200c0373 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -24,9 +24,9 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}), + sto.OpenSubStorage([]byte{0}, false), } } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 220c2700f4..56f04badff 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto, sto.OpenSubStorage([]byte{}), numItems} + return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 2bea8f7c54..93ec1ec469 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -71,13 +71,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace)), + l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), + l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), + retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), + addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), + addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), + blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), 
backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -220,14 +220,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace) + _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) + _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) + addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) + blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) + + ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -385,7 +385,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index c4643c9183..384fc9c72f 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 2eedf7f5bb..99fb3ef470 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage, backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 97b7b16234..4ac86307ec 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage 
*storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey)) + return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey)), - posterInfo: storage.OpenSubStorage(PosterInfoKey), + posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), + posterInfo: storage.OpenSubStorage(PosterInfoKey, false), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ -68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9772ac028b..0d6bca7bf9 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -82,7 +82,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey) + bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -117,7 +117,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey)), + OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index d8d491bdb0..abeec49a93 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index abea2ab7bd..0322938541 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey)) + return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey)), + storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, 
error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 55231d3a90..032ac11aad 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 478ad68f8f..b93c835ff0 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -4,6 +4,7 @@ package storage import ( + "bytes" "fmt" "math/big" @@ -17,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -43,12 +45,18 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner + hashCache *containers.SafeLruCache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 const StorageWriteCost = params.SstoreSetGasEIP2200 const StorageWriteZeroCost = params.SstoreResetGasEIP2200 +const storageKeyCacheSize = 1024 + +// TODO(magic) rename? 
+var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) + // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { account := common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") @@ -58,6 +66,7 @@ func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { db: statedb, storageKey: []byte{}, burner: burner, + hashCache: storageHashCache, } } @@ -81,15 +90,16 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - return common.BytesToHash( + mapped := common.BytesToHash( append( - crypto.Keccak256(storageKey, keyBytes[:boundary])[:boundary], + store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], keyBytes[boundary], ), ) + return mapped } func writeCost(value common.Hash) uint64 { @@ -111,11 +121,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return mapAddress(store.storageKey, key) + return store.mapAddress(store.storageKey, key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -143,7 +153,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) return nil } @@ -171,12 +181,27 @@ func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, return oldValue, store.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte) *Storage { +func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { + var hashCache *containers.SafeLruCache[string, []byte] + if cacheKeys { + hashCache = storageHashCache + } return &Storage{ store.account, store.db, - crypto.Keccak256(store.storageKey, id), + store.cachedKeccak(store.storageKey, id), store.burner, + hashCache, + } +} + +func (store *Storage) NoCacheCopy() *Storage { + return &Storage{ + store.account, + store.db, + store.storageKey, + store.burner, + nil, } } @@ -266,6 +291,24 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +func (store *Storage) cachedKeccak(data ...[]byte) []byte { + if store.hashCache == nil { + return crypto.Keccak256(data...) + } + keyString := string(bytes.Join(data, []byte{})) + hash, isCached := store.hashCache.Get(keyString) + if isCached { + return hash + } + // TODO(magic) we might miss the warning if concurrent Add will be before + if store.hashCache.Size()-store.hashCache.Len() == 1 { + log.Warn("Hash cache almost full, but we didn't expect that. 
We may be caching some non-static keys.") + } + hash = crypto.Keccak256(data...) + store.hashCache.Add(keyString, hash) + return hash +} + func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { bytes, err := store.Keccak(data...) return common.BytesToHash(bytes), err @@ -279,7 +322,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -590,7 +633,7 @@ type StorageBackedBytes struct { func (store *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id), + *store.OpenSubStorage(id, false), } } diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go new file mode 100644 index 0000000000..40e7d993ec --- /dev/null +++ b/util/containers/safe_lru.go @@ -0,0 +1,82 @@ +package containers + +import ( + "sync" +) + +// thread safe version of containers.LruCache +type SafeLruCache[K comparable, V any] struct { + inner *LruCache[K, V] + mutex sync.RWMutex +} + +func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { + return NewSafeLruCacheWithOnEvict[K, V](size, nil) +} + +func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { + return &SafeLruCache[K, V]{ + inner: NewLruCacheWithOnEvict(size, onEvict), + } +} + +// Returns true if an item was evicted +func (c *SafeLruCache[K, V]) Add(key K, value V) bool { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Add(key, value) +} + +func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Get(key) +} + +func (c *SafeLruCache[K, V]) Contains(key K) bool { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Contains(key) +} + +func (c *SafeLruCache[K, V]) Remove(key K) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Remove(key) +} + +func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.GetOldest() +} + +func (c *SafeLruCache[K, V]) RemoveOldest() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.RemoveOldest() +} + +func (c *SafeLruCache[K, V]) Len() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Len() +} + +func (c *SafeLruCache[K, V]) Size() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Size() +} + +func (c *SafeLruCache[K, V]) Clear() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Clear() +} + +func (c *SafeLruCache[K, V]) Resize(newSize int) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Resize(newSize) +} From 691a213eb0127a20d32114dfcd73a652a84c2ea4 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 16:42:33 +0000 Subject: [PATCH 02/16] add onetime warning when hash cache unexpectedly fills up --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index b93c835ff0..39af072f0a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -7,6 +7,7 @@ import ( "bytes" "fmt" "math/big" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -54,8 +55,8 @@ const StorageWriteZeroCost = 
params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -// TODO(magic) rename? var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { @@ -300,12 +301,11 @@ func (store *Storage) cachedKeccak(data ...[]byte) []byte { if isCached { return hash } - // TODO(magic) we might miss the warning if concurrent Add will be before - if store.hashCache.Size()-store.hashCache.Len() == 1 { - log.Warn("Hash cache almost full, but we didn't expect that. We may be caching some non-static keys.") - } hash = crypto.Keccak256(data...) - store.hashCache.Add(keyString, hash) + evicted := store.hashCache.Add(keyString, hash) + if evicted && cacheFullLogged.CompareAndSwap(false, true) { + log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") + } return hash } From f802560c966b27eeba1b68a51585e1238ecb5787 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 18:44:13 +0000 Subject: [PATCH 03/16] fix race in mapAddress --- arbos/storage/storage.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 39af072f0a..f234e69d5a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -94,13 +94,10 @@ func NewMemoryBackedStateDB() vm.StateDB { func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - mapped := common.BytesToHash( - append( - store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], - keyBytes[boundary], - ), - ) - return mapped + mapped := make([]byte, 0, common.HashLength) + mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, keyBytes[boundary]) + return common.BytesToHash(mapped) } func writeCost(value common.Hash) uint64 { @@ -292,13 +289,14 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +// note: returned slice is not thread-safe func (store *Storage) cachedKeccak(data ...[]byte) []byte { if store.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, isCached := store.hashCache.Get(keyString) - if isCached { + hash, wasCached := store.hashCache.Get(keyString) + if wasCached { return hash } hash = crypto.Keccak256(data...) From 0fd07c6dbbee64e8c80f45125aec670c52ce7114 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 19:04:26 +0000 Subject: [PATCH 04/16] remove redundant mapAddress parameter --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index f234e69d5a..66f3d49473 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. 
-func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -119,11 +119,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(store.storageKey, key) + return store.mapAddress(key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -151,7 +151,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(key), value) return nil } @@ -320,7 +320,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { From 001ae3179abb304ee87b3e36cdfc03414cefaf02 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 22:05:10 +0200 Subject: [PATCH 05/16] address review comments --- arbos/addressSet/addressSet.go | 90 ++++++------- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 ++--- arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 +- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 200 +++++++++++++++------------- 11 files changed, 184 insertions(+), 174 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 38b07f7a1b..50a43c36cf 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -26,49 +26,49 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto.NoCacheCopy(), - sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}, false), + backingStorage: sto.WithoutCache(), + size: sto.OpenStorageBackedUint64(0), + byAddress: sto.OpenSubStorage([]byte{0}), } } -func (aset *AddressSet) Size() (uint64, error) { - return aset.size.Get() +func (as *AddressSet) Size() (uint64, error) { + return as.size.Get() } -func (aset *AddressSet) IsMember(addr common.Address) (bool, error) { - value, err := aset.byAddress.Get(util.AddressToHash(addr)) +func (as *AddressSet) IsMember(addr common.Address) (bool, error) { + value, err := as.byAddress.Get(util.AddressToHash(addr)) return value != (common.Hash{}), err } -func (aset *AddressSet) GetAnyMember() (*common.Address, error) { - size, err := 
aset.size.Get() +func (as *AddressSet) GetAnyMember() (*common.Address, error) { + size, err := as.size.Get() if err != nil || size == 0 { return nil, err } - sba := aset.backingStorage.OpenStorageBackedAddressOrNil(1) + sba := as.backingStorage.OpenStorageBackedAddressOrNil(1) addr, err := sba.Get() return addr, err } -func (aset *AddressSet) Clear() error { - size, err := aset.size.Get() +func (as *AddressSet) Clear() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - contents, _ := aset.backingStorage.GetByUint64(i) - _ = aset.backingStorage.ClearByUint64(i) - err = aset.byAddress.Clear(contents) + contents, _ := as.backingStorage.GetByUint64(i) + _ = as.backingStorage.ClearByUint64(i) + err = as.byAddress.Clear(contents) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { - size, err := aset.size.Get() +func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { + size, err := as.size.Get() if err != nil { return nil, err } @@ -77,7 +77,7 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err } ret := make([]common.Address, size) for i := range ret { - sba := aset.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) + sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) ret[i], err = sba.Get() if err != nil { return nil, err @@ -86,22 +86,22 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err return ret, nil } -func (aset *AddressSet) ClearList() error { - size, err := aset.size.Get() +func (as *AddressSet) ClearList() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - err = aset.backingStorage.ClearByUint64(i) + err = as.backingStorage.ClearByUint64(i) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) RectifyMapping(addr common.Address) error { - isOwner, err := aset.IsMember(addr) +func (as *AddressSet) RectifyMapping(addr common.Address) error { + isOwner, err := as.IsMember(addr) if !isOwner || err != nil { return errors.New("RectifyMapping: Address is not an owner") } @@ -109,15 +109,15 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { // If the mapping is correct, RectifyMapping shouldn't do anything // Additional safety check to avoid corruption of mapping after the initial fix addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if err != nil { return err } - atSlot, err := aset.backingStorage.GetByUint64(slot) + atSlot, err := as.backingStorage.GetByUint64(slot) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } @@ -126,72 +126,72 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { } // Remove the owner from map and add them as a new owner - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - return aset.Add(addr) + return as.Add(addr) } -func (aset *AddressSet) Add(addr common.Address) error { - present, err := aset.IsMember(addr) +func (as *AddressSet) Add(addr common.Address) error { + present, err := as.IsMember(addr) if present || err != nil { return err } - size, err := aset.size.Get() + size, err := 
as.size.Get() if err != nil { return err } slot := util.UintToHash(1 + size) addrAsHash := common.BytesToHash(addr.Bytes()) - err = aset.byAddress.Set(addrAsHash, slot) + err = as.byAddress.Set(addrAsHash, slot) if err != nil { return err } - sba := aset.backingStorage.OpenStorageBackedAddress(1 + size) + sba := as.backingStorage.OpenStorageBackedAddress(1 + size) err = sba.Set(addr) if err != nil { return err } - _, err = aset.size.Increment() + _, err = as.size.Increment() return err } -func (aset *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { +func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if slot == 0 || err != nil { return err } - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } if slot < size { - atSize, err := aset.backingStorage.GetByUint64(size) + atSize, err := as.backingStorage.GetByUint64(size) if err != nil { return err } - err = aset.backingStorage.SetByUint64(slot, atSize) + err = as.backingStorage.SetByUint64(slot, atSize) if err != nil { return err } if arbosVersion >= 11 { - err = aset.byAddress.Set(atSize, util.UintToHash(slot)) + err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err } } } - err = aset.backingStorage.ClearByUint64(size) + err = as.backingStorage.ClearByUint64(size) if err != nil { return err } - _, err = aset.size.Decrement() + _, err = as.size.Decrement() return err } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 56f04badff..f44e7f3dcf 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} + return &AddressTable{sto.WithoutCache(), sto.OpenSubStorage([]byte{}), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 750c796e33..8702c62d16 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -73,13 +73,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), + l1pricing.OpenL1PricingState(backingStorage.OpenCachedSubStorage(l1PricingSubspace)), + 
l2pricing.OpenL2PricingState(backingStorage.OpenCachedSubStorage(l2PricingSubspace)), + retryables.OpenRetryableState(backingStorage.OpenCachedSubStorage(retryablesSubspace), stateDB), + addressTable.Open(backingStorage.OpenCachedSubStorage(addressTableSubspace)), + addressSet.OpenAddressSet(backingStorage.OpenCachedSubStorage(chainOwnerSubspace)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenCachedSubStorage(sendMerkleSubspace)), + blockhash.OpenBlockhashes(backingStorage.OpenCachedSubStorage(blockhashesSubspace)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -225,14 +225,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) + _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenCachedSubStorage(l2PricingSubspace)) + _ = retryables.InitializeRetryableState(sto.OpenCachedSubStorage(retryablesSubspace)) + addressTable.Initialize(sto.OpenCachedSubStorage(addressTableSubspace)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenCachedSubStorage(sendMerkleSubspace)) + blockhash.InitializeBlockhashes(sto.OpenCachedSubStorage(blockhashesSubspace)) + + ownersStorage := sto.OpenCachedSubStorage(chainOwnerSubspace) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -428,7 +428,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenCachedSubStorage(sendMerkleSubspace)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index 384fc9c72f..ef63c23386 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 99fb3ef470..34c907207c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func 
InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.WithoutCache(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 4ac86307ec..a3428c441c 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage *storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) + return addressSet.Initialize(storage.OpenCachedSubStorage(PosterAddrsKey)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), - posterInfo: storage.OpenSubStorage(PosterInfoKey, false), + posterAddrs: addressSet.OpenAddressSet(storage.OpenCachedSubStorage(PosterAddrsKey)), + posterInfo: storage.OpenSubStorage(PosterInfoKey), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ -68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 1956eab29b..58c1042ab7 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -83,7 +83,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) + bptStorage := sto.OpenCachedSubStorage(BatchPosterTableKey) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -118,7 +118,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), + OpenBatchPostersTable(sto.OpenCachedSubStorage(BatchPosterTableKey)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index abeec49a93..ff993a233f 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 0322938541..1121de01f4 100644 --- 
a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) + return storage.InitializeQueue(sto.OpenCachedSubStorage(timeoutQueueKey)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), + storage.OpenQueue(sto.OpenCachedSubStorage(timeoutQueueKey)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 032ac11aad..9c02dc1ee7 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto.NoCacheCopy(), + sto.WithoutCache(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 66f3d49473..47ca66445d 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. 
-func (store *Storage) mapAddress(key common.Hash) common.Hash { +func (s *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, s.cachedKeccak(s.storageKey, keyBytes[:boundary])[:boundary]...) mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -107,132 +107,139 @@ func writeCost(value common.Hash) uint64 { return StorageWriteCost } -func (store *Storage) Account() common.Address { - return store.account +func (s *Storage) Account() common.Address { + return s.account } -func (store *Storage) Get(key common.Hash) (common.Hash, error) { - err := store.burner.Burn(StorageReadCost) +func (s *Storage) Get(key common.Hash) (common.Hash, error) { + err := s.burner.Burn(StorageReadCost) if err != nil { return common.Hash{}, err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(key)), nil + return s.db.GetState(s.account, s.mapAddress(key)), nil } -func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(key) +func (s *Storage) GetStorageSlot(key common.Hash) common.Hash { + return s.mapAddress(key) } -func (store *Storage) GetUint64(key common.Hash) (uint64, error) { - value, err := store.Get(key) +func (s *Storage) GetUint64(key common.Hash) (uint64, error) { + value, err := s.Get(key) return value.Big().Uint64(), err } -func (store *Storage) GetByUint64(key uint64) (common.Hash, error) { - return store.Get(util.UintToHash(key)) +func (s *Storage) GetByUint64(key uint64) (common.Hash, error) { + return s.Get(util.UintToHash(key)) } -func (store *Storage) GetUint64ByUint64(key uint64) (uint64, error) { - return store.GetUint64(util.UintToHash(key)) +func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { + return s.GetUint64(util.UintToHash(key)) } -func (store *Storage) Set(key common.Hash, value common.Hash) error { - if store.burner.ReadOnly() { +func (s *Storage) Set(key common.Hash, value common.Hash) error { + if s.burner.ReadOnly() { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) return vm.ErrWriteProtection } - err := store.burner.Burn(writeCost(value)) + err := s.burner.Burn(writeCost(value)) if err != nil { return err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(key), value) + s.db.SetState(s.account, s.mapAddress(key), value) return nil } -func (store *Storage) SetByUint64(key uint64, value common.Hash) error { - return store.Set(util.UintToHash(key), value) +func (s *Storage) SetByUint64(key uint64, value common.Hash) error { + return s.Set(util.UintToHash(key), value) } -func (store *Storage) SetUint64ByUint64(key uint64, value uint64) error { - return store.Set(util.UintToHash(key), util.UintToHash(value)) +func (s *Storage) SetUint64ByUint64(key uint64, value uint64) error { + return s.Set(util.UintToHash(key), util.UintToHash(value)) } -func (store *Storage) Clear(key common.Hash) error { - return store.Set(key, common.Hash{}) +func (s *Storage) Clear(key common.Hash) error { + return s.Set(key, common.Hash{}) } -func (store *Storage) ClearByUint64(key 
uint64) error { - return store.Set(util.UintToHash(key), common.Hash{}) +func (s *Storage) ClearByUint64(key uint64) error { + return s.Set(util.UintToHash(key), common.Hash{}) } -func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { - oldValue, err := store.Get(key) +func (s *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { + oldValue, err := s.Get(key) if err != nil { return common.Hash{}, err } - return oldValue, store.Set(key, newValue) + return oldValue, s.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { - var hashCache *containers.SafeLruCache[string, []byte] - if cacheKeys { - hashCache = storageHashCache +func (s *Storage) OpenCachedSubStorage(id []byte) *Storage { + return &Storage{ + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: storageHashCache, } +} +func (s *Storage) OpenSubStorage(id []byte) *Storage { return &Storage{ - store.account, - store.db, - store.cachedKeccak(store.storageKey, id), - store.burner, - hashCache, + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: nil, } } -func (store *Storage) NoCacheCopy() *Storage { +// Returns shallow copy of Storage that won't use storage key hash cache. +// The storage space represented by the returned Storage is kept the same. +func (s *Storage) WithoutCache() *Storage { return &Storage{ - store.account, - store.db, - store.storageKey, - store.burner, - nil, + account: s.account, + db: s.db, + storageKey: s.storageKey, + burner: s.burner, + hashCache: nil, } } -func (store *Storage) SetBytes(b []byte) error { - err := store.ClearBytes() +func (s *Storage) SetBytes(b []byte) error { + err := s.ClearBytes() if err != nil { return err } - err = store.SetUint64ByUint64(0, uint64(len(b))) + err = s.SetUint64ByUint64(0, uint64(len(b))) if err != nil { return err } offset := uint64(1) for len(b) >= 32 { - err = store.SetByUint64(offset, common.BytesToHash(b[:32])) + err = s.SetByUint64(offset, common.BytesToHash(b[:32])) if err != nil { return err } b = b[32:] offset++ } - return store.SetByUint64(offset, common.BytesToHash(b)) + return s.SetByUint64(offset, common.BytesToHash(b)) } -func (store *Storage) GetBytes() ([]byte, error) { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) GetBytes() ([]byte, error) { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return nil, err } ret := []byte{} offset := uint64(1) for bytesLeft >= 32 { - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -240,7 +247,7 @@ func (store *Storage) GetBytes() ([]byte, error) { bytesLeft -= 32 offset++ } - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -248,18 +255,18 @@ func (store *Storage) GetBytes() ([]byte, error) { return ret, nil } -func (store *Storage) GetBytesSize() (uint64, error) { - return store.GetUint64ByUint64(0) +func (s *Storage) GetBytesSize() (uint64, error) { + return s.GetUint64ByUint64(0) } -func (store *Storage) ClearBytes() error { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) ClearBytes() error { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return err } offset := uint64(1) for bytesLeft > 0 { - err := store.ClearByUint64(offset) + err := s.ClearByUint64(offset) if err != nil { return err } @@ -270,48 +277,51 @@ func 
(store *Storage) ClearBytes() error { bytesLeft -= 32 } } - return store.ClearByUint64(0) + return s.ClearByUint64(0) } -func (store *Storage) Burner() burn.Burner { - return store.burner // not public because these should never be changed once set +func (s *Storage) Burner() burn.Burner { + return s.burner // not public because these should never be changed once set } -func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { +func (s *Storage) Keccak(data ...[]byte) ([]byte, error) { byteCount := 0 for _, part := range data { byteCount += len(part) } cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount)) - if err := store.burner.Burn(cost); err != nil { + if err := s.burner.Burn(cost); err != nil { return nil, err } return crypto.Keccak256(data...), nil } +func (s *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { + bytes, err := s.Keccak(data...) + return common.BytesToHash(bytes), err +} + +// Returns crypto.Keccak256 result for the given data +// If available the result is taken from hash cache +// otherwise crypto.Keccak256 is executed and its result is added to the cache and returned +// note: the method doesn't burn gas, as it's only intended for generating storage subspace keys and mapping slot addresses // note: returned slice is not thread-safe -func (store *Storage) cachedKeccak(data ...[]byte) []byte { - if store.hashCache == nil { +func (s *Storage) cachedKeccak(data ...[]byte) []byte { + if s.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, wasCached := store.hashCache.Get(keyString) - if wasCached { + if hash, wasCached := s.hashCache.Get(keyString); wasCached { return hash } - hash = crypto.Keccak256(data...) - evicted := store.hashCache.Add(keyString, hash) + hash := crypto.Keccak256(data...) + evicted := s.hashCache.Add(keyString, hash) if evicted && cacheFullLogged.CompareAndSwap(false, true) { log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") } return hash } -func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { - bytes, err := store.Keccak(data...) 
- return common.BytesToHash(bytes), err -} - type StorageSlot struct { account common.Address db vm.StateDB @@ -319,8 +329,8 @@ type StorageSlot struct { burner burn.Burner } -func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} +func (s *Storage) NewSlot(offset uint64) StorageSlot { + return StorageSlot{s.account, s.db, s.mapAddress(util.UintToHash(offset)), s.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -359,8 +369,8 @@ type StorageBackedInt64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { - return StorageBackedInt64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { + return StorageBackedInt64{s.NewSlot(offset)} } func (sbu *StorageBackedInt64) Get() (int64, error) { @@ -380,8 +390,8 @@ type StorageBackedBips struct { backing StorageBackedInt64 } -func (store *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { - return StorageBackedBips{StorageBackedInt64{store.NewSlot(offset)}} +func (s *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { + return StorageBackedBips{StorageBackedInt64{s.NewSlot(offset)}} } func (sbu *StorageBackedBips) Get() (arbmath.Bips, error) { @@ -397,8 +407,8 @@ type StorageBackedUint64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { - return StorageBackedUint64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { + return StorageBackedUint64{s.NewSlot(offset)} } func (sbu *StorageBackedUint64) Get() (uint64, error) { @@ -485,8 +495,8 @@ type StorageBackedBigUint struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint { - return StorageBackedBigUint{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint { + return StorageBackedBigUint{s.NewSlot(offset)} } func (sbbu *StorageBackedBigUint) Get() (*big.Int, error) { @@ -524,8 +534,8 @@ type StorageBackedBigInt struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { - return StorageBackedBigInt{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { + return StorageBackedBigInt{s.NewSlot(offset)} } func (sbbi *StorageBackedBigInt) Get() (*big.Int, error) { @@ -581,8 +591,8 @@ type StorageBackedAddress struct { StorageSlot } -func (store *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { - return StorageBackedAddress{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { + return StorageBackedAddress{s.NewSlot(offset)} } func (sba *StorageBackedAddress) Get() (common.Address, error) { @@ -604,8 +614,8 @@ func init() { NilAddressRepresentation = common.BigToHash(new(big.Int).Lsh(big.NewInt(1), 255)) } -func (store *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { - return StorageBackedAddressOrNil{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { + return StorageBackedAddressOrNil{s.NewSlot(offset)} } func (sba *StorageBackedAddressOrNil) Get() (*common.Address, error) { @@ -629,9 +639,9 @@ type StorageBackedBytes struct { Storage } -func (store *Storage) OpenStorageBackedBytes(id []byte) 
StorageBackedBytes { +func (s *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id, false), + *s.OpenSubStorage(id), } } From eeb327c3b7d3e81ff057f58e0fb366739610dee7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 23:45:59 +0200 Subject: [PATCH 06/16] replace SafeLruCache with lru.Cache from geth --- arbos/storage/storage.go | 6 +-- util/containers/safe_lru.go | 82 ------------------------------------- 2 files changed, 3 insertions(+), 85 deletions(-) delete mode 100644 util/containers/safe_lru.go diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 47ca66445d..63987b91f8 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" @@ -19,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -46,7 +46,7 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner - hashCache *containers.SafeLruCache[string, []byte] + hashCache *lru.Cache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 @@ -55,7 +55,7 @@ const StorageWriteZeroCost = params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var storageHashCache = lru.NewCache[string, []byte](storageKeyCacheSize) var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go deleted file mode 100644 index 40e7d993ec..0000000000 --- a/util/containers/safe_lru.go +++ /dev/null @@ -1,82 +0,0 @@ -package containers - -import ( - "sync" -) - -// thread safe version of containers.LruCache -type SafeLruCache[K comparable, V any] struct { - inner *LruCache[K, V] - mutex sync.RWMutex -} - -func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { - return NewSafeLruCacheWithOnEvict[K, V](size, nil) -} - -func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { - return &SafeLruCache[K, V]{ - inner: NewLruCacheWithOnEvict(size, onEvict), - } -} - -// Returns true if an item was evicted -func (c *SafeLruCache[K, V]) Add(key K, value V) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Add(key, value) -} - -func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Get(key) -} - -func (c *SafeLruCache[K, V]) Contains(key K) bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Contains(key) -} - -func (c *SafeLruCache[K, V]) Remove(key K) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Remove(key) -} - -func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.GetOldest() -} - -func (c *SafeLruCache[K, V]) RemoveOldest() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.RemoveOldest() -} - -func (c *SafeLruCache[K, V]) Len() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return 
c.inner.Len() -} - -func (c *SafeLruCache[K, V]) Size() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Size() -} - -func (c *SafeLruCache[K, V]) Clear() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Clear() -} - -func (c *SafeLruCache[K, V]) Resize(newSize int) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Resize(newSize) -} From 6f1c79fca98a5a2eec1c9302f4bb751c2599064f Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:20:15 +0200 Subject: [PATCH 07/16] add todos for renaming packages --- arbos/addressSet/addressSet.go | 2 ++ arbos/addressTable/addressTable.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 50a43c36cf..6d2db26941 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,6 +3,8 @@ package addressSet +// TODO rename this package to lowercase + import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index f44e7f3dcf..bfc526d39d 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,6 +3,8 @@ package addressTable +// TODO rename this package to lowercase + import ( "bytes" "errors" From 9ec92bc55121f2c54571268b567a9249d5680060 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:22:01 +0200 Subject: [PATCH 08/16] rephrase todos --- arbos/addressSet/addressSet.go | 2 +- arbos/addressTable/addressTable.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 6d2db26941..1f09ff1440 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,7 +3,7 @@ package addressSet -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index bfc526d39d..3fbb7b3782 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,7 +3,7 @@ package addressTable -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "bytes" From 41b9517ec40c1a72e130dbe53940952910a14283 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 18:21:29 +0200 Subject: [PATCH 09/16] add comment to skipped error checks --- arbos/retryables/retryable.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 1121de01f4..6984e41904 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -150,6 +150,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario return false, err } + // we ignore the returned error because we expect that if one ClearByUint64 fails, then all consecutive calls to ClearByUint64 will fail with the same error (without modifying state), and ClearBytes will then also fail with the same error (also without modifying state) - that last error is the one we check and return _ = retStorage.ClearByUint64(numTriesOffset) _ = retStorage.ClearByUint64(fromOffset) _ = retStorage.ClearByUint64(toOffset) From df6f73e1b9c34a2fe5b272a4259ddbe63020c34a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 09:38:51 -0500 Subject: [PATCH 10/16] test failure --- .github/workflows/waitForNitro.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index
e196b38d88..422083e38e 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,10 +1,14 @@ -# poll the nitro endpoint until we get a 0 return code -while true -do +# poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 +start_time=$(date +%s) +timeout=20 + +while (( $(date +%s) - start_time <= timeout )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' if [ "$?" -eq "0" ]; then exit 0 else sleep 20 fi -done \ No newline at end of file +done + +exit 1 \ No newline at end of file From 82663481ad99b6ea6eef6440aed735e3cdedd243 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 09:58:24 -0500 Subject: [PATCH 11/16] test success --- .github/workflows/waitForNitro.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index 422083e38e..a1b7f2ad0f 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,6 +1,6 @@ # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 start_time=$(date +%s) -timeout=20 +timeout=1800 while (( $(date +%s) - start_time <= timeout )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' From f99cfe673e5799404186a60ce254a37d48935397 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 10:31:24 -0500 Subject: [PATCH 12/16] refactor --- .github/workflows/waitForNitro.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index a1b7f2ad0f..add83e24bc 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,8 +1,7 @@ # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 -start_time=$(date +%s) -timeout=1800 +timeout_time=$(($(date +%s) + 1800)) -while (( $(date +%s) - start_time <= timeout )); do +while (( $(date +%s) <= timeout_time )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' if [ "$?" -eq "0" ]; then exit 0 From f5593692885c7401195a6907339ea1d03df95c35 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 10:46:04 -0500 Subject: [PATCH 13/16] refactor --- .github/workflows/waitForNitro.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index add83e24bc..cf3f6484fc 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,9 +1,9 @@ +#!/bin/bash # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 timeout_time=$(($(date +%s) + 1800)) while (( $(date +%s) <= timeout_time )); do - curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' - if [ "$?" 
-eq "0" ]; then + if curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547'; then exit 0 else sleep 20 From 754d9be19aacb8cca479701099930e45b8e1d8e5 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 16 Oct 2023 17:58:32 +0200 Subject: [PATCH 14/16] add simple storage key caching tests --- arbos/storage/storage_test.go | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/arbos/storage/storage_test.go b/arbos/storage/storage_test.go index 35e6b7c4be..a8d424d14e 100644 --- a/arbos/storage/storage_test.go +++ b/arbos/storage/storage_test.go @@ -1,11 +1,16 @@ package storage import ( + "bytes" + "fmt" "math/big" + "math/rand" + "sync" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -91,3 +96,73 @@ func TestStorageBackedBigInt(t *testing.T) { }) } } + +func TestOpenCachedSubStorage(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var subSpaceIDs [][]byte + for i := 0; i < 20; i++ { + subSpaceIDs = append(subSpaceIDs, []byte{byte(rand.Intn(0xff))}) + } + var expectedKeys [][]byte + for _, subSpaceID := range subSpaceIDs { + expectedKeys = append(expectedKeys, crypto.Keccak256(s.storageKey, subSpaceID)) + } + n := len(subSpaceIDs) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(subSpaceIDs) + subSpaceID, expectedKey := subSpaceIDs[j], expectedKeys[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + ss := s.OpenCachedSubStorage(subSpaceID) + if !bytes.Equal(ss.storageKey, expectedKey) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expectedKey, ss.storageKey) + } + }() + } + close(start) + wg.Wait() + select { + case err := <-errs: + t.Fatal(err) + default: + } +} + +func TestMapAddressCache(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var keys []common.Hash + for i := 0; i < 20; i++ { + keys = append(keys, common.BytesToHash([]byte{byte(rand.Intn(0xff))})) + } + var expectedMapped []common.Hash + for _, key := range keys { + expectedMapped = append(expectedMapped, s.mapAddress(key)) + } + n := len(keys) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(keys) + key, expected := keys[j], expectedMapped[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + mapped := s.mapAddress(key) + if !bytes.Equal(mapped.Bytes(), expected.Bytes()) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expected, mapped) + } + }() + } + close(start) + wg.Wait() + if len(errs) > 0 { + t.Fatal(<-errs) + } +} From 487f6eeba172805c8be6ce2a0bb8914e55c30e5d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 16 Oct 2023 23:07:06 -0600 Subject: [PATCH 15/16] Fix retryable gas estimation when overriding gas price to zero --- arbos/tx_processor.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 4eeffc679e..436998dfb5 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -245,16 +245,17 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } balance := statedb.GetBalance(tx.From) - basefee := evm.Context.BaseFee + 
effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) - maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, basefee) - if p.msg.TxRunMode == core.MessageGasEstimationMode && tx.GasFeeCap.BitLen() == 0 { - // In gas estimation mode, we permit a zero gas fee cap. - // This matches behavior with normal tx gas estimation. - maxFeePerGasTooLow = false + if p.msg.TxRunMode != core.MessageCommitMode && tx.GasFeeCap.BitLen() == 0 { + // In gas estimation or eth_call mode, we permit a zero gas fee cap. + // This matches behavior with normal tx gas estimation and eth_call. + effectiveBaseFee = common.Big0 } + + maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) + maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, effectiveBaseFee) if arbmath.BigLessThan(balance, maxGasCost) || usergas < params.TxGas || maxFeePerGasTooLow { // User either specified too low of a gas fee cap, didn't have enough balance to pay for gas, // or the specified gas limit is below the minimum transaction gas cost. @@ -268,7 +269,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } // pay for the retryable's gas and update the pools - gascost := arbmath.BigMulByUint(basefee, usergas) + gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) networkCost := gascost if p.state.ArbOSVersion() >= 11 { infraFeeAccount, err := p.state.InfraFeeAccount() @@ -276,7 +277,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r if infraFeeAccount != (common.Address{}) { minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei() p.state.Restrict(err) - infraFee := arbmath.BigMin(minBaseFee, basefee) + infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee) infraCost := arbmath.BigMulByUint(infraFee, usergas) infraCost = takeFunds(networkCost, infraCost) if err := transfer(&tx.From, &infraFeeAccount, infraCost); err != nil { @@ -294,7 +295,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } withheldGasFunds := takeFunds(availableRefund, gascost) // gascost is conceptually charged before the gas price refund - gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, basefee), tx.Gas) + gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, effectiveBaseFee), tx.Gas) if gasPriceRefund.Sign() < 0 { // This should only be possible during gas estimation mode gasPriceRefund.SetInt64(0) @@ -310,7 +311,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r retryTxInner, err := retryable.MakeTx( underlyingTx.ChainId(), 0, - basefee, + effectiveBaseFee, usergas, ticketId, tx.FeeRefundAddr, From 0a3e0d8bbcedad1ccae5a987928875c588d38835 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 19 Oct 2023 12:10:34 -0600 Subject: [PATCH 16/16] Fix basefee in EndTxHook --- arbos/tx_processor.go | 31 ++++++++++++++++++++++++------- go-ethereum | 2 +- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 436998dfb5..569edb7c63 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -248,7 +248,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - if p.msg.TxRunMode != core.MessageCommitMode && tx.GasFeeCap.BitLen() == 0 { + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { // In gas estimation or eth_call mode, we 
permit a zero gas fee cap. // This matches behavior with normal tx gas estimation and eth_call. effectiveBaseFee = common.Big0 @@ -457,7 +457,6 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { underlyingTx := p.msg.Tx networkFeeAccount, _ := p.state.NetworkFeeAccount() - basefee := p.evm.Context.BaseFee scenario := util.TracingAfterEVM if gasLeft > p.msg.GasLimit { @@ -467,9 +466,20 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) + effectiveBaseFee := inner.GasFeeCap + if p.msg.TxRunMode == core.MessageCommitMode && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { + log.Error( + "ArbitrumRetryTx GasFeeCap doesn't match basefee in commit mode", + "txHash", underlyingTx.Hash(), + "gasFeeCap", inner.GasFeeCap, + "baseFee", p.evm.Context.BaseFee, + ) + // revert to the old behavior to avoid diverging from older nodes + effectiveBaseFee = p.evm.Context.BaseFee + } // undo Geth's refund to the From address - gasRefund := arbmath.BigMulByUint(basefee, gasLeft) + gasRefund := arbmath.BigMulByUint(effectiveBaseFee, gasLeft) err := util.BurnBalance(&inner.From, gasRefund, p.evm, scenario, "undoRefund") if err != nil { log.Error("Uh oh, Geth didn't refund the user", inner.From, gasRefund) @@ -504,7 +514,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { takeFunds(maxRefund, inner.SubmissionFeeRefund) } // Conceptually, the gas charge is taken from the L1 deposit pool if possible. - takeFunds(maxRefund, arbmath.BigMulByUint(basefee, gasUsed)) + takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed)) // Refund any unused gas, without overdrafting the L1 deposit. networkRefund := gasRefund if p.state.ArbOSVersion() >= 11 { @@ -514,7 +524,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei() p.state.Restrict(err) // TODO MinBaseFeeWei change during RetryTx execution may cause incorrect calculation of the part of the refund that should be taken from infraFeeAccount. Unless the balances of network and infra fee accounts are too low, the amount transferred to refund address should remain correct. - infraFee := arbmath.BigMin(minBaseFee, basefee) + infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee) infraRefund := arbmath.BigMulByUint(infraFee, gasLeft) infraRefund = takeFunds(networkRefund, infraRefund) refund(infraFeeAccount, infraRefund) @@ -542,6 +552,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { return } + basefee := p.evm.Context.BaseFee totalCost := arbmath.BigMul(basefee, arbmath.UintToBig(gasUsed)) // total cost = price of gas * gas burnt computeCost := arbmath.BigSub(totalCost, p.PosterFee) // total cost = network's compute + poster's L1 costs if computeCost.Sign() < 0 { @@ -603,9 +614,15 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { func (p *TxProcessor) ScheduledTxes() types.Transactions { scheduled := types.Transactions{} time := p.evm.Context.Time - basefee := p.evm.Context.BaseFee + effectiveBaseFee := p.evm.Context.BaseFee chainID := p.evm.ChainConfig().ChainID + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { + // In gas estimation or eth_call mode, we permit a zero gas fee cap. + // This matches behavior with normal tx gas estimation and eth_call. 
+ effectiveBaseFee = common.Big0 + } + logs := p.evm.StateDB.GetCurrentTxLogs() for _, log := range logs { if log.Address != ArbRetryableTxAddress || log.Topics[0] != RedeemScheduledEventID { @@ -624,7 +641,7 @@ func (p *TxProcessor) ScheduledTxes() types.Transactions { redeem, _ := retryable.MakeTx( chainID, event.SequenceNum, - basefee, + effectiveBaseFee, event.DonatedGas, event.TicketId, event.GasDonor, diff --git a/go-ethereum b/go-ethereum index b4221631e1..e8c8827c0b 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b4221631e1e5eac86f01582bd74234e3c0f7f5c7 +Subproject commit e8c8827c0b9e22e60829da1945cba9c451cda85a
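The central idea running through the storage patches above is memoizing Keccak-derived storage keys, so that repeatedly opening the same substorage or mapping the same slot does not rehash the same bytes. The minimal Go sketch below illustrates that idea only; it is not the Nitro implementation, the names subStorageKey, keyCache and cacheSize are invented for illustration, and it reuses the geth lru and crypto packages the patches themselves import.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/crypto"
)

// cacheSize bounds the memo; only static subspace ids are expected, so the working set stays small.
const cacheSize = 1024

// keyCache plays the role of the package-level hash cache in the patches.
var keyCache = lru.NewCache[string, []byte](cacheSize)

// subStorageKey derives a child storage key as Keccak256(parentKey ++ id),
// consulting the cache first. It burns no gas; metered hashing would go
// through a Burner, as Storage.Keccak does in the patches.
func subStorageKey(parentKey, id []byte) []byte {
	lookup := string(parentKey) + string(id)
	if hashed, ok := keyCache.Get(lookup); ok {
		return hashed
	}
	hashed := crypto.Keccak256(parentKey, id)
	keyCache.Add(lookup, hashed)
	return hashed
}

func main() {
	root := []byte{}
	// Opening the same subspace twice hits the cache the second time.
	fmt.Printf("%x\n", subStorageKey(root, []byte{0x01}))
	fmt.Printf("%x\n", subStorageKey(root, []byte{0x01}))
}

The patched code additionally falls back to plain crypto.Keccak256 when no cache is configured, and logs a one-time warning if the cache ever evicts, since only static keys are expected to reach it.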
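Patches 15 and 16 both turn on one rule: when a transaction runs outside commit mode (gas estimation or eth_call) with a zero gas fee cap, retryable fee accounting should use a zero base fee rather than the block base fee. A simplified, self-contained sketch of that selection follows; the runMode type and its constants are invented for illustration and this is not the tx_processor.go code.

package main

import (
	"fmt"
	"math/big"
)

// runMode stands in for the message run mode; the names are illustrative only.
type runMode int

const (
	commitMode runMode = iota
	gasEstimationMode
	ethCallMode
)

// effectiveBaseFee returns the base fee used for retryable fee bookkeeping:
// outside commit mode, a zero gas fee cap means fees are computed against a
// zero base fee, matching how plain gas estimation and eth_call behave.
func effectiveBaseFee(mode runMode, gasFeeCap, blockBaseFee *big.Int) *big.Int {
	if mode != commitMode && gasFeeCap.BitLen() == 0 {
		return new(big.Int)
	}
	return blockBaseFee
}

func main() {
	blockBaseFee := big.NewInt(100_000_000)
	fmt.Println(effectiveBaseFee(gasEstimationMode, big.NewInt(0), blockBaseFee)) // 0
	fmt.Println(effectiveBaseFee(commitMode, big.NewInt(0), blockBaseFee))        // 100000000
}

The EndTxHook change in patch 16 applies the same idea from the other side: for a retry transaction it treats the ticket's recorded GasFeeCap as the effective base fee, and only logs an error and falls back to the block base fee if the two diverge in commit mode.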