From cebea6fb233da8ba7b348c68525df7d7e3068757 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 02:24:52 +0000 Subject: [PATCH 01/15] add storage keys caching --- arbos/addressSet/addressSet.go | 4 +- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 +++++------ arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +-- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 ++--- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 63 ++++++++++++++++++---- util/containers/safe_lru.go | 82 +++++++++++++++++++++++++++++ 12 files changed, 171 insertions(+), 46 deletions(-) create mode 100644 util/containers/safe_lru.go diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index ae2e6a34c1..72200c0373 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -24,9 +24,9 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}), + sto.OpenSubStorage([]byte{0}, false), } } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 220c2700f4..56f04badff 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto, sto.OpenSubStorage([]byte{}), numItems} + return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 2bea8f7c54..93ec1ec469 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -71,13 +71,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace)), + l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), + l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), + retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), + addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), + addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), + blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), 
backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -220,14 +220,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace) + _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) + _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) + addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) + blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) + + ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -385,7 +385,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index c4643c9183..384fc9c72f 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 2eedf7f5bb..99fb3ef470 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage, backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 97b7b16234..4ac86307ec 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage 
*storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey)) + return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey)), - posterInfo: storage.OpenSubStorage(PosterInfoKey), + posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), + posterInfo: storage.OpenSubStorage(PosterInfoKey, false), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ -68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9772ac028b..0d6bca7bf9 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -82,7 +82,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey) + bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -117,7 +117,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey)), + OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index d8d491bdb0..abeec49a93 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index abea2ab7bd..0322938541 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey)) + return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey)), + storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, 
error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 55231d3a90..032ac11aad 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 478ad68f8f..b93c835ff0 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -4,6 +4,7 @@ package storage import ( + "bytes" "fmt" "math/big" @@ -17,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -43,12 +45,18 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner + hashCache *containers.SafeLruCache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 const StorageWriteCost = params.SstoreSetGasEIP2200 const StorageWriteZeroCost = params.SstoreResetGasEIP2200 +const storageKeyCacheSize = 1024 + +// TODO(magic) rename? 
+var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) + // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { account := common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") @@ -58,6 +66,7 @@ func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { db: statedb, storageKey: []byte{}, burner: burner, + hashCache: storageHashCache, } } @@ -81,15 +90,16 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - return common.BytesToHash( + mapped := common.BytesToHash( append( - crypto.Keccak256(storageKey, keyBytes[:boundary])[:boundary], + store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], keyBytes[boundary], ), ) + return mapped } func writeCost(value common.Hash) uint64 { @@ -111,11 +121,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return mapAddress(store.storageKey, key) + return store.mapAddress(store.storageKey, key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -143,7 +153,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) return nil } @@ -171,12 +181,27 @@ func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, return oldValue, store.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte) *Storage { +func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { + var hashCache *containers.SafeLruCache[string, []byte] + if cacheKeys { + hashCache = storageHashCache + } return &Storage{ store.account, store.db, - crypto.Keccak256(store.storageKey, id), + store.cachedKeccak(store.storageKey, id), store.burner, + hashCache, + } +} + +func (store *Storage) NoCacheCopy() *Storage { + return &Storage{ + store.account, + store.db, + store.storageKey, + store.burner, + nil, } } @@ -266,6 +291,24 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +func (store *Storage) cachedKeccak(data ...[]byte) []byte { + if store.hashCache == nil { + return crypto.Keccak256(data...) + } + keyString := string(bytes.Join(data, []byte{})) + hash, isCached := store.hashCache.Get(keyString) + if isCached { + return hash + } + // TODO(magic) we might miss the warning if concurrent Add will be before + if store.hashCache.Size()-store.hashCache.Len() == 1 { + log.Warn("Hash cache almost full, but we didn't expect that. 
We may be caching some non-static keys.") + } + hash = crypto.Keccak256(data...) + store.hashCache.Add(keyString, hash) + return hash +} + func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { bytes, err := store.Keccak(data...) return common.BytesToHash(bytes), err @@ -279,7 +322,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -590,7 +633,7 @@ type StorageBackedBytes struct { func (store *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id), + *store.OpenSubStorage(id, false), } } diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go new file mode 100644 index 0000000000..40e7d993ec --- /dev/null +++ b/util/containers/safe_lru.go @@ -0,0 +1,82 @@ +package containers + +import ( + "sync" +) + +// thread safe version of containers.LruCache +type SafeLruCache[K comparable, V any] struct { + inner *LruCache[K, V] + mutex sync.RWMutex +} + +func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { + return NewSafeLruCacheWithOnEvict[K, V](size, nil) +} + +func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { + return &SafeLruCache[K, V]{ + inner: NewLruCacheWithOnEvict(size, onEvict), + } +} + +// Returns true if an item was evicted +func (c *SafeLruCache[K, V]) Add(key K, value V) bool { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Add(key, value) +} + +func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Get(key) +} + +func (c *SafeLruCache[K, V]) Contains(key K) bool { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Contains(key) +} + +func (c *SafeLruCache[K, V]) Remove(key K) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Remove(key) +} + +func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.GetOldest() +} + +func (c *SafeLruCache[K, V]) RemoveOldest() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.RemoveOldest() +} + +func (c *SafeLruCache[K, V]) Len() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Len() +} + +func (c *SafeLruCache[K, V]) Size() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Size() +} + +func (c *SafeLruCache[K, V]) Clear() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Clear() +} + +func (c *SafeLruCache[K, V]) Resize(newSize int) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Resize(newSize) +} From 691a213eb0127a20d32114dfcd73a652a84c2ea4 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 16:42:33 +0000 Subject: [PATCH 02/15] add onetime warning when hash cache unexpectedly fills up --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index b93c835ff0..39af072f0a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -7,6 +7,7 @@ import ( "bytes" "fmt" "math/big" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -54,8 +55,8 @@ const StorageWriteZeroCost = 
params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -// TODO(magic) rename? var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { @@ -300,12 +301,11 @@ func (store *Storage) cachedKeccak(data ...[]byte) []byte { if isCached { return hash } - // TODO(magic) we might miss the warning if concurrent Add will be before - if store.hashCache.Size()-store.hashCache.Len() == 1 { - log.Warn("Hash cache almost full, but we didn't expect that. We may be caching some non-static keys.") - } hash = crypto.Keccak256(data...) - store.hashCache.Add(keyString, hash) + evicted := store.hashCache.Add(keyString, hash) + if evicted && cacheFullLogged.CompareAndSwap(false, true) { + log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") + } return hash } From f802560c966b27eeba1b68a51585e1238ecb5787 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 18:44:13 +0000 Subject: [PATCH 03/15] fix race in mapAddress --- arbos/storage/storage.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 39af072f0a..f234e69d5a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -94,13 +94,10 @@ func NewMemoryBackedStateDB() vm.StateDB { func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - mapped := common.BytesToHash( - append( - store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], - keyBytes[boundary], - ), - ) - return mapped + mapped := make([]byte, 0, common.HashLength) + mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, keyBytes[boundary]) + return common.BytesToHash(mapped) } func writeCost(value common.Hash) uint64 { @@ -292,13 +289,14 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +// note: returned slice is not thread-safe func (store *Storage) cachedKeccak(data ...[]byte) []byte { if store.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, isCached := store.hashCache.Get(keyString) - if isCached { + hash, wasCached := store.hashCache.Get(keyString) + if wasCached { return hash } hash = crypto.Keccak256(data...) From 0fd07c6dbbee64e8c80f45125aec670c52ce7114 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 19:04:26 +0000 Subject: [PATCH 04/15] remove redundant mapAddress parameter --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index f234e69d5a..66f3d49473 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. 
-func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -119,11 +119,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(store.storageKey, key) + return store.mapAddress(key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -151,7 +151,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(key), value) return nil } @@ -320,7 +320,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { From 001ae3179abb304ee87b3e36cdfc03414cefaf02 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 22:05:10 +0200 Subject: [PATCH 05/15] address review comments --- arbos/addressSet/addressSet.go | 90 ++++++------- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 ++--- arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 +- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 200 +++++++++++++++------------- 11 files changed, 184 insertions(+), 174 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 38b07f7a1b..50a43c36cf 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -26,49 +26,49 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto.NoCacheCopy(), - sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}, false), + backingStorage: sto.WithoutCache(), + size: sto.OpenStorageBackedUint64(0), + byAddress: sto.OpenSubStorage([]byte{0}), } } -func (aset *AddressSet) Size() (uint64, error) { - return aset.size.Get() +func (as *AddressSet) Size() (uint64, error) { + return as.size.Get() } -func (aset *AddressSet) IsMember(addr common.Address) (bool, error) { - value, err := aset.byAddress.Get(util.AddressToHash(addr)) +func (as *AddressSet) IsMember(addr common.Address) (bool, error) { + value, err := as.byAddress.Get(util.AddressToHash(addr)) return value != (common.Hash{}), err } -func (aset *AddressSet) GetAnyMember() (*common.Address, error) { - size, err := 
aset.size.Get() +func (as *AddressSet) GetAnyMember() (*common.Address, error) { + size, err := as.size.Get() if err != nil || size == 0 { return nil, err } - sba := aset.backingStorage.OpenStorageBackedAddressOrNil(1) + sba := as.backingStorage.OpenStorageBackedAddressOrNil(1) addr, err := sba.Get() return addr, err } -func (aset *AddressSet) Clear() error { - size, err := aset.size.Get() +func (as *AddressSet) Clear() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - contents, _ := aset.backingStorage.GetByUint64(i) - _ = aset.backingStorage.ClearByUint64(i) - err = aset.byAddress.Clear(contents) + contents, _ := as.backingStorage.GetByUint64(i) + _ = as.backingStorage.ClearByUint64(i) + err = as.byAddress.Clear(contents) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { - size, err := aset.size.Get() +func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { + size, err := as.size.Get() if err != nil { return nil, err } @@ -77,7 +77,7 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err } ret := make([]common.Address, size) for i := range ret { - sba := aset.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) + sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) ret[i], err = sba.Get() if err != nil { return nil, err @@ -86,22 +86,22 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err return ret, nil } -func (aset *AddressSet) ClearList() error { - size, err := aset.size.Get() +func (as *AddressSet) ClearList() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - err = aset.backingStorage.ClearByUint64(i) + err = as.backingStorage.ClearByUint64(i) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) RectifyMapping(addr common.Address) error { - isOwner, err := aset.IsMember(addr) +func (as *AddressSet) RectifyMapping(addr common.Address) error { + isOwner, err := as.IsMember(addr) if !isOwner || err != nil { return errors.New("RectifyMapping: Address is not an owner") } @@ -109,15 +109,15 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { // If the mapping is correct, RectifyMapping shouldn't do anything // Additional safety check to avoid corruption of mapping after the initial fix addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if err != nil { return err } - atSlot, err := aset.backingStorage.GetByUint64(slot) + atSlot, err := as.backingStorage.GetByUint64(slot) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } @@ -126,72 +126,72 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { } // Remove the owner from map and add them as a new owner - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - return aset.Add(addr) + return as.Add(addr) } -func (aset *AddressSet) Add(addr common.Address) error { - present, err := aset.IsMember(addr) +func (as *AddressSet) Add(addr common.Address) error { + present, err := as.IsMember(addr) if present || err != nil { return err } - size, err := aset.size.Get() + size, err := 
as.size.Get() if err != nil { return err } slot := util.UintToHash(1 + size) addrAsHash := common.BytesToHash(addr.Bytes()) - err = aset.byAddress.Set(addrAsHash, slot) + err = as.byAddress.Set(addrAsHash, slot) if err != nil { return err } - sba := aset.backingStorage.OpenStorageBackedAddress(1 + size) + sba := as.backingStorage.OpenStorageBackedAddress(1 + size) err = sba.Set(addr) if err != nil { return err } - _, err = aset.size.Increment() + _, err = as.size.Increment() return err } -func (aset *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { +func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if slot == 0 || err != nil { return err } - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } if slot < size { - atSize, err := aset.backingStorage.GetByUint64(size) + atSize, err := as.backingStorage.GetByUint64(size) if err != nil { return err } - err = aset.backingStorage.SetByUint64(slot, atSize) + err = as.backingStorage.SetByUint64(slot, atSize) if err != nil { return err } if arbosVersion >= 11 { - err = aset.byAddress.Set(atSize, util.UintToHash(slot)) + err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err } } } - err = aset.backingStorage.ClearByUint64(size) + err = as.backingStorage.ClearByUint64(size) if err != nil { return err } - _, err = aset.size.Decrement() + _, err = as.size.Decrement() return err } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 56f04badff..f44e7f3dcf 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} + return &AddressTable{sto.WithoutCache(), sto.OpenSubStorage([]byte{}), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 750c796e33..8702c62d16 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -73,13 +73,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), + l1pricing.OpenL1PricingState(backingStorage.OpenCachedSubStorage(l1PricingSubspace)), + 
l2pricing.OpenL2PricingState(backingStorage.OpenCachedSubStorage(l2PricingSubspace)), + retryables.OpenRetryableState(backingStorage.OpenCachedSubStorage(retryablesSubspace), stateDB), + addressTable.Open(backingStorage.OpenCachedSubStorage(addressTableSubspace)), + addressSet.OpenAddressSet(backingStorage.OpenCachedSubStorage(chainOwnerSubspace)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenCachedSubStorage(sendMerkleSubspace)), + blockhash.OpenBlockhashes(backingStorage.OpenCachedSubStorage(blockhashesSubspace)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -225,14 +225,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) + _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenCachedSubStorage(l2PricingSubspace)) + _ = retryables.InitializeRetryableState(sto.OpenCachedSubStorage(retryablesSubspace)) + addressTable.Initialize(sto.OpenCachedSubStorage(addressTableSubspace)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenCachedSubStorage(sendMerkleSubspace)) + blockhash.InitializeBlockhashes(sto.OpenCachedSubStorage(blockhashesSubspace)) + + ownersStorage := sto.OpenCachedSubStorage(chainOwnerSubspace) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -428,7 +428,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenCachedSubStorage(sendMerkleSubspace)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index 384fc9c72f..ef63c23386 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 99fb3ef470..34c907207c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func 
InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.WithoutCache(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 4ac86307ec..a3428c441c 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage *storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) + return addressSet.Initialize(storage.OpenCachedSubStorage(PosterAddrsKey)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), - posterInfo: storage.OpenSubStorage(PosterInfoKey, false), + posterAddrs: addressSet.OpenAddressSet(storage.OpenCachedSubStorage(PosterAddrsKey)), + posterInfo: storage.OpenSubStorage(PosterInfoKey), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ -68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 1956eab29b..58c1042ab7 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -83,7 +83,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) + bptStorage := sto.OpenCachedSubStorage(BatchPosterTableKey) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -118,7 +118,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), + OpenBatchPostersTable(sto.OpenCachedSubStorage(BatchPosterTableKey)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index abeec49a93..ff993a233f 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 0322938541..1121de01f4 100644 --- 
a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) + return storage.InitializeQueue(sto.OpenCachedSubStorage(timeoutQueueKey)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), + storage.OpenQueue(sto.OpenCachedSubStorage(timeoutQueueKey)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 032ac11aad..9c02dc1ee7 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto.NoCacheCopy(), + sto.WithoutCache(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 66f3d49473..47ca66445d 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. 
-func (store *Storage) mapAddress(key common.Hash) common.Hash { +func (s *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, s.cachedKeccak(s.storageKey, keyBytes[:boundary])[:boundary]...) mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -107,132 +107,139 @@ func writeCost(value common.Hash) uint64 { return StorageWriteCost } -func (store *Storage) Account() common.Address { - return store.account +func (s *Storage) Account() common.Address { + return s.account } -func (store *Storage) Get(key common.Hash) (common.Hash, error) { - err := store.burner.Burn(StorageReadCost) +func (s *Storage) Get(key common.Hash) (common.Hash, error) { + err := s.burner.Burn(StorageReadCost) if err != nil { return common.Hash{}, err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(key)), nil + return s.db.GetState(s.account, s.mapAddress(key)), nil } -func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(key) +func (s *Storage) GetStorageSlot(key common.Hash) common.Hash { + return s.mapAddress(key) } -func (store *Storage) GetUint64(key common.Hash) (uint64, error) { - value, err := store.Get(key) +func (s *Storage) GetUint64(key common.Hash) (uint64, error) { + value, err := s.Get(key) return value.Big().Uint64(), err } -func (store *Storage) GetByUint64(key uint64) (common.Hash, error) { - return store.Get(util.UintToHash(key)) +func (s *Storage) GetByUint64(key uint64) (common.Hash, error) { + return s.Get(util.UintToHash(key)) } -func (store *Storage) GetUint64ByUint64(key uint64) (uint64, error) { - return store.GetUint64(util.UintToHash(key)) +func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { + return s.GetUint64(util.UintToHash(key)) } -func (store *Storage) Set(key common.Hash, value common.Hash) error { - if store.burner.ReadOnly() { +func (s *Storage) Set(key common.Hash, value common.Hash) error { + if s.burner.ReadOnly() { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) return vm.ErrWriteProtection } - err := store.burner.Burn(writeCost(value)) + err := s.burner.Burn(writeCost(value)) if err != nil { return err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(key), value) + s.db.SetState(s.account, s.mapAddress(key), value) return nil } -func (store *Storage) SetByUint64(key uint64, value common.Hash) error { - return store.Set(util.UintToHash(key), value) +func (s *Storage) SetByUint64(key uint64, value common.Hash) error { + return s.Set(util.UintToHash(key), value) } -func (store *Storage) SetUint64ByUint64(key uint64, value uint64) error { - return store.Set(util.UintToHash(key), util.UintToHash(value)) +func (s *Storage) SetUint64ByUint64(key uint64, value uint64) error { + return s.Set(util.UintToHash(key), util.UintToHash(value)) } -func (store *Storage) Clear(key common.Hash) error { - return store.Set(key, common.Hash{}) +func (s *Storage) Clear(key common.Hash) error { + return s.Set(key, common.Hash{}) } -func (store *Storage) ClearByUint64(key 
uint64) error { - return store.Set(util.UintToHash(key), common.Hash{}) +func (s *Storage) ClearByUint64(key uint64) error { + return s.Set(util.UintToHash(key), common.Hash{}) } -func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { - oldValue, err := store.Get(key) +func (s *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { + oldValue, err := s.Get(key) if err != nil { return common.Hash{}, err } - return oldValue, store.Set(key, newValue) + return oldValue, s.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { - var hashCache *containers.SafeLruCache[string, []byte] - if cacheKeys { - hashCache = storageHashCache +func (s *Storage) OpenCachedSubStorage(id []byte) *Storage { + return &Storage{ + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: storageHashCache, } +} +func (s *Storage) OpenSubStorage(id []byte) *Storage { return &Storage{ - store.account, - store.db, - store.cachedKeccak(store.storageKey, id), - store.burner, - hashCache, + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: nil, } } -func (store *Storage) NoCacheCopy() *Storage { +// Returns shallow copy of Storage that won't use storage key hash cache. +// The storage space represented by the returned Storage is kept the same. +func (s *Storage) WithoutCache() *Storage { return &Storage{ - store.account, - store.db, - store.storageKey, - store.burner, - nil, + account: s.account, + db: s.db, + storageKey: s.storageKey, + burner: s.burner, + hashCache: nil, } } -func (store *Storage) SetBytes(b []byte) error { - err := store.ClearBytes() +func (s *Storage) SetBytes(b []byte) error { + err := s.ClearBytes() if err != nil { return err } - err = store.SetUint64ByUint64(0, uint64(len(b))) + err = s.SetUint64ByUint64(0, uint64(len(b))) if err != nil { return err } offset := uint64(1) for len(b) >= 32 { - err = store.SetByUint64(offset, common.BytesToHash(b[:32])) + err = s.SetByUint64(offset, common.BytesToHash(b[:32])) if err != nil { return err } b = b[32:] offset++ } - return store.SetByUint64(offset, common.BytesToHash(b)) + return s.SetByUint64(offset, common.BytesToHash(b)) } -func (store *Storage) GetBytes() ([]byte, error) { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) GetBytes() ([]byte, error) { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return nil, err } ret := []byte{} offset := uint64(1) for bytesLeft >= 32 { - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -240,7 +247,7 @@ func (store *Storage) GetBytes() ([]byte, error) { bytesLeft -= 32 offset++ } - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -248,18 +255,18 @@ func (store *Storage) GetBytes() ([]byte, error) { return ret, nil } -func (store *Storage) GetBytesSize() (uint64, error) { - return store.GetUint64ByUint64(0) +func (s *Storage) GetBytesSize() (uint64, error) { + return s.GetUint64ByUint64(0) } -func (store *Storage) ClearBytes() error { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) ClearBytes() error { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return err } offset := uint64(1) for bytesLeft > 0 { - err := store.ClearByUint64(offset) + err := s.ClearByUint64(offset) if err != nil { return err } @@ -270,48 +277,51 @@ func 
(store *Storage) ClearBytes() error { bytesLeft -= 32 } } - return store.ClearByUint64(0) + return s.ClearByUint64(0) } -func (store *Storage) Burner() burn.Burner { - return store.burner // not public because these should never be changed once set +func (s *Storage) Burner() burn.Burner { + return s.burner // not public because these should never be changed once set } -func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { +func (s *Storage) Keccak(data ...[]byte) ([]byte, error) { byteCount := 0 for _, part := range data { byteCount += len(part) } cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount)) - if err := store.burner.Burn(cost); err != nil { + if err := s.burner.Burn(cost); err != nil { return nil, err } return crypto.Keccak256(data...), nil } +func (s *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { + bytes, err := s.Keccak(data...) + return common.BytesToHash(bytes), err +} + +// Returns crypto.Keccak256 result for the given data +// If available the result is taken from hash cache +// otherwise crypto.Keccak256 is executed and its result is added to the cache and returned +// note: the method doesn't burn gas, as it's only intended for generating storage subspace keys and mapping slot addresses // note: returned slice is not thread-safe -func (store *Storage) cachedKeccak(data ...[]byte) []byte { - if store.hashCache == nil { +func (s *Storage) cachedKeccak(data ...[]byte) []byte { + if s.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, wasCached := store.hashCache.Get(keyString) - if wasCached { + if hash, wasCached := s.hashCache.Get(keyString); wasCached { return hash } - hash = crypto.Keccak256(data...) - evicted := store.hashCache.Add(keyString, hash) + hash := crypto.Keccak256(data...) + evicted := s.hashCache.Add(keyString, hash) if evicted && cacheFullLogged.CompareAndSwap(false, true) { log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") } return hash } -func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { - bytes, err := store.Keccak(data...) 
- return common.BytesToHash(bytes), err -} - type StorageSlot struct { account common.Address db vm.StateDB @@ -319,8 +329,8 @@ type StorageSlot struct { burner burn.Burner } -func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} +func (s *Storage) NewSlot(offset uint64) StorageSlot { + return StorageSlot{s.account, s.db, s.mapAddress(util.UintToHash(offset)), s.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -359,8 +369,8 @@ type StorageBackedInt64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { - return StorageBackedInt64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { + return StorageBackedInt64{s.NewSlot(offset)} } func (sbu *StorageBackedInt64) Get() (int64, error) { @@ -380,8 +390,8 @@ type StorageBackedBips struct { backing StorageBackedInt64 } -func (store *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { - return StorageBackedBips{StorageBackedInt64{store.NewSlot(offset)}} +func (s *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { + return StorageBackedBips{StorageBackedInt64{s.NewSlot(offset)}} } func (sbu *StorageBackedBips) Get() (arbmath.Bips, error) { @@ -397,8 +407,8 @@ type StorageBackedUint64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { - return StorageBackedUint64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { + return StorageBackedUint64{s.NewSlot(offset)} } func (sbu *StorageBackedUint64) Get() (uint64, error) { @@ -485,8 +495,8 @@ type StorageBackedBigUint struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint { - return StorageBackedBigUint{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint { + return StorageBackedBigUint{s.NewSlot(offset)} } func (sbbu *StorageBackedBigUint) Get() (*big.Int, error) { @@ -524,8 +534,8 @@ type StorageBackedBigInt struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { - return StorageBackedBigInt{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { + return StorageBackedBigInt{s.NewSlot(offset)} } func (sbbi *StorageBackedBigInt) Get() (*big.Int, error) { @@ -581,8 +591,8 @@ type StorageBackedAddress struct { StorageSlot } -func (store *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { - return StorageBackedAddress{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { + return StorageBackedAddress{s.NewSlot(offset)} } func (sba *StorageBackedAddress) Get() (common.Address, error) { @@ -604,8 +614,8 @@ func init() { NilAddressRepresentation = common.BigToHash(new(big.Int).Lsh(big.NewInt(1), 255)) } -func (store *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { - return StorageBackedAddressOrNil{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { + return StorageBackedAddressOrNil{s.NewSlot(offset)} } func (sba *StorageBackedAddressOrNil) Get() (*common.Address, error) { @@ -629,9 +639,9 @@ type StorageBackedBytes struct { Storage } -func (store *Storage) OpenStorageBackedBytes(id []byte) 
StorageBackedBytes { +func (s *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id, false), + *s.OpenSubStorage(id), } } From eeb327c3b7d3e81ff057f58e0fb366739610dee7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 23:45:59 +0200 Subject: [PATCH 06/15] replace SafeLruCache with lru.Cache from geth --- arbos/storage/storage.go | 6 +-- util/containers/safe_lru.go | 82 ------------------------------------- 2 files changed, 3 insertions(+), 85 deletions(-) delete mode 100644 util/containers/safe_lru.go diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 47ca66445d..63987b91f8 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" @@ -19,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -46,7 +46,7 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner - hashCache *containers.SafeLruCache[string, []byte] + hashCache *lru.Cache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 @@ -55,7 +55,7 @@ const StorageWriteZeroCost = params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var storageHashCache = lru.NewCache[string, []byte](storageKeyCacheSize) var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go deleted file mode 100644 index 40e7d993ec..0000000000 --- a/util/containers/safe_lru.go +++ /dev/null @@ -1,82 +0,0 @@ -package containers - -import ( - "sync" -) - -// thread safe version of containers.LruCache -type SafeLruCache[K comparable, V any] struct { - inner *LruCache[K, V] - mutex sync.RWMutex -} - -func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { - return NewSafeLruCacheWithOnEvict[K, V](size, nil) -} - -func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { - return &SafeLruCache[K, V]{ - inner: NewLruCacheWithOnEvict(size, onEvict), - } -} - -// Returns true if an item was evicted -func (c *SafeLruCache[K, V]) Add(key K, value V) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Add(key, value) -} - -func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Get(key) -} - -func (c *SafeLruCache[K, V]) Contains(key K) bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Contains(key) -} - -func (c *SafeLruCache[K, V]) Remove(key K) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Remove(key) -} - -func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.GetOldest() -} - -func (c *SafeLruCache[K, V]) RemoveOldest() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.RemoveOldest() -} - -func (c *SafeLruCache[K, V]) Len() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return 
c.inner.Len() -} - -func (c *SafeLruCache[K, V]) Size() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Size() -} - -func (c *SafeLruCache[K, V]) Clear() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Clear() -} - -func (c *SafeLruCache[K, V]) Resize(newSize int) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Resize(newSize) -} From 6f1c79fca98a5a2eec1c9302f4bb751c2599064f Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:20:15 +0200 Subject: [PATCH 07/15] add todos for renaming packages --- arbos/addressSet/addressSet.go | 2 ++ arbos/addressTable/addressTable.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 50a43c36cf..6d2db26941 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,6 +3,8 @@ package addressSet +// TODO rename this package to lowercase + import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index f44e7f3dcf..bfc526d39d 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,6 +3,8 @@ package addressTable +// TODO rename this package to lowercase + import ( "bytes" "errors" From 9ec92bc55121f2c54571268b567a9249d5680060 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:22:01 +0200 Subject: [PATCH 08/15] rephrase todos --- arbos/addressSet/addressSet.go | 2 +- arbos/addressTable/addressTable.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 6d2db26941..1f09ff1440 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,7 +3,7 @@ package addressSet -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index bfc526d39d..3fbb7b3782 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,7 +3,7 @@ package addressTable -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "bytes" From 41b9517ec40c1a72e130dbe53940952910a14283 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 18:21:29 +0200 Subject: [PATCH 09/15] add comment to skipped error checks --- arbos/retryables/retryable.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 1121de01f4..6984e41904 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -150,6 +150,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario return false, err } + // we ignore returned error as we expect that if one ClearByUint64 fails, than all consecutive calls to ClearByUint64 will fail with the same error (not modifying state), and then ClearBytes will also fail with the same error (also not modifying state) - and this one we check and return _ = retStorage.ClearByUint64(numTriesOffset) _ = retStorage.ClearByUint64(fromOffset) _ = retStorage.ClearByUint64(toOffset) From 0bb425a4de6139d17c55ba0824770f3ae350ccc3 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 16 Oct 2023 17:20:37 +0200 Subject: [PATCH 10/15] Add client certificate, require to verify client cert --- arbnode/dataposter/data_poster.go | 33 ++++++++++++---- arbnode/dataposter/dataposter_test.go | 31 ++++++++++++--- 
arbnode/dataposter/testdata/client.cnf | 52 ++++++++++++++++++++++++++ arbnode/dataposter/testdata/client.crt | 28 ++++++++++++++ arbnode/dataposter/testdata/client.key | 28 ++++++++++++++ 5 files changed, 159 insertions(+), 13 deletions(-) create mode 100644 arbnode/dataposter/testdata/client.cnf create mode 100644 arbnode/dataposter/testdata/client.crt create mode 100644 arbnode/dataposter/testdata/client.key diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 56e87e3b29..02a3548660 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -175,21 +175,32 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro } func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { - rootCrt, err := os.ReadFile(opts.RootCA) + clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) if err != nil { - return nil, fmt.Errorf("error reading external signer root CA: %w", err) + return nil, fmt.Errorf("error loading client certificate and private key: %w", err) } - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(rootCrt) + + tlsCfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{clientCert}, + } + if opts.RootCA != "" { + rootCrt, err := os.ReadFile(opts.RootCA) + if err != nil { + return nil, fmt.Errorf("error reading external signer root CA: %w", err) + } + rootCertPool := x509.NewCertPool() + rootCertPool.AppendCertsFromPEM(rootCrt) + tlsCfg.RootCAs = rootCertPool + } + return rpc.DialOptions( ctx, opts.URL, rpc.WithHTTPClient( &http.Client{ Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: pool, - }, + TLSClientConfig: tlsCfg, }, }, ), @@ -742,9 +753,13 @@ type ExternalSignerCfg struct { Address string `koanf:"address"` // API method name (e.g. eth_signTransaction). Method string `koanf:"method"` - // Path to the external signer root CA certificate. + // (Optional) Path to the external signer root CA certificate. // This allows us to use self-signed certificats on the external signer. RootCA string `koanf:"root-ca"` + // Client certificate for mtls. + ClientCert string `koanf:"client-cert"` + // Client certificate key for mtls. 
+ ClientPrivateKey string `koanf:"client-private-key"` } type DangerousConfig struct { @@ -787,6 +802,8 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".address", DefaultDataPosterConfig.ExternalSigner.Address, "external signer address") f.String(prefix+".method", DefaultDataPosterConfig.ExternalSigner.Method, "external signer method") f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") + f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") + f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") } var DefaultDataPosterConfig = DataPosterConfig{ diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 0b3b8cba5c..d21390bccd 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -2,6 +2,8 @@ package dataposter import ( "context" + "crypto/tls" + "crypto/x509" "encoding/json" "fmt" "io" @@ -74,10 +76,12 @@ func TestExternalSigner(t *testing.T) { }() signer, addr, err := externalSigner(ctx, &ExternalSignerCfg{ - Address: srv.address.Hex(), - URL: "https://localhost:1234", - Method: "test_signTransaction", - RootCA: cert, + Address: srv.address.Hex(), + URL: "https://localhost:1234", + Method: "test_signTransaction", + RootCA: cert, + ClientCert: "./testdata/client.crt", + ClientPrivateKey: "./testdata/client.key", }) if err != nil { t.Fatalf("Error getting external signer: %v", err) @@ -129,7 +133,24 @@ func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { "test_signTransaction": s.signTransaction, } m := http.NewServeMux() - httpSrv := &http.Server{Addr: ":1234", Handler: m, ReadTimeout: 5 * time.Second} + + clientCert, err := os.ReadFile("./testdata/client.crt") + if err != nil { + panic(err) + } + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(clientCert) + + httpSrv := &http.Server{ + Addr: ":1234", + Handler: m, + ReadTimeout: 5 * time.Second, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, + } m.HandleFunc("/", s.mux) return httpSrv, s } diff --git a/arbnode/dataposter/testdata/client.cnf b/arbnode/dataposter/testdata/client.cnf new file mode 100644 index 0000000000..8c15cc3dbc --- /dev/null +++ b/arbnode/dataposter/testdata/client.cnf @@ -0,0 +1,52 @@ +[req] +default_bits = 2048 +default_keyfile = server-key.pem +distinguished_name = subject +req_extensions = req_ext +x509_extensions = x509_ext +string_mask = utf8only + +[subject] +countryName = CH +countryName_default = CH + +stateOrProvinceName = Zurich +stateOrProvinceName_default = ZH + +localityName = city +localityName_default = Zurich + +organizationName = Offchain Labs +organizationName_default = Offchain Labs + +commonName = offchainlabs.ch +commonName_default = localhost + +emailAddress = Email Address +emailAddress_default = notabigdeal@offchainlabs.ch + +[x509_ext] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[req_ext] +subjectKeyIdentifier = hash + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[alternate_names] +DNS.1 = localhost +DNS.2 = 
127.0.0.1 + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + diff --git a/arbnode/dataposter/testdata/client.crt b/arbnode/dataposter/testdata/client.crt new file mode 100644 index 0000000000..3d494be820 --- /dev/null +++ b/arbnode/dataposter/testdata/client.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIE0jCCA7qgAwIBAgIUPaBB3/hHMpZfGB3VOw1+mHG4LnUwDQYJKoZIhvcNAQEL +BQAwgYMxCzAJBgNVBAYTAkNIMQswCQYDVQQIDAJaSDEPMA0GA1UEBwwGWnVyaWNo +MRYwFAYDVQQKDA1PZmZjaGFpbiBMYWJzMRIwEAYDVQQDDAlsb2NhbGhvc3QxKjAo +BgkqhkiG9w0BCQEWG25vdGFiaWdkZWFsQG9mZmNoYWlubGFicy5jaDAeFw0yMzEw +MTYxNDU2MjhaFw0yNDEwMTUxNDU2MjhaMIGDMQswCQYDVQQGEwJDSDELMAkGA1UE +CAwCWkgxDzANBgNVBAcMBlp1cmljaDEWMBQGA1UECgwNT2ZmY2hhaW4gTGFiczES +MBAGA1UEAwwJbG9jYWxob3N0MSowKAYJKoZIhvcNAQkBFhtub3RhYmlnZGVhbEBv +ZmZjaGFpbmxhYnMuY2gwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1 +1asfUzv07QTVwlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSH +JPFNbZB3dmBuqDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0D +S6HeL/6DFoTQ2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuG +Whw3oXz9gU/8gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06c +QrMKrgFfF7a5kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55H +HfQi6x8cbM46/h3riZA3AgMBAAGjggE6MIIBNjAdBgNVHQ4EFgQUQD2BOems0+JQ +br234cW5noMmXRIwga0GA1UdIwSBpTCBoqGBiaSBhjCBgzELMAkGA1UEBhMCQ0gx +CzAJBgNVBAgMAlpIMQ8wDQYDVQQHDAZadXJpY2gxFjAUBgNVBAoMDU9mZmNoYWlu +IExhYnMxEjAQBgNVBAMMCWxvY2FsaG9zdDEqMCgGCSqGSIb3DQEJARYbbm90YWJp +Z2RlYWxAb2ZmY2hhaW5sYWJzLmNoghQ9oEHf+Ecyll8YHdU7DX6YcbgudTAJBgNV +HRMEAjAAMAsGA1UdDwQEAwIFoDAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4w +LjAuMTAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNh +dGUwDQYJKoZIhvcNAQELBQADggEBAF4EVkOZZeMIvv0JViP7NsmIl2ke/935x6Hd +hQiLUw13XHYXzMa5/8Y5fnKjttBODpFoQlwjgI18vzuYzItYMBc2cabQJcpfG+Wq +M3m/wl1TC2XOuHj1E4RA/nU3tslntahtXG+vkks9RN+f9irHUhDRR6AGSnSB2Gi/ +B2OGmXn7S4Qge8+fGHAjN+tlu+tOoEWP6R3if/a9UIe5EGM8QTe4zw6lr+iPrOhC +M94pK5IEWn5IIGhr3zJIYkm/Dp+rFqhV1sqPOjjFLVCA7KJ3jVVVHlcm4Xa/+fyk +CIm7/VAmnbeUNlMbkXNOfQMeku8Iwsu80pvf3kjhU/PgO/5oojk= +-----END CERTIFICATE----- diff --git a/arbnode/dataposter/testdata/client.key b/arbnode/dataposter/testdata/client.key new file mode 100644 index 0000000000..b14941dd9f --- /dev/null +++ b/arbnode/dataposter/testdata/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC11asfUzv07QTV +wlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSHJPFNbZB3dmBu +qDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0DS6HeL/6DFoTQ +2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuGWhw3oXz9gU/8 +gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06cQrMKrgFfF7a5 +kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55HHfQi6x8cbM46 +/h3riZA3AgMBAAECggEADUboCYMCpm+LqIhzNCtqswQD6QsiSwCmqs8nuKZGk9ue ++hmZj5IpgMJZLrgvWY4s+PGfgiRR/28QCBrVXkETiZ5zirQFN4tvLlKcSK4xZf29 +FBRUCiPxck36NhiqrBNOi1Mn8BKedl4cESkvSu1cvcmeOh100HPcHfLDVqHx3qsl +D/5yMkT2+zdhtLa+X3nkAa+3aibOvgtyfkV679e20CG6h89N9GBKkTXO8ioLZZVm +84ksnd4FcpTo7ebJJxElEB+ZA4akPHbF6ArUmcpqtGso5GtwqqO2ZlguSn2XQT0d +jqvOG4DwfSXk6SpE/dpWvU92fmxWAxZvGrZNgDyJ2QKBgQDyQ8NN4b80Yza/YXar +LWx8A6B0eMc1dXgt9m3UUI+titt45jEcaXhCX01FRFTznWGmWFtJmcWBoaQVPVel +IcDYQSxEuBUrCeI75ocv/IQtENaiX3TK7Nlz5RHfpQpfDVJq45lpiD38CGkYkAif +9pSzC8aup4W3WR0JJZ1AOHUZaQKBgQDAJNJnaSNzB+eDWTKCIN5V9X3QMkmjsuir +Nf2lBXHYARnlYWAbtYFG12wLJQMTNX5ewVQQrWtsdPkGPpCnPLelUTxMssrsXjej +JlLzYUfzRBqEXMI3AA9bVdiauxId2RTcp2F81SM1keCMcuHYxrzVkBSOC9u3wCnb +Whb6+feInwKBgQCbzgC5AcoaQwReqKvNAvWV/C8hONvFAbs8tBOGTBlbHsZvRnun +Lh1tciUbuwp3cmvuszxiZUakS/RexIitZrvDWIbD2y+h8kVRCL1Am0HWSdH/syxF 
+pXVkF5obHuVApCyxGZb8S+axRCdy6I7jcY3IaHZqtMpGVEVcMJilSKnmoQKBgQCC +tEmgaMfhhx34nqOaG4vDA4T7LEolnh1h4g9RwztnCZC5FZ1QHA79xqrLhfjqhzgY +cwChe6aYl5WSptq1uLrgLTuMnQ8m7QyB4h8JSkKse8ZiBctjqJnJssLutpSjUzk6 +xG2vgjk6RqpuP/PcB40K5cDlw7FJ9OFEQqthPMsi1wKBgQC0/vv5bY3DQ+wV6gUy +nFoSa/XNHaa8y7jmmlCnWJqs6DAAQQ3VW0tPX03GYL/NDcI+PwzYDHDkSB6Qa/o8 +VzVGK1/kr/+bveNvqmi0vNb54fMFLveGgsY4Cu1cffiw8m6nYJ/V4eCsHfpF1B5L +5HDnt5rFKt1Mi9WsUSRtxipxBA== +-----END PRIVATE KEY----- From 754d9be19aacb8cca479701099930e45b8e1d8e5 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 16 Oct 2023 17:58:32 +0200 Subject: [PATCH 11/15] add simple storage key caching tests --- arbos/storage/storage_test.go | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/arbos/storage/storage_test.go b/arbos/storage/storage_test.go index 35e6b7c4be..a8d424d14e 100644 --- a/arbos/storage/storage_test.go +++ b/arbos/storage/storage_test.go @@ -1,11 +1,16 @@ package storage import ( + "bytes" + "fmt" "math/big" + "math/rand" + "sync" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -91,3 +96,73 @@ func TestStorageBackedBigInt(t *testing.T) { }) } } + +func TestOpenCachedSubStorage(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var subSpaceIDs [][]byte + for i := 0; i < 20; i++ { + subSpaceIDs = append(subSpaceIDs, []byte{byte(rand.Intn(0xff))}) + } + var expectedKeys [][]byte + for _, subSpaceID := range subSpaceIDs { + expectedKeys = append(expectedKeys, crypto.Keccak256(s.storageKey, subSpaceID)) + } + n := len(subSpaceIDs) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(subSpaceIDs) + subSpaceID, expectedKey := subSpaceIDs[j], expectedKeys[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + ss := s.OpenCachedSubStorage(subSpaceID) + if !bytes.Equal(ss.storageKey, expectedKey) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expectedKey, ss.storageKey) + } + }() + } + close(start) + wg.Wait() + select { + case err := <-errs: + t.Fatal(err) + default: + } +} + +func TestMapAddressCache(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var keys []common.Hash + for i := 0; i < 20; i++ { + keys = append(keys, common.BytesToHash([]byte{byte(rand.Intn(0xff))})) + } + var expectedMapped []common.Hash + for _, key := range keys { + expectedMapped = append(expectedMapped, s.mapAddress(key)) + } + n := len(keys) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(keys) + key, expected := keys[j], expectedMapped[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + mapped := s.mapAddress(key) + if !bytes.Equal(mapped.Bytes(), expected.Bytes()) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expected, mapped) + } + }() + } + close(start) + wg.Wait() + if len(errs) > 0 { + t.Fatal(<-errs) + } +} From 059a45e804657901d62a3ab908d1886a76ad0f22 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:40:51 +0200 Subject: [PATCH 12/15] revert testnode pin --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index 7ad12c0f1b..aee6ceff9c 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ 
-Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e +Subproject commit aee6ceff9c9d3fb2749da55a7d7842f23d1bfc8e From b015ab8f06f8fc72c827af4388428949cec8a5c6 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:46:31 +0200 Subject: [PATCH 13/15] Make client certificate optional --- arbnode/dataposter/data_poster.go | 19 +++++++++++-------- arbnode/dataposter/dataposter_test.go | 2 +- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 39fda3f2ac..687b26ba26 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -175,15 +175,18 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro } func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { - clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) - if err != nil { - return nil, fmt.Errorf("error loading client certificate and private key: %w", err) + tlsCfg := &tls.Config{ + MinVersion: tls.VersionTLS12, } - tlsCfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{clientCert}, + if opts.ClientCert == "" || opts.ClientPrivateKey == "" { + clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) + if err != nil { + return nil, fmt.Errorf("error loading client certificate and private key: %w", err) + } + tlsCfg.Certificates = []tls.Certificate{clientCert} } + if opts.RootCA != "" { rootCrt, err := os.ReadFile(opts.RootCA) if err != nil { @@ -756,9 +759,9 @@ type ExternalSignerCfg struct { // (Optional) Path to the external signer root CA certificate. // This allows us to use self-signed certificats on the external signer. RootCA string `koanf:"root-ca"` - // Client certificate for mtls. + // (Optional) Client certificate for mtls. ClientCert string `koanf:"client-cert"` - // Client certificate key for mtls. + // (Optional) Client certificate key for mtls. 
ClientPrivateKey string `koanf:"client-private-key"` } diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 4734295ae8..d4d72bbbf4 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -136,7 +136,7 @@ func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { clientCert, err := os.ReadFile("./testdata/client.crt") if err != nil { - panic(err) + t.Fatalf("Error reading client certificate: %v", err) } pool := x509.NewCertPool() pool.AppendCertsFromPEM(clientCert) From 1d524078dc2c41fed4f2bc3efd401e18f911047f Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:55:46 +0200 Subject: [PATCH 14/15] Log when certificate is enabled, fix enabling --- arbnode/dataposter/data_poster.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 687b26ba26..1a202171ec 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -179,7 +179,8 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error MinVersion: tls.VersionTLS12, } - if opts.ClientCert == "" || opts.ClientPrivateKey == "" { + if opts.ClientCert != "" && opts.ClientPrivateKey != "" { + log.Info("Client certificate for external signer is enabled") clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) if err != nil { return nil, fmt.Errorf("error loading client certificate and private key: %w", err) @@ -762,6 +763,7 @@ type ExternalSignerCfg struct { // (Optional) Client certificate for mtls. ClientCert string `koanf:"client-cert"` // (Optional) Client certificate key for mtls. + // This is required when client-cert is set. ClientPrivateKey string `koanf:"client-private-key"` } From 058db5408a5cfda478332d5b8eb4061cb1279a8b Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 23 Oct 2023 13:23:31 +0200 Subject: [PATCH 15/15] Suppress golangci-lint error --- cmd/genericconf/pprof.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/genericconf/pprof.go b/cmd/genericconf/pprof.go index e55bfddd32..9fd3a6f2a4 100644 --- a/cmd/genericconf/pprof.go +++ b/cmd/genericconf/pprof.go @@ -17,8 +17,7 @@ func StartPprof(address string) { log.Info("Starting metrics server with pprof", "addr", fmt.Sprintf("http://%s/debug/metrics", address)) log.Info("Pprof endpoint", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) go func() { - // #nosec G114 - if err := http.ListenAndServe(address, http.DefaultServeMux); err != nil { + if err := http.ListenAndServe(address, http.DefaultServeMux); /* #nosec G114 */ err != nil { log.Error("Failure in running pprof server", "err", err) } }()
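
The storage-key caching introduced early in this series reduces to memoizing Keccak256 over static subspace identifiers in a bounded, thread-safe LRU cache. The following standalone sketch shows the shape of that pattern using geth's common/lru cache (the same package the series switches to); the cache size mirrors the patch's storageKeyCacheSize of 1024, but the function names, package layout, and the example inputs in main are illustrative only, not the repository's actual code.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/crypto"
)

// Assumed to mirror storageKeyCacheSize from the patch; purely illustrative here.
const keyCacheSize = 1024

// Package-level, shared cache, as in the patch: the set of static subspace
// keys is small and stable, so a bounded cache rarely evicts.
var keyHashCache = lru.NewCache[string, []byte](keyCacheSize)

// cachedKeccak returns crypto.Keccak256(data...), serving repeated requests
// for identical inputs from the cache. It burns no gas, so it is only suitable
// for deriving storage subspace keys and mapped slot addresses, never for
// user-controlled data that could flood the cache.
func cachedKeccak(data ...[]byte) []byte {
	key := string(bytes.Join(data, nil))
	if hash, ok := keyHashCache.Get(key); ok {
		return hash
	}
	hash := crypto.Keccak256(data...)
	keyHashCache.Add(key, hash)
	return hash
}

func main() {
	parent := []byte("parent-storage-key") // placeholder parent key
	sub := []byte{0x01}                    // placeholder subspace id
	fmt.Printf("subspace key: %x\n", cachedKeccak(parent, sub))
	// A second call with identical inputs is a cache hit and skips rehashing.
	fmt.Printf("cached again: %x\n", cachedKeccak(parent, sub))
}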
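
The mTLS wiring added for the external signer in patches 10 and 13 is standard crypto/tls client configuration: an optional client certificate pair plus an optional custom root CA for self-signed signer endpoints. A minimal sketch of that configuration is below; the certificate paths are the test fixtures from this series used as placeholders, and the helper name is invented for illustration. In the data poster itself, the resulting *http.Client is handed to rpc.DialOptions via rpc.WithHTTPClient.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newMTLSClient builds an HTTP client that presents a client certificate when
// both halves are configured and trusts an extra root CA when one is given.
func newMTLSClient(clientCert, clientKey, rootCA string) (*http.Client, error) {
	tlsCfg := &tls.Config{MinVersion: tls.VersionTLS12}

	// Client certificate is optional; enabling it requires both cert and key.
	if clientCert != "" && clientKey != "" {
		cert, err := tls.LoadX509KeyPair(clientCert, clientKey)
		if err != nil {
			return nil, fmt.Errorf("loading client certificate: %w", err)
		}
		tlsCfg.Certificates = []tls.Certificate{cert}
	}

	// Optional root CA, e.g. for a self-signed external signer certificate.
	if rootCA != "" {
		pem, err := os.ReadFile(rootCA)
		if err != nil {
			return nil, fmt.Errorf("reading root CA: %w", err)
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		tlsCfg.RootCAs = pool
	}

	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsCfg}}, nil
}

func main() {
	// Placeholder paths, matching the testdata layout used in the series.
	client, err := newMTLSClient("./testdata/client.crt", "./testdata/client.key", "")
	if err != nil {
		panic(err)
	}
	_ = client // in practice: rpc.DialOptions(ctx, url, rpc.WithHTTPClient(client))
}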
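
Patch 15 keeps http.ListenAndServe and only moves the #nosec G114 suppression so golangci-lint accepts it. An alternative way to satisfy gosec (not what the patch does) is to serve pprof from an explicit http.Server with a timeout set, since G114 flags the serve helpers that provide no timeouts. A rough sketch under that assumption, using the standard library logger rather than geth's:

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on http.DefaultServeMux
	"time"
)

// startPprof serves the default mux (including pprof) with an explicit
// ReadHeaderTimeout, which is the missing timeout gosec G114 warns about.
func startPprof(address string) {
	srv := &http.Server{
		Addr:              address,
		Handler:           http.DefaultServeMux,
		ReadHeaderTimeout: 5 * time.Second,
	}
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("pprof server stopped: %v", err)
		}
	}()
}

func main() {
	startPprof("localhost:6060") // placeholder address
	select {}                    // block so the example keeps serving
}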