From 0ef1c8399021dcf5ac6f44274f32a0ffd1f86123 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 9 Jun 2023 19:07:26 -0700 Subject: [PATCH 001/117] Use go-ethereum v1.20.0 Changes: - MakeSigner now takes block time. - IsShanghai now takes block number. - VerifyHeader removed seal param, we don't use it anyway. - GetEVM needs block context now, so we pass it into the hook. - StartMining no longer takes threads param. --- arbnode/execution/sequencer.go | 2 +- arbnode/execution/tx_pre_checker.go | 4 ++-- arbos/block_processor.go | 2 +- arbos/engine.go | 6 +++--- go-ethereum | 2 +- go.mod | 3 +-- go.sum | 5 ++--- nodeInterface/virtual-contracts.go | 6 ++---- system_tests/common_test.go | 2 +- 9 files changed, 14 insertions(+), 18 deletions(-) diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go index 3bc3c90cb1..1fe6ce83a1 100644 --- a/arbnode/execution/sequencer.go +++ b/arbnode/execution/sequencer.go @@ -639,7 +639,7 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { return queueItems } nextHeaderNumber := arbmath.BigAdd(latestHeader.Number, common.Big1) - signer := types.MakeSigner(bc.Config(), nextHeaderNumber) + signer := types.MakeSigner(bc.Config(), nextHeaderNumber, latestHeader.Time) outputQueueItems := make([]txQueueItem, 0, len(queueItems)) var nextQueueItem *txQueueItem var queueItemsIdx int diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go index 01cef6d7a4..c5f44ccd3c 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/arbnode/execution/tx_pre_checker.go @@ -115,7 +115,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if tx.Gas() < params.TxGas { return core.ErrIntrinsicGas } - sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number), tx) + sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number, header.Time), tx) if err != nil { return err } @@ -134,7 +134,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty return MakeNonceError(sender, tx.Nonce(), stateNonce) } extraInfo := types.DeserializeHeaderExtraInformation(header) - intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Time, extraInfo.ArbOSFormatVersion)) + intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Number, header.Time, extraInfo.ArbOSFormatVersion)) if err != nil { return err } diff --git a/arbos/block_processor.go b/arbos/block_processor.go index c5270e49ef..bed85b930a 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -189,7 +189,7 @@ func ProduceBlockAdvanced( } header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig) - signer := types.MakeSigner(chainConfig, header.Number) + signer := types.MakeSigner(chainConfig, header.Number, header.Time) // Note: blockGasLeft will diverge from the actual gas left during execution in the event of invalid txs, // but it's only used as block-local representation limiting the amount of work done in a block. 
blockGasLeft, _ := state.L2PricingState().PerBlockGasLimit() diff --git a/arbos/engine.go b/arbos/engine.go index ebc27c0886..0014e8ab96 100644 --- a/arbos/engine.go +++ b/arbos/engine.go @@ -23,15 +23,15 @@ func (e Engine) Author(header *types.Header) (common.Address, error) { return header.Coinbase, nil } -func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { +func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { // TODO what verification should be done here? return nil } -func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { errors := make(chan error, len(headers)) for i := range headers { - errors <- e.VerifyHeader(chain, headers[i], seals[i]) + errors <- e.VerifyHeader(chain, headers[i]) } return make(chan struct{}), errors } diff --git a/go-ethereum b/go-ethereum index 63816ba74a..c8150bfc1b 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 63816ba74a6945f9c4b4ebb7be8461367019ca80 +Subproject commit c8150bfc1b7dfa9c2d65914dceaef2517db4c8a8 diff --git a/go.mod b/go.mod index ff7332c3d6..9341eb2e2c 100644 --- a/go.mod +++ b/go.mod @@ -280,7 +280,6 @@ require ( github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/go-ole/go-ole v1.2.1 // indirect @@ -311,7 +310,7 @@ require ( golang.org/x/crypto v0.6.0 golang.org/x/net v0.8.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/go.sum b/go.sum index ea4ee68a5d..2926b21998 100644 --- a/go.sum +++ b/go.sum @@ -302,7 +302,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= @@ -1950,8 +1949,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= diff --git a/nodeInterface/virtual-contracts.go b/nodeInterface/virtual-contracts.go index 29ca3f2b82..ee81c1c3e6 100644 --- a/nodeInterface/virtual-contracts.go +++ b/nodeInterface/virtual-contracts.go @@ -53,6 +53,7 @@ func init() { statedb *state.StateDB, header *types.Header, backend core.NodeInterfaceBackendAPI, + blockCtx *vm.BlockContext, ) (*core.Message, *ExecutionResult, error) { to := msg.To arbosVersion := arbosState.ArbOSVersion(statedb) // check ArbOS has been installed @@ -87,10 +88,7 @@ func init() { return msg, nil, nil } - evm, vmError, err := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}) - if err != nil { - return msg, nil, err - } + evm, vmError := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}, blockCtx) go func() { <-ctx.Done() evm.Cancel() diff --git a/system_tests/common_test.go b/system_tests/common_test.go index e471899ff9..d6b4d9c5b6 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -421,7 +421,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no }}) Require(t, stack.Start()) - Require(t, l1backend.StartMining(1)) + Require(t, l1backend.StartMining()) rpcClient, err := stack.Attach() Require(t, err) From ddd2882169709bd6e168c6acc6f0dab5e95133fb Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 13 Jun 2023 11:04:21 -0700 Subject: [PATCH 002/117] Force leveldb instead of pebbledb --- cmd/nitro/nitro.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 0035171078..bb20fb2691 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -132,6 +132,7 @@ func mainImpl() int { } stackConf := node.DefaultConfig stackConf.DataDir = nodeConfig.Persistent.Chain + stackConf.DBEngine = "leveldb" nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) nodeConfig.AuthRPC.Apply(&stackConf) From 5c1d6c718d83d6038aeb35df52bdf8013d735ea1 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Fri, 7 Jul 2023 16:54:06 -0700 Subject: [PATCH 003/117] Add cgroups v2 support to resourcemanager --- .../resourcemanager/resource_management.go | 84 +++++++++++-------- .../resource_management_test.go | 36 ++++---- 2 files changed, 69 insertions(+), 51 deletions(-) diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index acb5355987..b1bdc45d70 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -90,18 +90,28 @@ type limitChecker interface { String() string } +func isSupported(c limitChecker) bool { + _, err := c.isLimitExceeded() + return err == nil +} + // newLimitChecker attempts to auto-discover the mechanism by which it -// can check system limits. Currently Cgroups V1 is supported, -// with Cgroups V2 likely to be implmemented next. If no supported -// mechanism is discovered, it logs an error and fails open, ie -// it creates a trivialLimitChecker that does no checks. +// can check system limits. 
Currently Cgroups V1 and V2 are supported. +// If no supported mechanism is discovered, it logs an error and +// fails open, ie it creates a trivialLimitChecker that does no checks. func newLimitChecker(conf *Config) limitChecker { - c := newCgroupsV1MemoryLimitChecker(DefaultCgroupsV1MemoryDirectory, conf.MemoryLimitPercent) + c := newCgroupsMemoryLimitChecker(cgroupsV1MemoryFiles, conf.MemoryLimitPercent) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") return c } + c = newCgroupsMemoryLimitChecker(cgroupsV2MemoryFiles, conf.MemoryLimitPercent) + if isSupported(c) { + log.Info("Cgroups v2 detected, enabling memory limit RPC throttling") + return c + } + log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") return &trivialLimitChecker{} } @@ -115,28 +125,37 @@ func (_ trivialLimitChecker) isLimitExceeded() (bool, error) { func (_ trivialLimitChecker) String() string { return "trivial" } -const DefaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +type cgroupsMemoryFiles struct { + limitFile, usageFile, statsFile string + inactiveRe *regexp.Regexp +} -type cgroupsV1MemoryLimitChecker struct { - cgroupDir string - memoryLimitPercent int +const defaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +const defaultCgroupsV2MemoryDirectory = "/sys/fs/cgroup/" - limitFile, usageFile, statsFile string +var cgroupsV1MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV1MemoryDirectory + "/memory.limit_in_bytes", + usageFile: defaultCgroupsV1MemoryDirectory + "/memory.usage_in_bytes", + statsFile: defaultCgroupsV1MemoryDirectory + "/memory.stat", + inactiveRe: regexp.MustCompile(`total_inactive_file (\d+)`), +} +var cgroupsV2MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV2MemoryDirectory + "/memory.max", + usageFile: defaultCgroupsV2MemoryDirectory + "/memory.current", + statsFile: defaultCgroupsV2MemoryDirectory + "/memory.stat", + inactiveRe: regexp.MustCompile(`inactive_file (\d+)`), } -func newCgroupsV1MemoryLimitChecker(cgroupDir string, memoryLimitPercent int) *cgroupsV1MemoryLimitChecker { - return &cgroupsV1MemoryLimitChecker{ - cgroupDir: cgroupDir, - memoryLimitPercent: memoryLimitPercent, - limitFile: cgroupDir + "/memory.limit_in_bytes", - usageFile: cgroupDir + "/memory.usage_in_bytes", - statsFile: cgroupDir + "/memory.stat", - } +type cgroupsMemoryLimitChecker struct { + files cgroupsMemoryFiles + memoryLimitPercent int } -func isSupported(c limitChecker) bool { - _, err := c.isLimitExceeded() - return err == nil +func newCgroupsMemoryLimitChecker(files cgroupsMemoryFiles, memoryLimitPercent int) *cgroupsMemoryLimitChecker { + return &cgroupsMemoryLimitChecker{ + files: files, + memoryLimitPercent: memoryLimitPercent, + } } // isLimitExceeded checks if the system memory used exceeds the limit @@ -145,24 +164,25 @@ func isSupported(c limitChecker) bool { // See the following page for details of calculating the memory used, // which is reported as container_memory_working_set_bytes in prometheus: // https://mihai-albert.com/2022/02/13/out-of-memory-oom-in-kubernetes-part-3-memory-metrics-sources-and-tools-to-collect-them/ -func (c *cgroupsV1MemoryLimitChecker) isLimitExceeded() (bool, error) { +func (c *cgroupsMemoryLimitChecker) isLimitExceeded() (bool, error) { var limit, usage, inactive int var err error - limit, err = readIntFromFile(c.limitFile) - if err != nil { + if limit, err = readIntFromFile(c.files.limitFile); err != nil { return false, err 
} - usage, err = readIntFromFile(c.usageFile) - if err != nil { + if usage, err = readIntFromFile(c.files.usageFile); err != nil { return false, err } - inactive, err = readInactive(c.statsFile) - if err != nil { + if inactive, err = readInactive(c.files.statsFile, c.files.inactiveRe); err != nil { return false, err } return usage-inactive >= ((limit * c.memoryLimitPercent) / 100), nil } +func (c cgroupsMemoryLimitChecker) String() string { + return "CgroupsMemoryLimitChecker" +} + func readIntFromFile(fileName string) (int, error) { file, err := os.Open(fileName) if err != nil { @@ -176,9 +196,7 @@ func readIntFromFile(fileName string) (int, error) { return limit, nil } -var re = regexp.MustCompile(`total_inactive_file (\d+)`) - -func readInactive(fileName string) (int, error) { +func readInactive(fileName string, re *regexp.Regexp) (int, error) { file, err := os.Open(fileName) if err != nil { return 0, err @@ -201,7 +219,3 @@ func readInactive(fileName string) (int, error) { return 0, errors.New("total_inactive_file not found in " + fileName) } - -func (c cgroupsV1MemoryLimitChecker) String() string { - return "CgroupsV1MemoryLimitChecker" -} diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index fe470e706b..3d1248d913 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -6,52 +6,57 @@ package resourcemanager import ( "fmt" "os" + "regexp" "testing" ) -func updateFakeCgroupv1Files(c *cgroupsV1MemoryLimitChecker, limit, usage, inactive int) error { - limitFile, err := os.Create(c.limitFile) +func updateFakeCgroupFiles(c *cgroupsMemoryLimitChecker, limit, usage, inactive int) error { + limitFile, err := os.Create(c.files.limitFile) if err != nil { return err } - _, err = fmt.Fprintf(limitFile, "%d\n", limit) - if err != nil { + if _, err = fmt.Fprintf(limitFile, "%d\n", limit); err != nil { return err } - usageFile, err := os.Create(c.usageFile) + usageFile, err := os.Create(c.files.usageFile) if err != nil { return err } - _, err = fmt.Fprintf(usageFile, "%d\n", usage) - if err != nil { + if _, err = fmt.Fprintf(usageFile, "%d\n", usage); err != nil { return err } - statsFile, err := os.Create(c.statsFile) + statsFile, err := os.Create(c.files.statsFile) if err != nil { return err } - _, err = fmt.Fprintf(statsFile, `total_cache 1029980160 + if _, err = fmt.Fprintf(statsFile, `total_cache 1029980160 total_rss 1016209408 total_inactive_file %d total_active_file 321544192 -`, inactive) - if err != nil { +`, inactive); err != nil { return err } return nil } -func TestCgroupsv1MemoryLimit(t *testing.T) { +func TestCgroupsMemoryLimit(t *testing.T) { cgroupDir := t.TempDir() - c := newCgroupsV1MemoryLimitChecker(cgroupDir, 95) + testFiles := cgroupsMemoryFiles{ + limitFile: cgroupDir + "/memory.limit_in_bytes", + usageFile: cgroupDir + "/memory.usage_in_bytes", + statsFile: cgroupDir + "/memory.stat", + inactiveRe: regexp.MustCompile(`total_inactive_file (\d+)`), + } + + c := newCgroupsMemoryLimitChecker(testFiles, 95) _, err := c.isLimitExceeded() if err == nil { t.Error("Should fail open if can't read files") } - err = updateFakeCgroupv1Files(c, 1000, 1000, 51) + err = updateFakeCgroupFiles(c, 1000, 1000, 51) if err != nil { t.Error(err) } @@ -63,7 +68,7 @@ func TestCgroupsv1MemoryLimit(t *testing.T) { t.Error("Expected under limit") } - err = updateFakeCgroupv1Files(c, 1000, 1000, 50) + err = updateFakeCgroupFiles(c, 1000, 1000, 50) if err != nil { 
t.Error(err) } @@ -74,5 +79,4 @@ func TestCgroupsv1MemoryLimit(t *testing.T) { if !exceeded { t.Error("Expected over limit") } - } From 232ded2a24121796b4c77cf0d6a3c1fc69de864e Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 17 Jul 2023 13:49:03 -0700 Subject: [PATCH 004/117] Split cgroups test into 3 tests --- .../resource_management_test.go | 54 +++++++++++-------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index 3d1248d913..699bcd24e1 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -31,52 +31,62 @@ func updateFakeCgroupFiles(c *cgroupsMemoryLimitChecker, limit, usage, inactive if err != nil { return err } - if _, err = fmt.Fprintf(statsFile, `total_cache 1029980160 + _, err = fmt.Fprintf(statsFile, `total_cache 1029980160 total_rss 1016209408 total_inactive_file %d total_active_file 321544192 -`, inactive); err != nil { - return err - } - return nil +`, inactive) + return err } -func TestCgroupsMemoryLimit(t *testing.T) { - cgroupDir := t.TempDir() - testFiles := cgroupsMemoryFiles{ +func makeCgroupsTestDir(cgroupDir string) cgroupsMemoryFiles { + return cgroupsMemoryFiles{ limitFile: cgroupDir + "/memory.limit_in_bytes", usageFile: cgroupDir + "/memory.usage_in_bytes", statsFile: cgroupDir + "/memory.stat", inactiveRe: regexp.MustCompile(`total_inactive_file (\d+)`), } +} +func TestCgroupsFailIfCantOpen(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) c := newCgroupsMemoryLimitChecker(testFiles, 95) - _, err := c.isLimitExceeded() - if err == nil { - t.Error("Should fail open if can't read files") + var err error + if _, err = c.isLimitExceeded(); err == nil { + t.Fatal("Should fail open if can't read files") } +} - err = updateFakeCgroupFiles(c, 1000, 1000, 51) - if err != nil { - t.Error(err) +func TestCgroupsLimitNotExceeded(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 95) + + var err error + if err = updateFakeCgroupFiles(c, 1000, 1000, 51); err != nil { + t.Fatal(err) } exceeded, err := c.isLimitExceeded() if err != nil { - t.Error(err) + t.Fatal(err) } if exceeded { - t.Error("Expected under limit") + t.Fatal("Expected under limit") } +} - err = updateFakeCgroupFiles(c, 1000, 1000, 50) - if err != nil { - t.Error(err) +func TestCgroupsLimitExceeded(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 95) + + var err error + if err = updateFakeCgroupFiles(c, 1000, 1000, 50); err != nil { + t.Fatal(err) } - exceeded, err = c.isLimitExceeded() + exceeded, err := c.isLimitExceeded() if err != nil { - t.Error(err) + t.Fatal(err) } if !exceeded { - t.Error("Expected over limit") + t.Fatal("Expected over limit") } } From 70d09f489635cfc15cceb993c54294a77b765f8f Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 31 Jul 2023 15:48:32 -0700 Subject: [PATCH 005/117] Use go-ethereum version with fixed build --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 949af3144e..31addf8423 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 949af3144ecbfcd09b457b440ff42eb672e671c8 +Subproject commit 31addf8423c0e028aa4b9308dca9f570aa0602ff From bace811afa437b8770b6751765565817fe73a6ad Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 9 Aug 2023 16:23:25 -0700 
Subject: [PATCH 006/117] Refactor resourcemanager as suggested in PR#1744 --- .../resourcemanager/resource_management.go | 26 +++++--- .../resource_management_test.go | 63 ++++++++++--------- 2 files changed, 48 insertions(+), 41 deletions(-) diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index b1bdc45d70..7d0ab47a0d 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -23,6 +23,7 @@ var ( limitCheckDurationHistogram = metrics.NewRegisteredHistogram("arb/rpc/limitcheck/duration", nil, metrics.NewBoundedHistogramSample()) limitCheckSuccessCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/success", nil) limitCheckFailureCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/failure", nil) + errNotSupported = errors.New("not supported") ) // Init adds the resource manager's httpServer to a custom hook in geth. @@ -33,7 +34,15 @@ var ( func Init(conf *Config) { if conf.MemoryLimitPercent > 0 { node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { - return newHttpServer(srv, newLimitChecker(conf)), nil + var c limitChecker + var err error + c, err = newCgroupsMemoryLimitCheckerIfSupported(conf) + if errors.Is(err, errNotSupported) { + log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") + c = &trivialLimitChecker{} + } + + return newHttpServer(srv, c), nil } } } @@ -95,25 +104,22 @@ func isSupported(c limitChecker) bool { return err == nil } -// newLimitChecker attempts to auto-discover the mechanism by which it -// can check system limits. Currently Cgroups V1 and V2 are supported. -// If no supported mechanism is discovered, it logs an error and -// fails open, ie it creates a trivialLimitChecker that does no checks. -func newLimitChecker(conf *Config) limitChecker { +// newCgroupsMemoryLimitCheckerIfSupported attempts to auto-discover whether +// Cgroups V1 or V2 is supported for checking system memory limits. +func newCgroupsMemoryLimitCheckerIfSupported(conf *Config) (*cgroupsMemoryLimitChecker, error) { c := newCgroupsMemoryLimitChecker(cgroupsV1MemoryFiles, conf.MemoryLimitPercent) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") - return c + return c, nil } c = newCgroupsMemoryLimitChecker(cgroupsV2MemoryFiles, conf.MemoryLimitPercent) if isSupported(c) { log.Info("Cgroups v2 detected, enabling memory limit RPC throttling") - return c + return c, nil } - log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") - return &trivialLimitChecker{} + return nil, errNotSupported } // trivialLimitChecker checks no limits, so its limits are never exceeded. 
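A quick worked example (not part of the patch) of the working-set check carried over
from the earlier resource_management.go changes: isLimitExceeded treats usage minus
inactive_file as the working set and compares it against limit * memoryLimitPercent / 100.
With the same constants the tests below use:

	// limit = 1000, memoryLimitPercent = 95  =>  threshold = (1000 * 95) / 100 = 950
	// usage = 1000, inactive = 50  =>  working set = 950  =>  950 >= 950, limit exceeded
	// usage = 1000, inactive = 51  =>  working set = 949  =>  949 <  950, under the limit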
diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index 699bcd24e1..21f1dca236 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -57,36 +57,37 @@ func TestCgroupsFailIfCantOpen(t *testing.T) { } } -func TestCgroupsLimitNotExceeded(t *testing.T) { - testFiles := makeCgroupsTestDir(t.TempDir()) - c := newCgroupsMemoryLimitChecker(testFiles, 95) - - var err error - if err = updateFakeCgroupFiles(c, 1000, 1000, 51); err != nil { - t.Fatal(err) - } - exceeded, err := c.isLimitExceeded() - if err != nil { - t.Fatal(err) - } - if exceeded { - t.Fatal("Expected under limit") - } -} - -func TestCgroupsLimitExceeded(t *testing.T) { - testFiles := makeCgroupsTestDir(t.TempDir()) - c := newCgroupsMemoryLimitChecker(testFiles, 95) - - var err error - if err = updateFakeCgroupFiles(c, 1000, 1000, 50); err != nil { - t.Fatal(err) - } - exceeded, err := c.isLimitExceeded() - if err != nil { - t.Fatal(err) - } - if !exceeded { - t.Fatal("Expected over limit") +func TestCgroupsMemoryLimit(t *testing.T) { + for _, tc := range []struct { + desc string + inactive int + want bool + }{ + { + desc: "limit should be exceeded", + inactive: 50, + want: true, + }, + { + desc: "limit should not be exceeded", + inactive: 51, + want: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 95) + if err := updateFakeCgroupFiles(c, 1000, 1000, tc.inactive); err != nil { + t.Fatalf("Updating cgroup files: %v", err) + } + exceeded, err := c.isLimitExceeded() + if err != nil { + t.Fatalf("Checking if limit exceeded: %v", err) + } + if exceeded != tc.want { + t.Errorf("isLimitExceeded() = %t, want %t", exceeded, tc.want) + } + }, + ) } } From 33f63430f6d229ee9a91364378082e5dfd08ad8e Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 10 Aug 2023 10:29:10 -0700 Subject: [PATCH 007/117] Fix comments on PR #1744 --- arbnode/resourcemanager/resource_management.go | 3 +-- arbnode/resourcemanager/resource_management_test.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index 7d0ab47a0d..c4d7ee3795 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -35,8 +35,7 @@ func Init(conf *Config) { if conf.MemoryLimitPercent > 0 { node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { var c limitChecker - var err error - c, err = newCgroupsMemoryLimitCheckerIfSupported(conf) + c, err := newCgroupsMemoryLimitCheckerIfSupported(conf) if errors.Is(err, errNotSupported) { log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") c = &trivialLimitChecker{} diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index 21f1dca236..ba791fd729 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -51,8 +51,7 @@ func makeCgroupsTestDir(cgroupDir string) cgroupsMemoryFiles { func TestCgroupsFailIfCantOpen(t *testing.T) { testFiles := makeCgroupsTestDir(t.TempDir()) c := newCgroupsMemoryLimitChecker(testFiles, 95) - var err error - if _, err = c.isLimitExceeded(); err == nil { + if _, err := c.isLimitExceeded(); err == nil { 
t.Fatal("Should fail open if can't read files") } } From 5f182227e3ab21170517088df02448bda16ccd24 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 10 Aug 2023 12:24:19 -0600 Subject: [PATCH 008/117] Don't treat hitting batch L1 bounds as a backlog --- arbnode/batch_poster.go | 55 ++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 601302e536..1443382ca8 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -55,21 +55,22 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *SimpleRedisLock - firstAccErr time.Time // first time a continuous missing accumulator occurred - backlog uint64 // An estimate of the number of unposted batches + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *SimpleRedisLock + firstAccErr time.Time // first time a continuous missing accumulator occurred + backlog uint64 // An estimate of the number of unposted batches + lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds batchReverted atomic.Bool // indicates whether data poster batch was reverted } @@ -92,7 +93,7 @@ type BatchPosterConfig struct { MaxBatchSize int `koanf:"max-size" reload:"hot"` MaxBatchPostDelay time.Duration `koanf:"max-delay" reload:"hot"` WaitForMaxBatchPostDelay bool `koanf:"wait-for-max-delay" reload:"hot"` - BatchPollDelay time.Duration `koanf:"poll-delay" reload:"hot"` + PollInterval time.Duration `koanf:"poll-interval" reload:"hot"` PostingErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` CompressionLevel int `koanf:"compression-level" reload:"hot"` DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` @@ -141,7 +142,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxBatchSize, "maximum batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxBatchPostDelay, "maximum batch posting delay") f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxBatchPostDelay, "wait for the max batch delay, even if the batch is full") - f.Duration(prefix+".poll-delay", DefaultBatchPosterConfig.BatchPollDelay, "how long to delay after successfully posting batch") + f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.PostingErrorDelay, "how long to delay after error posting batch") f.Int(prefix+".compression-level", DefaultBatchPosterConfig.CompressionLevel, "batch compression level") f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the 
period which DASes are requested to retain the stored batches.") @@ -159,7 +160,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, MaxBatchSize: 100000, - BatchPollDelay: time.Second * 10, + PollInterval: time.Second * 10, PostingErrorDelay: time.Second * 10, MaxBatchPostDelay: time.Hour, WaitForMaxBatchPostDelay: false, @@ -184,7 +185,7 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ var TestBatchPosterConfig = BatchPosterConfig{ Enable: true, MaxBatchSize: 100000, - BatchPollDelay: time.Millisecond * 10, + PollInterval: time.Millisecond * 10, PostingErrorDelay: time.Millisecond * 10, MaxBatchPostDelay: 0, WaitForMaxBatchPostDelay: false, @@ -797,6 +798,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) l1BoundMaxTimestamp = math.MaxUint64 } if msg.Message.Header.BlockNumber > l1BoundMaxBlockNumber || msg.Message.Header.Timestamp > l1BoundMaxTimestamp { + b.lastHitL1Bounds = time.Now() log.Info( "not posting more messages because block number or timestamp exceed L1 bounds", "blockNumber", msg.Message.Header.BlockNumber, @@ -884,16 +886,20 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "current delayed", b.building.segments.delayedMsg, "total segments", len(b.building.segments.rawSegments), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount unpostedMessages := msgCount - b.building.msgCount b.backlog = uint64(unpostedMessages) / uint64(postedMessages) if b.backlog > 10 { logLevel := log.Warn - if b.backlog > 30 { + if recentlyHitL1Bounds { + logLevel = log.Info + } else if b.backlog > 30 { logLevel = log.Error } logLevel( "a large batch posting backlog exists", + "recentlyHitL1Bounds", recentlyHitL1Bounds, "currentPosition", b.building.msgCount, "messageCount", msgCount, "lastPostedMessages", postedMessages, @@ -901,6 +907,11 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "batchBacklogEstimate", b.backlog, ) } + if recentlyHitL1Bounds { + // This backlog isn't "real" in that we don't want to post any more messages. + // Setting the backlog to 0 here ensures that we don't lower compression as a result. 
+ b.backlog = 0 + } b.building = nil return true, nil } @@ -930,7 +941,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } if !b.redisLock.AttemptLock(ctx) { b.building = nil - return b.config().BatchPollDelay + return b.config().PollInterval } posted, err := b.maybePostSequencerBatch(ctx) if err != nil { @@ -953,7 +964,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } else if posted { return 0 } else { - return b.config().BatchPollDelay + return b.config().PollInterval } }) } From 468a2062624dba3ea2c5f4d3bb9a2638d1e9c9fc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 10 Aug 2023 13:16:45 -0600 Subject: [PATCH 009/117] Default to using "safe" instead of "finalized" for delayed sequencing --- arbnode/delayed_sequencer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index f45a85ac49..aa6d43785e 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -52,14 +52,14 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ Enable: false, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } var TestDelayedSequencerConfig = DelayedSequencerConfig{ Enable: true, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } From f1be9c0d0dd5b81a34fb5ba23ee22b3591ad0667 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 15 Aug 2023 12:51:21 -0600 Subject: [PATCH 010/117] Update backlog comment to mention L1 bounds --- arbnode/batch_poster.go | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 5c34fc07c2..62681a3f4f 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -56,21 +56,23 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - firstAccErr time.Time // first time a continuous missing accumulator occurred - backlog uint64 // An estimate of the number of unposted batches + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + firstAccErr time.Time // first time a continuous missing accumulator occurred + // An estimate of the number of batches we want to post but haven't yet. + // This doesn't include batches which we don't want to post yet due to the L1 bounds. 
+ backlog uint64 lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds batchReverted atomic.Bool // indicates whether data poster batch was reverted From 813e42e9f09621374382f6caea3251d2d1530b3b Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 16 Aug 2023 17:32:10 +0200 Subject: [PATCH 011/117] Fix koanf lint errors by matching koanf tags with field names --- arbnode/batch_poster.go | 111 ++++++++-------- arbnode/execution/blockchain.go | 40 +++--- arbnode/message_pruner.go | 17 +-- arbnode/node.go | 122 +++++++++--------- .../resourcemanager/resource_management.go | 10 +- arbnode/seq_coordinator.go | 106 +++++++-------- arbnode/seq_coordinator_atomic_test.go | 20 +-- arbnode/transaction_streamer.go | 10 +- broadcastclient/broadcastclient.go | 8 +- cmd/conf/chain.go | 44 +++---- cmd/daserver/daserver.go | 24 ++-- cmd/datool/datool.go | 40 +++--- cmd/deploy/deploy.go | 8 +- cmd/genericconf/wallet.go | 12 +- cmd/nitro-val/config.go | 4 +- cmd/nitro-val/nitro_val.go | 2 +- cmd/nitro/config_test.go | 4 +- cmd/nitro/init.go | 24 ++-- cmd/nitro/nitro.go | 102 +++++++-------- cmd/relay/relay.go | 2 +- cmd/util/keystore.go | 2 +- cmd/util/keystore_test.go | 4 +- das/aggregator.go | 12 +- das/aggregator_test.go | 16 +-- das/das.go | 32 ++--- das/das_test.go | 16 +-- das/db_storage_service.go | 14 +- das/factory.go | 80 ++++++------ das/local_file_storage_service.go | 12 +- das/redis_storage_service.go | 26 ++-- das/redis_storage_service_test.go | 4 +- das/rpc_aggregator.go | 6 +- das/rpc_test.go | 12 +- das/s3_storage_service.go | 22 ++-- das/sign_after_store_das_writer.go | 6 +- das/simple_das_reader_aggregator.go | 42 +++--- das/syncing_fallback_storage.go | 38 +++--- relay/relay.go | 14 +- staker/block_validator.go | 4 +- staker/staker.go | 76 +++++------ system_tests/batch_poster_test.go | 4 +- system_tests/block_validator_test.go | 4 +- system_tests/common_test.go | 18 +-- system_tests/das_test.go | 60 ++++----- system_tests/forwarder_test.go | 8 +- system_tests/seq_coordinator_test.go | 10 +- system_tests/seqfeed_test.go | 6 +- system_tests/twonodes_test.go | 2 +- system_tests/twonodeslong_test.go | 2 +- validator/server_arb/validator_spawner.go | 2 +- 50 files changed, 636 insertions(+), 628 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index f870e3a6fa..a2f6d9686c 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -88,23 +88,28 @@ const ( ) type BatchPosterConfig struct { - Enable bool `koanf:"enable"` - DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` - MaxBatchSize int `koanf:"max-size" reload:"hot"` - MaxBatchPostDelay time.Duration `koanf:"max-delay" reload:"hot"` - WaitForMaxBatchPostDelay bool `koanf:"wait-for-max-delay" reload:"hot"` - BatchPollDelay time.Duration `koanf:"poll-delay" reload:"hot"` - PostingErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` - CompressionLevel int `koanf:"compression-level" reload:"hot"` - DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` - GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` - DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` - RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` - ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` - L1Wallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` - L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` - 
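+	// Bypass the L1 block bound and post batches anyway once we're within this
+	// margin of the max batch posting delay (see the l1-block-bound-bypass flag
+	// description below).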
L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` + Enable bool `koanf:"enable"` + DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` + // Max batch size. + MaxSize int `koanf:"max-size" reload:"hot"` + // Max batch post delay. + MaxDelay time.Duration `koanf:"max-delay" reload:"hot"` + // Wait for max BatchPost delay. + WaitForMaxDelay bool `koanf:"wait-for-max-delay" reload:"hot"` + // Batch post polling delay. + PollDelay time.Duration `koanf:"poll-delay" reload:"hot"` + // Batch posting error delay. + ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` + CompressionLevel int `koanf:"compression-level" reload:"hot"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` + GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` + L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -115,7 +120,7 @@ func (c *BatchPosterConfig) Validate() error { return fmt.Errorf("invalid gas refunder address \"%v\"", c.GasRefunderAddress) } c.gasRefunder = common.HexToAddress(c.GasRefunderAddress) - if c.MaxBatchSize <= 40 { + if c.MaxSize <= 40 { return errors.New("MaxBatchSize too small") } if c.L1BlockBound == "" { @@ -139,11 +144,11 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") - f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxBatchSize, "maximum batch size") - f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxBatchPostDelay, "maximum batch posting delay") - f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxBatchPostDelay, "wait for the max batch delay, even if the batch is full") - f.Duration(prefix+".poll-delay", DefaultBatchPosterConfig.BatchPollDelay, "how long to delay after successfully posting batch") - f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.PostingErrorDelay, "how long to delay after error posting batch") + f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") + f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") + f.Duration(prefix+".poll-delay", DefaultBatchPosterConfig.PollDelay, "how long to delay after successfully posting batch") + f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.ErrorDelay, "how long to delay after error posting batch") f.Int(prefix+".compression-level", DefaultBatchPosterConfig.CompressionLevel, "batch compression level") f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the period which DASes are requested 
to retain the stored batches.") f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") @@ -153,50 +158,50 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) - genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.L1Wallet.Pathname) + genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) } var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, - MaxBatchSize: 100000, - BatchPollDelay: time.Second * 10, - PostingErrorDelay: time.Second * 10, - MaxBatchPostDelay: time.Hour, - WaitForMaxBatchPostDelay: false, + MaxSize: 100000, + PollDelay: time.Second * 10, + ErrorDelay: time.Second * 10, + MaxDelay: time.Hour, + WaitForMaxDelay: false, CompressionLevel: brotli.BestCompression, DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 50_000, DataPoster: dataposter.DefaultDataPosterConfig, - L1Wallet: DefaultBatchPosterL1WalletConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", L1BlockBoundBypass: time.Hour, } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ Pathname: "batch-poster-wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, } var TestBatchPosterConfig = BatchPosterConfig{ - Enable: true, - MaxBatchSize: 100000, - BatchPollDelay: time.Millisecond * 10, - PostingErrorDelay: time.Millisecond * 10, - MaxBatchPostDelay: 0, - WaitForMaxBatchPostDelay: false, - CompressionLevel: 2, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 10_000, - DataPoster: dataposter.TestDataPosterConfig, - L1Wallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, + Enable: true, + MaxSize: 100000, + PollDelay: time.Millisecond * 10, + ErrorDelay: time.Millisecond * 10, + MaxDelay: 0, + WaitForMaxDelay: false, + CompressionLevel: 2, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 10_000, + DataPoster: dataposter.TestDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, } func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { @@ -374,8 +379,8 @@ type buildingBatch struct { } func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxBatchSize*2)) - if config.MaxBatchSize <= 40 { + compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) + if config.MaxSize <= 40 { 
panic("MaxBatchSize too small") } compressionLevel := config.CompressionLevel @@ -401,7 +406,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxBatchSize - 40, // TODO + sizeLimit: config.MaxSize - 40, // TODO recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: firstDelayed, @@ -717,7 +722,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) config := b.config() - forcePostBatch := time.Since(firstMsgTime) >= config.MaxBatchPostDelay + forcePostBatch := time.Since(firstMsgTime) >= config.MaxDelay var l1BoundMaxBlockNumber uint64 = math.MaxUint64 var l1BoundMaxTimestamp uint64 = math.MaxUint64 @@ -815,7 +820,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if !success { // this batch is full - if !config.WaitForMaxBatchPostDelay { + if !config.WaitForMaxDelay { forcePostBatch = true } b.building.haveUsefulMessage = true @@ -930,7 +935,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } if !b.redisLock.AttemptLock(ctx) { b.building = nil - return b.config().BatchPollDelay + return b.config().PollDelay } posted, err := b.maybePostSequencerBatch(ctx) if err != nil { @@ -949,11 +954,11 @@ func (b *BatchPoster) Start(ctxIn context.Context) { b.firstAccErr = time.Time{} } logLevel("error posting batch", "err", err) - return b.config().PostingErrorDelay + return b.config().ErrorDelay } else if posted { return 0 } else { - return b.config().BatchPollDelay + return b.config().PollDelay } }) } diff --git a/arbnode/execution/blockchain.go b/arbnode/execution/blockchain.go index 88e7044e8d..0ce76d8ccd 100644 --- a/arbnode/execution/blockchain.go +++ b/arbnode/execution/blockchain.go @@ -26,15 +26,15 @@ import ( ) type CachingConfig struct { - Archive bool `koanf:"archive"` - BlockCount uint64 `koanf:"block-count"` - BlockAge time.Duration `koanf:"block-age"` - TrieTimeLimit time.Duration `koanf:"trie-time-limit"` - TrieDirtyCache int `koanf:"trie-dirty-cache"` - TrieCleanCache int `koanf:"trie-clean-cache"` - SnapshotCache int `koanf:"snapshot-cache"` - DatabaseCache int `koanf:"database-cache"` - SnapshotRestoreMaxGas uint64 `koanf:"snapshot-restore-gas-limit"` + Archive bool `koanf:"archive"` + BlockCount uint64 `koanf:"block-count"` + BlockAge time.Duration `koanf:"block-age"` + TrieTimeLimit time.Duration `koanf:"trie-time-limit"` + TrieDirtyCache int `koanf:"trie-dirty-cache"` + TrieCleanCache int `koanf:"trie-clean-cache"` + SnapshotCache int `koanf:"snapshot-cache"` + DatabaseCache int `koanf:"database-cache"` + SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -46,19 +46,19 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".trie-clean-cache", DefaultCachingConfig.TrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with") f.Int(prefix+".snapshot-cache", DefaultCachingConfig.SnapshotCache, "amount of memory in megabytes to cache state snapshots with") f.Int(prefix+".database-cache", DefaultCachingConfig.DatabaseCache, "amount of memory in megabytes to cache database contents with") - f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreMaxGas, 
"maximum gas rolled back to recover snapshot") + f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") } var DefaultCachingConfig = CachingConfig{ - Archive: false, - BlockCount: 128, - BlockAge: 30 * time.Minute, - TrieTimeLimit: time.Hour, - TrieDirtyCache: 1024, - TrieCleanCache: 600, - SnapshotCache: 400, - DatabaseCache: 2048, - SnapshotRestoreMaxGas: 300_000_000_000, + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, } func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { @@ -79,7 +79,7 @@ func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core TrieRetention: cachingConfig.BlockAge, SnapshotLimit: cachingConfig.SnapshotCache, Preimages: baseConf.Preimages, - SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreMaxGas, + SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreGasLimit, } } diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index b469ecdbef..31bf1a63ff 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -33,22 +33,23 @@ type MessagePruner struct { } type MessagePrunerConfig struct { - Enable bool `koanf:"enable"` - MessagePruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` - MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` + Enable bool `koanf:"enable"` + // Message pruning interval. + PruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` + MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` } type MessagePrunerConfigFetcher func() *MessagePrunerConfig var DefaultMessagePrunerConfig = MessagePrunerConfig{ - Enable: true, - MessagePruneInterval: time.Minute, - MinBatchesLeft: 2, + Enable: true, + PruneInterval: time.Minute, + MinBatchesLeft: 2, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultMessagePrunerConfig.Enable, "enable message pruning") - f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.MessagePruneInterval, "interval for running message pruner") + f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.PruneInterval, "interval for running message pruner") f.Uint64(prefix+".min-batches-left", DefaultMessagePrunerConfig.MinBatchesLeft, "min number of batches not pruned") } @@ -70,7 +71,7 @@ func (m *MessagePruner) UpdateLatestConfirmed(count arbutil.MessageIndex, global return } - if m.lastPruneDone.Add(m.config().MessagePruneInterval).After(time.Now()) { + if m.lastPruneDone.Add(m.config().PruneInterval).After(time.Now()) { m.pruningLock.Unlock() return } diff --git a/arbnode/node.go b/arbnode/node.go index 2f5e4a69b2..5f2a99592f 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -286,34 +286,34 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b } type Config struct { - RPC arbitrum.Config `koanf:"rpc"` - Sequencer execution.SequencerConfig `koanf:"sequencer" reload:"hot"` - L1Reader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` - InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` - DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` - BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` - MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` - 
ForwardingTargetImpl string `koanf:"forwarding-target"` - Forwarder execution.ForwarderConfig `koanf:"forwarder"` - TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` - BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` - RecordingDB arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` - Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker"` - SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` - DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` - Dangerous DangerousConfig `koanf:"dangerous"` - Caching execution.CachingConfig `koanf:"caching"` - Archive bool `koanf:"archive"` - TxLookupLimit uint64 `koanf:"tx-lookup-limit"` - TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` - Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceManagement resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + RPC arbitrum.Config `koanf:"rpc"` + Sequencer execution.SequencerConfig `koanf:"sequencer" reload:"hot"` + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` + DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` + BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` + MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` + ForwardingTarget string `koanf:"forwarding-target"` + Forwarder execution.ForwarderConfig `koanf:"forwarder"` + TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` + BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` + RecordingDB arbitrum.RecordingDatabaseConfig `koanf:"recording-db"` + Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` + Staker staker.L1ValidatorConfig `koanf:"staker"` + SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` + Dangerous DangerousConfig `koanf:"dangerous"` + Caching execution.CachingConfig `koanf:"caching"` + Archive bool `koanf:"archive"` + TxLookupLimit uint64 `koanf:"tx-lookup-limit"` + TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` + Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` + ResourceManagement resourcemanager.Config `koanf:"resource-management" reload:"hot"` } func (c *Config) Validate() error { - if c.L1Reader.Enable && c.Sequencer.Enable && !c.DelayedSequencer.Enable { + if c.ParentChainReader.Enable && c.Sequencer.Enable && !c.DelayedSequencer.Enable { log.Warn("delayed sequencer is not enabled, despite sequencer and l1 reader being enabled") } if c.DelayedSequencer.Enable && !c.Sequencer.Enable { @@ -343,12 +343,12 @@ func (c *Config) Validate() error { return nil } -func (c *Config) ForwardingTarget() string { - if c.ForwardingTargetImpl == "null" { +func (c *Config) ForwardingTargetF() string { + if c.ForwardingTarget == "null" { return "" } - return c.ForwardingTargetImpl + return c.ForwardingTarget } func (c *Config) ValidatorRequired() bool { @@ -369,11 +369,11 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed DelayedSequencerConfigAddOptions(prefix+".delayed-sequencer", f) 
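	// The koanf lint requires each koanf tag to match its field name; the flag
	// names registered here mirror those tags (e.g. ".forwarding-target" <->
	// ForwardingTarget), hence the renames in this patch.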
BatchPosterConfigAddOptions(prefix+".batch-poster", f) MessagePrunerConfigAddOptions(prefix+".message-pruner", f) - f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") + f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) - arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) + arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-db", f) broadcastclient.FeedConfigAddOptions(prefix+".feed", f, feedInputEnable, feedOutputEnable) staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) @@ -384,35 +384,35 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) MaintenanceConfigAddOptions(prefix+".maintenance", f) - resourcemanager.ConfigAddOptions(prefix+".resource-mgmt", f) + resourcemanager.ConfigAddOptions(prefix+".resource-management", f) archiveMsg := fmt.Sprintf("retain past block state (deprecated, please use %v.caching.archive)", prefix) f.Bool(prefix+".archive", ConfigDefault.Archive, archiveMsg) } var ConfigDefault = Config{ - RPC: arbitrum.DefaultConfig, - Sequencer: execution.DefaultSequencerConfig, - L1Reader: headerreader.DefaultConfig, - InboxReader: DefaultInboxReaderConfig, - DelayedSequencer: DefaultDelayedSequencerConfig, - BatchPoster: DefaultBatchPosterConfig, - MessagePruner: DefaultMessagePrunerConfig, - ForwardingTargetImpl: "", - TxPreChecker: execution.DefaultTxPreCheckerConfig, - BlockValidator: staker.DefaultBlockValidatorConfig, - RecordingDB: arbitrum.DefaultRecordingDatabaseConfig, - Feed: broadcastclient.FeedConfigDefault, - Staker: staker.DefaultL1ValidatorConfig, - SeqCoordinator: DefaultSeqCoordinatorConfig, - DataAvailability: das.DefaultDataAvailabilityConfig, - SyncMonitor: DefaultSyncMonitorConfig, - Dangerous: DefaultDangerousConfig, - Archive: false, - TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second - Caching: execution.DefaultCachingConfig, - TransactionStreamer: DefaultTransactionStreamerConfig, - ResourceManagement: resourcemanager.DefaultConfig, + RPC: arbitrum.DefaultConfig, + Sequencer: execution.DefaultSequencerConfig, + ParentChainReader: headerreader.DefaultConfig, + InboxReader: DefaultInboxReaderConfig, + DelayedSequencer: DefaultDelayedSequencerConfig, + BatchPoster: DefaultBatchPosterConfig, + MessagePruner: DefaultMessagePrunerConfig, + ForwardingTarget: "", + TxPreChecker: execution.DefaultTxPreCheckerConfig, + BlockValidator: staker.DefaultBlockValidatorConfig, + RecordingDB: arbitrum.DefaultRecordingDatabaseConfig, + Feed: broadcastclient.FeedConfigDefault, + Staker: staker.DefaultL1ValidatorConfig, + SeqCoordinator: DefaultSeqCoordinatorConfig, + DataAvailability: das.DefaultDataAvailabilityConfig, + SyncMonitor: DefaultSyncMonitorConfig, + Dangerous: DefaultDangerousConfig, + Archive: false, + TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second + 
Caching: execution.DefaultCachingConfig, + TransactionStreamer: DefaultTransactionStreamerConfig, + ResourceManagement: resourcemanager.DefaultConfig, } func ConfigDefaultL1Test() *Config { @@ -427,7 +427,7 @@ func ConfigDefaultL1Test() *Config { func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault - config.L1Reader = headerreader.TestConfig + config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.Sequencer.Enable = false config.DelayedSequencer.Enable = false @@ -445,12 +445,12 @@ func ConfigDefaultL1NonSequencerTest() *Config { func ConfigDefaultL2Test() *Config { config := ConfigDefault config.Sequencer = execution.TestSequencerConfig - config.L1Reader.Enable = false + config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig config.Feed.Input.Verifier.Dangerous.AcceptMissing = true config.Feed.Output.Signed = false - config.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - config.SeqCoordinator.Signing.ECDSA.Dangerous.AcceptMissing = true + config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" config.TransactionStreamer = DefaultTransactionStreamerConfig @@ -603,8 +603,8 @@ func createNodeImpl( } var l1Reader *headerreader.HeaderReader - if config.L1Reader.Enable { - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().L1Reader }) + if config.ParentChainReader.Enable { + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }) if err != nil { return nil, err } @@ -613,7 +613,7 @@ func createNodeImpl( sequencerConfigFetcher := func() *execution.SequencerConfig { return &configFetcher.Get().Sequencer } txprecheckConfigFetcher := func() *execution.TxPreCheckerConfig { return &configFetcher.Get().TxPreChecker } exec, err := execution.CreateExecutionNode(stack, chainDb, l2BlockChain, l1Reader, syncMonitor, - config.ForwardingTarget(), &config.Forwarder, config.RPC, &config.RecordingDB, + config.ForwardingTargetF(), &config.Forwarder, config.RPC, &config.RecordingDB, sequencerConfigFetcher, txprecheckConfigFetcher) if err != nil { return nil, err @@ -683,7 +683,7 @@ func createNodeImpl( } } - if !config.L1Reader.Enable { + if !config.ParentChainReader.Enable { return &Node{ ArbDB: arbDb, Stack: stack, diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index acb5355987..1c0d798098 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -31,7 +31,7 @@ var ( // // Must be run before the go-ethereum stack is set up (ethereum/go-ethereum/node.New). func Init(conf *Config) { - if conf.MemoryLimitPercent > 0 { + if conf.MemLimitPercent > 0 { node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { return newHttpServer(srv, newLimitChecker(conf)), nil } @@ -42,18 +42,18 @@ func Init(conf *Config) { // Currently only a memory limit is supported, other limits may be added // in the future. type Config struct { - MemoryLimitPercent int `koanf:"mem-limit-percent" reload:"hot"` + MemLimitPercent int `koanf:"mem-limit-percent" reload:"hot"` } // DefaultConfig has the default resourcemanager configuration, // all limits are disabled.
var DefaultConfig = Config{ - MemoryLimitPercent: 0, + MemLimitPercent: 0, } // ConfigAddOptions adds the configuration options for resourcemanager. func ConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Int(prefix+".mem-limit-percent", DefaultConfig.MemoryLimitPercent, "RPC calls are throttled if system memory utilization exceeds this percent value, zero (default) is disabled") + f.Int(prefix+".mem-limit-percent", DefaultConfig.MemLimitPercent, "RPC calls are throttled if system memory utilization exceeds this percent value, zero (default) is disabled") } // httpServer implements http.Handler and wraps calls to inner with a resource @@ -96,7 +96,7 @@ type limitChecker interface { // mechanism is discovered, it logs an error and fails open, ie // it creates a trivialLimitChecker that does no checks. func newLimitChecker(conf *Config) limitChecker { - c := newCgroupsV1MemoryLimitChecker(DefaultCgroupsV1MemoryDirectory, conf.MemoryLimitPercent) + c := newCgroupsV1MemoryLimitChecker(DefaultCgroupsV1MemoryDirectory, conf.MemLimitPercent) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") return c diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 31cab83b1f..82796f3905 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -58,28 +58,28 @@ type SeqCoordinator struct { } type SeqCoordinatorConfig struct { - Enable bool `koanf:"enable"` - ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` - RedisUrl string `koanf:"redis-url"` - LockoutDuration time.Duration `koanf:"lockout-duration"` - LockoutSpare time.Duration `koanf:"lockout-spare"` - SeqNumDuration time.Duration `koanf:"seq-num-duration"` - UpdateInterval time.Duration `koanf:"update-interval"` - RetryInterval time.Duration `koanf:"retry-interval"` - HandoffTimeout time.Duration `koanf:"handoff-timeout"` - SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` - ReleaseRetries int `koanf:"release-retries"` - MaxMsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` - MyUrlImpl string `koanf:"my-url"` - Signing signature.SignVerifyConfig `koanf:"signer"` -} - -func (c *SeqCoordinatorConfig) MyUrl() string { - if c.MyUrlImpl == "" { + Enable bool `koanf:"enable"` + ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` + RedisUrl string `koanf:"redis-url"` + LockoutDuration time.Duration `koanf:"lockout-duration"` + LockoutSpare time.Duration `koanf:"lockout-spare"` + SeqNumDuration time.Duration `koanf:"seq-num-duration"` + UpdateInterval time.Duration `koanf:"update-interval"` + RetryInterval time.Duration `koanf:"retry-interval"` + HandoffTimeout time.Duration `koanf:"handoff-timeout"` + SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` + ReleaseRetries int `koanf:"release-retries"` + // Max message per poll. 
+ MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` + MyUrl string `koanf:"my-url"` + Signer signature.SignVerifyConfig `koanf:"signer"` +} + +func (c *SeqCoordinatorConfig) Url() string { + if c.MyUrl == "" { return redisutil.INVALID_URL } - - return c.MyUrlImpl + return c.MyUrl } func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -94,8 +94,8 @@ func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".handoff-timeout", DefaultSeqCoordinatorConfig.HandoffTimeout, "the maximum amount of time to spend waiting for another sequencer to accept the lockout when handing it off on shutdown or db compaction") f.Duration(prefix+".safe-shutdown-delay", DefaultSeqCoordinatorConfig.SafeShutdownDelay, "if non-zero will add delay after transferring control") f.Int(prefix+".release-retries", DefaultSeqCoordinatorConfig.ReleaseRetries, "the number of times to retry releasing the wants lockout and chosen one status on shutdown") - f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MaxMsgPerPoll), "will only be marked as wanting the lockout if not too far behind") - f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrlImpl, "url for this sequencer if it is the chosen") + f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MsgPerPoll), "will only be marked as wanting the lockout if not too far behind") + f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrl, "url for this sequencer if it is the chosen") signature.SignVerifyConfigAddOptions(prefix+".signer", f) } @@ -111,9 +111,9 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: 5 * time.Second, ReleaseRetries: 4, RetryInterval: 50 * time.Millisecond, - MaxMsgPerPoll: 2000, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, + MsgPerPoll: 2000, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, } var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ @@ -127,9 +127,9 @@ var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: time.Millisecond * 100, ReleaseRetries: 4, RetryInterval: time.Millisecond * 3, - MaxMsgPerPoll: 20, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, + MsgPerPoll: 20, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, } func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contracts.BatchPosterVerifier, streamer *TransactionStreamer, sequencer *execution.Sequencer, sync *SyncMonitor, config SeqCoordinatorConfig) (*SeqCoordinator, error) { @@ -137,7 +137,7 @@ func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contrac if err != nil { return nil, err } - signer, err := signature.NewSignVerify(&config.Signing, dataSigner, bpvalidator) + signer, err := signature.NewSignVerify(&config.Signer, dataSigner, bpvalidator) if err != nil { return nil, err } @@ -250,7 +250,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if c.config.Signing.SymmetricSign { + if c.config.Signer.SymmetricSign { messageString := string(append(msgSig, msgBytes...)) messageData = &messageString } else { @@ -278,7 +278,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if !wasEmpty && (current != c.config.MyUrl()) { + if !wasEmpty && (current != c.config.Url()) { return fmt.Errorf("%w: failed to catch lock. 
redis shows chosen: %s", execution.ErrRetrySequencer, current) } remoteMsgCount, err := c.getRemoteMsgCountImpl(ctx, tx) @@ -300,7 +300,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC initialDuration = 2 * time.Second } if wasEmpty { - pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.MyUrl(), initialDuration) + pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.Url(), initialDuration) } pipe.Set(ctx, redisutil.MSG_COUNT_KEY, msgCountMsg, c.config.SeqNumDuration) if messageData != nil { @@ -311,7 +311,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC } pipe.PExpireAt(ctx, redisutil.CHOSENSEQ_KEY, lockoutUntil) if setWantsLockout { - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) pipe.Set(ctx, myWantsLockoutKey, redisutil.WANTS_LOCKOUT_VAL, initialDuration) pipe.PExpireAt(ctx, myWantsLockoutKey, lockoutUntil) } @@ -362,7 +362,7 @@ func (c *SeqCoordinator) wantsLockoutUpdateWithMutex(ctx context.Context) error if c.avoidLockout > 0 { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) wantsLockoutUntil := time.Now().Add(c.config.LockoutDuration) pipe := c.Client.TxPipeline() initialDuration := c.config.LockoutDuration @@ -390,7 +390,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if err != nil { return err } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } pipe := tx.TxPipeline() @@ -409,7 +409,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if errors.Is(readErr, redis.Nil) { return nil } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } return releaseErr @@ -421,7 +421,7 @@ func (c *SeqCoordinator) wantsLockoutRelease(ctx context.Context) error { if !c.reportedWantsLockout { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) releaseErr := c.Client.Del(ctx, myWantsLockoutKey).Err() if releaseErr != nil { // got error - was it still deleted? 
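A note on the pattern behind these renames: Nitro reads its configuration through koanf, and it is the `koanf` struct tag, not the Go field name, that binds a config key to a field. Renaming MyUrlImpl to MyUrl (or Signing to Signer) is therefore invisible to operators as long as tags like `koanf:"my-url"` stay the same; only the hand-written flag registrations such as f.String(prefix+".my-url", ...) must be updated to read the new field names, which is what the hunks here do. Below is a minimal sketch of that tag-driven binding, assuming koanf v1 with its rawbytes provider and JSON parser; demoConfig and the input value are hypothetical, not code from this patch.

package main

import (
	"fmt"

	"github.com/knadh/koanf"
	"github.com/knadh/koanf/parsers/json"
	"github.com/knadh/koanf/providers/rawbytes"
)

// demoConfig is a hypothetical stand-in for SeqCoordinatorConfig: key lookup
// is driven purely by the koanf tag, so renaming the Go field is a pure
// refactor with no user-visible effect.
type demoConfig struct {
	MyUrl string `koanf:"my-url"`
}

func main() {
	k := koanf.New(".")
	raw := []byte(`{"my-url": "http://sequencer:8547"}`) // hypothetical input
	if err := k.Load(rawbytes.Provider(raw), json.Parser()); err != nil {
		panic(err)
	}
	var c demoConfig
	if err := k.Unmarshal("", &c); err != nil { // Unmarshal matches on the koanf tag
		panic(err)
	}
	fmt.Println(c.MyUrl) // http://sequencer:8547
}

One side effect visible throughout the diff: once the plain identifiers (MyUrl, Password, ForwardingTarget) belong to the fields themselves, the accessors that wrap them move to new names (Url, Pwd, ForwardingTargetF).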
@@ -450,7 +450,7 @@ func (c *SeqCoordinator) noRedisError() time.Duration { // update for the prev known-chosen sequencer (no need to load new messages) func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen string) time.Duration { - if nextChosen != "" && nextChosen != c.config.MyUrl() { + if nextChosen != "" && nextChosen != c.config.Url() { // was the active sequencer, but no longer // we maintain chosen status if we had it and nobody in the priorities wants the lockout setPrevChosenTo := nextChosen @@ -467,7 +467,7 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin return c.retryAfterRedisError() } c.prevChosenSequencer = setPrevChosenTo - log.Info("released chosen-coordinator lock", "myUrl", c.config.MyUrl(), "nextChosen", nextChosen) + log.Info("released chosen-coordinator lock", "myUrl", c.config.Url(), "nextChosen", nextChosen) return c.noRedisError() } // Was, and still is, the active sequencer @@ -496,10 +496,10 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("coordinator failed finding sequencer wanting lockout", "err", err) return c.retryAfterRedisError() } - if c.prevChosenSequencer == c.config.MyUrl() { + if c.prevChosenSequencer == c.config.Url() { return c.updateWithLockout(ctx, chosenSeq) } - if chosenSeq != c.config.MyUrl() && chosenSeq != c.prevChosenSequencer { + if chosenSeq != c.config.Url() && chosenSeq != c.prevChosenSequencer { var err error if c.sequencer != nil { err = c.sequencer.ForwardTo(chosenSeq) @@ -526,8 +526,8 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { return c.retryAfterRedisError() } readUntil := remoteMsgCount - if readUntil > localMsgCount+c.config.MaxMsgPerPoll { - readUntil = localMsgCount + c.config.MaxMsgPerPoll + if readUntil > localMsgCount+c.config.MsgPerPoll { + readUntil = localMsgCount + c.config.MsgPerPoll } var messages []arbostypes.MessageWithMetadata msgToRead := localMsgCount @@ -599,7 +599,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } } - if c.config.MyUrl() == redisutil.INVALID_URL { + if c.config.Url() == redisutil.INVALID_URL { return c.noRedisError() } @@ -614,7 +614,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } // can take over as main sequencer? 
- if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.MyUrl() { + if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.Url() { if c.sequencer == nil { log.Error("myurl main sequencer, but no sequencer exists") return c.noRedisError() @@ -639,7 +639,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { c.prevChosenSequencer = "" return c.retryAfterRedisError() } - log.Info("caught chosen-coordinator lock", "myUrl", c.config.MyUrl()) + log.Info("caught chosen-coordinator lock", "myUrl", c.config.Url()) if c.delayedSequencer != nil { err = c.delayedSequencer.ForceSequenceDelayed(ctx) if err != nil { @@ -651,7 +651,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("failed to populate the feed backlog on lockout acquisition", "err", err) } c.sequencer.Activate() - c.prevChosenSequencer = c.config.MyUrl() + c.prevChosenSequencer = c.config.Url() return c.noRedisError() } } @@ -684,7 +684,7 @@ func (c *SeqCoordinator) AvoidingLockout() bool { func (c *SeqCoordinator) DebugPrint() string { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() - return fmt.Sprint("Url:", c.config.MyUrl(), + return fmt.Sprint("Url:", c.config.Url(), " prevChosenSequencer:", c.prevChosenSequencer, " reportedWantsLockout:", c.reportedWantsLockout, " lockoutUntil:", c.lockoutUntil, @@ -760,7 +760,7 @@ func (c *SeqCoordinator) StopAndWait() { // We've just stopped our normal context so we need to use our parent's context. parentCtx := c.StopWaiter.GetParentContext() for i := 0; i <= c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing wants lockout key", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing wants lockout key", "myUrl", c.config.Url(), "attempt", i) err := c.wantsLockoutRelease(parentCtx) if err == nil { c.noRedisError() @@ -771,7 +771,7 @@ func (c *SeqCoordinator) StopAndWait() { } } for i := 0; i < c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing chosen one", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing chosen one", "myUrl", c.config.Url(), "attempt", i) err := c.chosenOneRelease(parentCtx) if err == nil { c.noRedisError() @@ -804,7 +804,7 @@ func (c *SeqCoordinator) AvoidLockout(ctx context.Context) bool { c.wantsLockoutMutex.Lock() c.avoidLockout++ c.wantsLockoutMutex.Unlock() - log.Info("avoiding lockout", "myUrl", c.config.MyUrl()) + log.Info("avoiding lockout", "myUrl", c.config.Url()) err := c.wantsLockoutRelease(ctx) if err != nil { log.Error("failed to release wanting the lockout in redis", "err", err) @@ -818,7 +818,7 @@ func (c *SeqCoordinator) TryToHandoffChosenOne(ctx context.Context) bool { ctx, cancel := context.WithTimeout(ctx, c.config.HandoffTimeout) defer cancel() if c.CurrentlyChosen() { - log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.MyUrl()) + log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.Url()) success := c.waitFor(ctx, func() bool { return !c.CurrentlyChosen() }) @@ -842,7 +842,7 @@ func (c *SeqCoordinator) SeekLockout(ctx context.Context) { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() c.avoidLockout-- - log.Info("seeking lockout", "myUrl", c.config.MyUrl()) + log.Info("seeking lockout", "myUrl", c.config.Url()) if c.sync.Synced() { // Even if this errors we still internally marked ourselves as wanting the lockout err := 
c.wantsLockoutUpdateWithMutex(ctx) diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_atomic_test.go index 8cc0acadae..61468a3adb 100644 --- a/arbnode/seq_coordinator_atomic_test.go +++ b/arbnode/seq_coordinator_atomic_test.go @@ -69,7 +69,7 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo timeLaunching := time.Now() // didn't sequence.. should we have succeeded? if timeLaunching.Before(holdingLockout) { - execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.MyUrl(), err) + execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.Url(), err) break } } @@ -79,9 +79,9 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo continue } if data.sequencer[i] != "" { - execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.MyUrl()) + execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.Url()) } - data.sequencer[i] = coord.config.MyUrl() + data.sequencer[i] = coord.config.Url() } if execError != nil { data.err = execError @@ -99,16 +99,16 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { coordConfig := TestSeqCoordinatorConfig coordConfig.LockoutDuration = time.Millisecond * 100 coordConfig.LockoutSpare = time.Millisecond * 10 - coordConfig.Signing.ECDSA.AcceptSequencer = false - coordConfig.Signing.SymmetricFallback = true - coordConfig.Signing.SymmetricSign = true - coordConfig.Signing.Symmetric.Dangerous.DisableSignatureVerification = true - coordConfig.Signing.Symmetric.SigningKey = "" + coordConfig.Signer.ECDSA.AcceptSequencer = false + coordConfig.Signer.SymmetricFallback = true + coordConfig.Signer.SymmetricSign = true + coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true + coordConfig.Signer.Symmetric.SigningKey = "" testData := CoordinatorTestData{ testStartRound: -1, sequencer: make([]string, messagesPerRound), } - nullSigner, err := signature.NewSignVerify(&coordConfig.Signing, nil, nil) + nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil) Require(t, err) redisUrl := redisutil.CreateTestRedis(ctx, t) @@ -121,7 +121,7 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { for i := 0; i < NumOfThreads; i++ { config := coordConfig - config.MyUrlImpl = fmt.Sprint(i) + config.MyUrl = fmt.Sprint(i) redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) Require(t, err) coordinator := &SeqCoordinator{ diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index a8db1923a9..0f514ba9ca 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -69,7 +69,7 @@ type TransactionStreamer struct { } type TransactionStreamerConfig struct { - MaxBroadcastQueueSize int `koanf:"max-broadcaster-queue-size"` + MaxBroadcasterQueueSize int `koanf:"max-broadcaster-queue-size"` MaxReorgResequenceDepth int64 `koanf:"max-reorg-resequence-depth" reload:"hot"` ExecuteMessageLoopDelay time.Duration `koanf:"execute-message-loop-delay" reload:"hot"` } @@ -77,19 +77,19 @@ type TransactionStreamerConfig struct { type TransactionStreamerConfigFetcher func() *TransactionStreamerConfig var DefaultTransactionStreamerConfig = TransactionStreamerConfig{ - MaxBroadcastQueueSize: 1024, + MaxBroadcasterQueueSize: 1024, MaxReorgResequenceDepth: 1024, ExecuteMessageLoopDelay: time.Millisecond * 100, } var TestTransactionStreamerConfig = 
TransactionStreamerConfig{ - MaxBroadcastQueueSize: 10_000, + MaxBroadcasterQueueSize: 10_000, MaxReorgResequenceDepth: 128 * 1024, ExecuteMessageLoopDelay: time.Millisecond, } func TransactionStreamerConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcastQueueSize, "maximum cache of pending broadcaster messages") + f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcasterQueueSize, "maximum cache of pending broadcaster messages") f.Int64(prefix+".max-reorg-resequence-depth", DefaultTransactionStreamerConfig.MaxReorgResequenceDepth, "maximum number of messages to attempt to resequence on reorg (0 = never resequence, -1 = always resequence)") f.Duration(prefix+".execute-message-loop-delay", DefaultTransactionStreamerConfig.ExecuteMessageLoopDelay, "delay when polling calls to execute messages") } @@ -479,7 +479,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.B s.broadcasterQueuedMessagesActiveReorg = feedReorg } else if broadcasterQueuedMessagesPos+arbutil.MessageIndex(len(s.broadcasterQueuedMessages)) == broadcastStartPos { // Feed messages can be added directly to end of cache - maxQueueSize := s.config().MaxBroadcastQueueSize + maxQueueSize := s.config().MaxBroadcasterQueueSize if maxQueueSize == 0 || len(s.broadcasterQueuedMessages) <= maxQueueSize { s.broadcasterQueuedMessages = append(s.broadcasterQueuedMessages, messages...) } diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index f78ef2aa9f..9bfee17a5a 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -68,8 +68,8 @@ type Config struct { RequireChainId bool `koanf:"require-chain-id" reload:"hot"` RequireFeedVersion bool `koanf:"require-feed-version" reload:"hot"` Timeout time.Duration `koanf:"timeout" reload:"hot"` - URLs []string `koanf:"url"` - Verifier signature.VerifierConfig `koanf:"verify"` + URLs []string `koanf:"urls"` + Verifier signature.VerifierConfig `koanf:"verifier"` EnableCompression bool `koanf:"enable-compression" reload:"hot"` } @@ -85,8 +85,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".require-chain-id", DefaultConfig.RequireChainId, "require chain id to be present on connect") f.Bool(prefix+".require-feed-version", DefaultConfig.RequireFeedVersion, "require feed version to be present on connect") f.Duration(prefix+".timeout", DefaultConfig.Timeout, "duration to wait before timing out connection to sequencer feed") - f.StringSlice(prefix+".url", DefaultConfig.URLs, "URL of sequencer feed source") - signature.FeedVerifierConfigAddOptions(prefix+".verify", f) + f.StringSlice(prefix+".urls", DefaultConfig.URLs, "URL of sequencer feed source") + signature.FeedVerifierConfigAddOptions(prefix+".verifier", f) f.Bool(prefix+".enable-compression", DefaultConfig.EnableCompression, "enable per message deflate compression support") } diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 54b6176f96..4eed9678ad 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -12,7 +12,7 @@ import ( ) type L1Config struct { - ChainID uint64 `koanf:"id"` + ChainID uint64 `koanf:"chain-id"` Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` Wallet genericconf.WalletConfig `koanf:"wallet"` } @@ -32,7 +32,7 @@ var L1ConfigDefault = L1Config{ var DefaultL1WalletConfig = genericconf.WalletConfig{ Pathname: "wallet", - PasswordImpl: 
genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, @@ -53,35 +53,35 @@ func (c *L1Config) Validate() error { } type L2Config struct { - ChainID uint64 `koanf:"id"` - ChainName string `koanf:"name"` - ChainInfoFiles []string `koanf:"info-files"` - ChainInfoJson string `koanf:"info-json"` - DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` - ChainInfoIpfsUrl string `koanf:"info-ipfs-url"` - ChainInfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` + ID uint64 `koanf:"id"` + Name string `koanf:"name"` + InfoFiles []string `koanf:"info-files"` + InfoJson string `koanf:"info-json"` + DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` + InfoIpfsUrl string `koanf:"info-ipfs-url"` + InfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` } var L2ConfigDefault = L2Config{ - ChainID: 0, - ChainName: "", - ChainInfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go - ChainInfoJson: "", - DevWallet: genericconf.WalletConfigDefault, - ChainInfoIpfsUrl: "", - ChainInfoIpfsDownloadPath: "/tmp/", + ID: 0, + Name: "", + InfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go + InfoJson: "", + DevWallet: genericconf.WalletConfigDefault, + InfoIpfsUrl: "", + InfoIpfsDownloadPath: "/tmp/", } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainID, "L2 chain ID (determines Arbitrum network)") - f.String(prefix+".name", L2ConfigDefault.ChainName, "L2 chain name (determines Arbitrum network)") - f.StringSlice(prefix+".info-files", L2ConfigDefault.ChainInfoFiles, "L2 chain info json files") - f.String(prefix+".info-json", L2ConfigDefault.ChainInfoJson, "L2 chain info in json string format") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID (determines Arbitrum network)") + f.String(prefix+".name", L2ConfigDefault.Name, "L2 chain name (determines Arbitrum network)") + f.StringSlice(prefix+".info-files", L2ConfigDefault.InfoFiles, "L2 chain info json files") + f.String(prefix+".info-json", L2ConfigDefault.InfoJson, "L2 chain info in json string format") // Dev wallet does not exist unless specified genericconf.WalletConfigAddOptions(prefix+".dev-wallet", f, "") - f.String(prefix+".info-ipfs-url", L2ConfigDefault.ChainInfoIpfsUrl, "url to download chain info file") - f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.ChainInfoIpfsDownloadPath, "path to save temp downloaded file") + f.String(prefix+".info-ipfs-url", L2ConfigDefault.InfoIpfsUrl, "url to download chain info file") + f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.InfoIpfsDownloadPath, "path to save temp downloaded file") } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index ba0451b0e0..7cdfc39915 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -38,10 +38,10 @@ type DAServerConfig struct { RESTPort uint64 `koanf:"rest-port"` RESTServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rest-server-timeouts"` - DAConf das.DataAvailabilityConfig `koanf:"data-availability"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` - LogLevel int `koanf:"log-level"` + Conf 
genericconf.ConfConfig `koanf:"conf"` + LogLevel int `koanf:"log-level"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -58,8 +58,8 @@ var DefaultDAServerConfig = DAServerConfig{ RESTAddr: "localhost", RESTPort: 9877, RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, - DAConf: das.DefaultDataAvailabilityConfig, - ConfConfig: genericconf.ConfConfigDefault, + DataAvailability: das.DefaultDataAvailabilityConfig, + Conf: genericconf.ConfConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, PProf: false, @@ -109,7 +109,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { if err := confighelpers.EndCommonParse(k, &serverConfig); err != nil { return nil, err } - if serverConfig.ConfConfig.Dump { + if serverConfig.Conf.Dump { err = confighelpers.DumpConfig(k, map[string]interface{}{ "data-availability.key.priv-key": "", }) @@ -191,8 +191,8 @@ func startup() error { defer cancel() var l1Reader *headerreader.HeaderReader - if serverConfig.DAConf.L1NodeURL != "" && serverConfig.DAConf.L1NodeURL != "none" { - l1Client, err := das.GetL1Client(ctx, serverConfig.DAConf.L1ConnectionAttempts, serverConfig.DAConf.L1NodeURL) + if serverConfig.DataAvailability.ParentChainNodeURL != "" && serverConfig.DataAvailability.ParentChainNodeURL != "none" { + l1Client, err := das.GetL1Client(ctx, serverConfig.DataAvailability.ParentChainConnectionAttempts, serverConfig.DataAvailability.ParentChainNodeURL) if err != nil { return err } @@ -203,10 +203,10 @@ func startup() error { } var seqInboxAddress *common.Address - if serverConfig.DAConf.SequencerInboxAddress == "none" { + if serverConfig.DataAvailability.SequencerInboxAddress == "none" { seqInboxAddress = nil - } else if len(serverConfig.DAConf.SequencerInboxAddress) > 0 { - seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DAConf.SequencerInboxAddress) + } else if len(serverConfig.DataAvailability.SequencerInboxAddress) > 0 { + seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DataAvailability.SequencerInboxAddress) if err != nil { return err } @@ -217,7 +217,7 @@ func startup() error { return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address, or 'none'") } - daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DAConf, l1Reader, seqInboxAddress) + daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DataAvailability, l1Reader, seqInboxAddress) if err != nil { return err } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 6f975ec712..1de85037ee 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -91,7 +91,7 @@ type ClientStoreConfig struct { SigningKey string `koanf:"signing-key"` SigningWallet string `koanf:"signing-wallet"` SigningWalletPassword string `koanf:"signing-wallet-password"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + Conf genericconf.ConfConfig `koanf:"conf"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -151,7 +151,7 @@ func startClientStore(args []string) error { } else if config.SigningWallet != "" { walletConf := &genericconf.WalletConfig{ Pathname: config.SigningWallet, - PasswordImpl: config.SigningWalletPassword, + Password: config.SigningWalletPassword, PrivateKey: "", Account: "", OnlyCreateKey: false, @@ -196,9 +196,9 @@ func startClientStore(args []string) error { // 
datool client rest getbyhash type RESTClientGetByHashConfig struct { - URL string `koanf:"url"` - DataHash string `koanf:"data-hash"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + URL string `koanf:"url"` + DataHash string `koanf:"data-hash"` + Conf genericconf.ConfConfig `koanf:"conf"` } func parseRESTClientGetByHashConfig(args []string) (*RESTClientGetByHashConfig, error) { @@ -257,10 +257,12 @@ func startRESTClientGetByHash(args []string) error { // das keygen type KeyGenConfig struct { - Dir string - ConfConfig genericconf.ConfConfig `koanf:"conf"` - ECDSAMode bool `koanf:"ecdsa"` - WalletMode bool `koanf:"wallet"` + Dir string + Conf genericconf.ConfConfig `koanf:"conf"` + // ECDSA mode. + ECDSA bool `koanf:"ecdsa"` + // Wallet mode. + Wallet bool `koanf:"wallet"` } func parseKeyGenConfig(args []string) (*KeyGenConfig, error) { @@ -288,18 +290,18 @@ func startKeyGen(args []string) error { return err } - if !config.ECDSAMode { + if !config.ECDSA { _, _, err = das.GenerateAndStoreKeys(config.Dir) if err != nil { return err } return nil - } else if !config.WalletMode { + } else if !config.Wallet { return das.GenerateAndStoreECDSAKeys(config.Dir) } else { walletConf := &genericconf.WalletConfig{ Pathname: config.Dir, - PasswordImpl: genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password + Password: genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password PrivateKey: "", Account: "", OnlyCreateKey: true, @@ -333,7 +335,7 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { return nil, err } - if config.ConfConfig.Dump { + if config.Conf.Dump { c, err := k.Marshal(koanfjson.Parser()) if err != nil { return nil, fmt.Errorf("unable to marshal config file to JSON: %w", err) @@ -343,10 +345,10 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { os.Exit(0) } - if config.KeysetConfig.AssumedHonest == 0 { + if config.Keyset.AssumedHonest == 0 { return nil, errors.New("--keyset.assumed-honest must be set") } - if config.KeysetConfig.Backends == "" { + if config.Keyset.Backends == "" { return nil, errors.New("--keyset.backends must be set") } @@ -356,8 +358,8 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { // das keygen type DumpKeysetConfig struct { - KeysetConfig das.AggregatorConfig `koanf:"keyset"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + Keyset das.AggregatorConfig `koanf:"keyset"` + Conf genericconf.ConfConfig `koanf:"conf"` } func dumpKeyset(args []string) error { @@ -366,12 +368,12 @@ func dumpKeyset(args []string) error { return err } - services, err := das.ParseServices(config.KeysetConfig) + services, err := das.ParseServices(config.Keyset) if err != nil { return err } - keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.KeysetConfig.AssumedHonest)) + keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.Keyset.AssumedHonest)) if err != nil { return err } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 43906eb98e..357fda14e6 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -64,10 +64,10 @@ func main() { } wallet := genericconf.WalletConfig{ - Pathname: *l1keystore, - Account: *deployAccount, - PasswordImpl: *l1passphrase, - PrivateKey: *l1privatekey, + Pathname: *l1keystore, + Account: *deployAccount, + Password: *l1passphrase, + PrivateKey: *l1privatekey, } l1TransactionOpts, _, err := util.OpenWallet("l1", &wallet, l1ChainId) if err != nil { diff --git a/cmd/genericconf/wallet.go 
b/cmd/genericconf/wallet.go index 6e6f30e0c5..e05452e3b3 100644 --- a/cmd/genericconf/wallet.go +++ b/cmd/genericconf/wallet.go @@ -14,22 +14,22 @@ const PASSWORD_NOT_SET = "PASSWORD_NOT_SET" type WalletConfig struct { Pathname string `koanf:"pathname"` - PasswordImpl string `koanf:"password"` + Password string `koanf:"password"` PrivateKey string `koanf:"private-key"` Account string `koanf:"account"` OnlyCreateKey bool `koanf:"only-create-key"` } -func (w *WalletConfig) Password() *string { - if w.PasswordImpl == PASSWORD_NOT_SET { +func (w *WalletConfig) Pwd() *string { + if w.Password == PASSWORD_NOT_SET { return nil } - return &w.PasswordImpl + return &w.Password } var WalletConfigDefault = WalletConfig{ Pathname: "", - PasswordImpl: PASSWORD_NOT_SET, + Password: PASSWORD_NOT_SET, PrivateKey: "", Account: "", OnlyCreateKey: false, @@ -37,7 +37,7 @@ var WalletConfigDefault = WalletConfig{ func WalletConfigAddOptions(prefix string, f *flag.FlagSet, defaultPathname string) { f.String(prefix+".pathname", defaultPathname, "pathname for wallet") - f.String(prefix+".password", WalletConfigDefault.PasswordImpl, "wallet passphrase") + f.String(prefix+".password", WalletConfigDefault.Password, "wallet passphrase") f.String(prefix+".private-key", WalletConfigDefault.PrivateKey, "private key for wallet") f.String(prefix+".account", WalletConfigDefault.Account, "account to use (default is first account in keystore)") f.Bool(prefix+".only-create-key", WalletConfigDefault.OnlyCreateKey, "if true, creates new key then exits") diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 12a359cfa4..cf10787d6d 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -27,7 +27,7 @@ type ValidationNodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` PProf bool `koanf:"pprof"` @@ -66,7 +66,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ HTTP: HTTPConfigDefault, WS: WSConfigDefault, IPC: IPCConfigDefault, - AuthRPC: genericconf.AuthRPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, PProf: false, diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 503b016025..a461a36900 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -68,7 +68,7 @@ func mainImpl() int { stackConf.DataDir = "" // ephemeral nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index dad91ef936..67cd6ac055 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -85,7 +85,7 @@ func TestReloads(t *testing.T) { // check that non-reloadable fields fail assignment update.Metrics = !update.Metrics testUnsafe() - update.L2.ChainID++ + update.Chain.ID++ testUnsafe() update.Node.Sequencer.Forwarder.ConnectionTimeout++ testUnsafe() @@ -122,7 +122,7 @@ func TestLiveNodeConfig(t *testing.T) { // check that an invalid reload gets rejected update = config.ShallowClone() - update.L2.ChainID++ + update.Chain.ID++ if liveConfig.Set(update) == nil { Fail(t, 
"failed to reject invalid update") } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a1df2cbb2f..6480526897 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -51,7 +51,7 @@ type InitConfig struct { DownloadPath string `koanf:"download-path"` DownloadPoll time.Duration `koanf:"download-poll"` DevInit bool `koanf:"dev-init"` - DevInitAddr string `koanf:"dev-init-address"` + DevInitAddress string `koanf:"dev-init-address"` DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` Empty bool `koanf:"empty"` AccountsPerSync uint `koanf:"accounts-per-sync"` @@ -59,7 +59,7 @@ type InitConfig struct { ThenQuit bool `koanf:"then-quit"` Prune string `koanf:"prune"` PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMsg int64 `koanf:"reset-to-message"` + ResetToMessage int64 `koanf:"reset-to-message"` } var InitConfigDefault = InitConfig{ @@ -68,14 +68,14 @@ var InitConfigDefault = InitConfig{ DownloadPath: "/tmp/", DownloadPoll: time.Minute, DevInit: false, - DevInitAddr: "", + DevInitAddress: "", DevInitBlockNum: 0, ImportFile: "", AccountsPerSync: 100000, ThenQuit: false, Prune: "", PruneBloomSize: 2048, - ResetToMsg: -1, + ResetToMessage: -1, } func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -84,7 +84,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") - f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddr, "Address of dev-account. Leave empty to use the dev-wallet.") + f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.") f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.") f.Bool(prefix+".empty", InitConfigDefault.DevInit, "init with empty state") f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") @@ -92,7 +92,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") - f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMsg, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages") } func downloadInit(ctx context.Context, initConfig *InitConfig) (string, error) { @@ -515,7 +515,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo NextBlockNumber: config.Init.DevInitBlockNum, Accounts: []statetransfer.AccountInitializationInfo{ { - Addr: common.HexToAddress(config.Init.DevInitAddr), + Addr: common.HexToAddress(config.Init.DevInitAddress), EthBalance: new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(1000)), Nonce: 0, }, @@ -551,15 +551,15 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := config.L2.ChainInfoFiles - if config.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.L2.ChainInfoIpfsUrl, config.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFiles := config.Chain.InfoFiles + if config.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting l2 chain info file from ipfs", "err", err) } combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) } - chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.L2.ChainID), config.L2.ChainName, genesisBlockNr, combinedL2ChainInfoFiles, config.L2.ChainInfoJson) + chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err } @@ -584,7 +584,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo cacheConfig.SnapshotWait = true } var parsedInitMessage *arbostypes.ParsedInitMessage - if config.Node.L1Reader.Enable { + if config.Node.ParentChainReader.Enable { delayedBridge, err := arbnode.NewDelayedBridge(l1Client, rollupAddrs.Bridge, rollupAddrs.DeployedAt) if err != nil { return chainDb, nil, fmt.Errorf("failed creating delayed bridge while attempting to get serialized chain config from init message: %w", err) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index e7e6d2075c..9e9bc1362e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -87,21 +87,21 @@ func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.W account.Address = common.HexToAddress(walletConf.Account) account, err = myKeystore.Find(account) } else { - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } if devPrivKey == nil { return common.Address{}, errors.New("l2 private key not set") } - account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Password()) + account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Pwd()) } if err != nil { return common.Address{}, err } - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } - err = myKeystore.Unlock(account, *walletConf.Password()) + err = myKeystore.Unlock(account, *walletConf.Pwd()) if err != nil { return common.Address{}, err } @@ -159,7 +159,7 @@ func mainImpl() int { stackConf.DataDir = nodeConfig.Persistent.Chain nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) 
nodeConfig.GraphQL.Apply(&stackConf) if nodeConfig.WS.ExposeAll { @@ -207,23 +207,23 @@ func mainImpl() int { log.Info("Running Arbitrum nitro node", "revision", vcsRevision, "vcs.time", vcsTime) if nodeConfig.Node.Dangerous.NoL1Listener { - nodeConfig.Node.L1Reader.Enable = false + nodeConfig.Node.ParentChainReader.Enable = false nodeConfig.Node.BatchPoster.Enable = false nodeConfig.Node.DelayedSequencer.Enable = false } else { - nodeConfig.Node.L1Reader.Enable = true + nodeConfig.Node.ParentChainReader.Enable = true } if nodeConfig.Node.Sequencer.Enable { - if nodeConfig.Node.ForwardingTarget() != "" { + if nodeConfig.Node.ForwardingTargetF() != "" { flag.Usage() log.Crit("forwarding-target cannot be set when sequencer is enabled") } - if nodeConfig.Node.L1Reader.Enable && nodeConfig.Node.InboxReader.HardReorg { + if nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { flag.Usage() log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") } - } else if nodeConfig.Node.ForwardingTargetImpl == "" { + } else if nodeConfig.Node.ForwardingTarget == "" { flag.Usage() log.Crit("forwarding-target unset, and not sequencer (can set to \"null\" to disable forwarding)") } @@ -239,17 +239,17 @@ func mainImpl() int { defaultL1WalletConfig := conf.DefaultL1WalletConfig defaultL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.Staker.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.Staker.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultValidatorL1WalletConfig := staker.DefaultValidatorL1WalletConfig defaultValidatorL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.BatchPoster.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.BatchPoster.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultBatchPosterL1WalletConfig := arbnode.DefaultBatchPosterL1WalletConfig defaultBatchPosterL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - if nodeConfig.Node.Staker.L1Wallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.L1Wallet == defaultBatchPosterL1WalletConfig { + if nodeConfig.Node.Staker.ParentChainWallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.ParentChainWallet == defaultBatchPosterL1WalletConfig { if sequencerNeedsKey || validatorNeedsKey || l1Wallet.OnlyCreateKey { - l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) if err != nil { flag.Usage() log.Crit("error opening parent chain wallet", "path", l1Wallet.Pathname, "account", l1Wallet.Account, "err", err) @@ -264,31 +264,31 @@ func mainImpl() int { if *l1Wallet != defaultL1WalletConfig { log.Crit("--parent-chain.wallet cannot be set if either --node.staker.l1-wallet or --node.batch-poster.l1-wallet are set") } - if sequencerNeedsKey || nodeConfig.Node.BatchPoster.L1Wallet.OnlyCreateKey { - l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, 
new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) if err != nil { flag.Usage() - log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.L1Wallet.Pathname, "account", nodeConfig.Node.BatchPoster.L1Wallet.Account, "err", err) + log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) } - if nodeConfig.Node.BatchPoster.L1Wallet.OnlyCreateKey { + if nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { return 0 } } - if validatorNeedsKey || nodeConfig.Node.Staker.L1Wallet.OnlyCreateKey { - l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if validatorNeedsKey || nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) if err != nil { flag.Usage() - log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.L1Wallet.Pathname, "account", nodeConfig.Node.Staker.L1Wallet.Account, "err", err) + log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) } - if nodeConfig.Node.Staker.L1Wallet.OnlyCreateKey { + if nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { return 0 } } } - combinedL2ChainInfoFile := nodeConfig.L2.ChainInfoFiles - if nodeConfig.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.L2.ChainInfoIpfsUrl, nodeConfig.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles + if nodeConfig.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting chain info file from ipfs", "err", err) } @@ -296,7 +296,7 @@ func mainImpl() int { } if nodeConfig.Node.Staker.Enable { - if !nodeConfig.Node.L1Reader.Enable { + if !nodeConfig.Node.ParentChainReader.Enable { flag.Usage() log.Crit("validator must have the parent chain reader enabled") } @@ -316,8 +316,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client - if nodeConfig.Node.L1Reader.Enable { - confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().L1.Connection } + if nodeConfig.Node.ParentChainReader.Enable { + confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) err := rpcClient.Start(ctx) if err != nil { @@ -328,13 +328,13 @@ func mainImpl() int { if err != nil { log.Crit("couldn't read L1 chainid", "err", err) } - if l1ChainId.Uint64() != nodeConfig.L1.ChainID { - log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.L1.ChainID) + if l1ChainId.Uint64() != nodeConfig.ParentChain.ChainID { + log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.ParentChain.ChainID) } - log.Info("connected to l1 chain", "l1url", nodeConfig.L1.Connection.URL, "l1chainid", nodeConfig.L1.ChainID) + log.Info("connected to l1 chain", "l1url", 
nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ChainID) - rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses", "err", err) } @@ -345,14 +345,14 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.L1Reader }) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) } // Just create validator smart wallet if needed then exit - deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses config", "err", err) } @@ -388,7 +388,7 @@ func mainImpl() int { log.Crit("error opening L2 dev wallet", "err", err) } if devAddr != (common.Address{}) { - nodeConfig.Init.DevInitAddr = devAddr.String() + nodeConfig.Init.DevInitAddress = devAddr.String() } } @@ -404,7 +404,7 @@ func mainImpl() int { } }() - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.L2.ChainID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) if l2BlockChain != nil { deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } @@ -422,7 +422,7 @@ func mainImpl() int { return 1 } - if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMsg < 0 { + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -517,8 +517,8 @@ func mainImpl() int { exitCode := 0 - if err == nil && nodeConfig.Init.ResetToMsg > 0 { - err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMsg)) + if err == nil && nodeConfig.Init.ResetToMessage > 0 { + err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMessage)) if err != nil { fatalErrChan <- fmt.Errorf("error reseting message: %w", err) exitCode = 1 @@ -549,8 +549,8 @@ type NodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Node arbnode.Config `koanf:"node" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - L1 conf.L1Config `koanf:"parent-chain" reload:"hot"` - L2 conf.L2Config `koanf:"chain"` + ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` LogLevel int `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` @@ -558,7 +558,7 @@ type NodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` 
IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` GraphQL genericconf.GraphQLConfig `koanf:"graphql"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -571,8 +571,8 @@ type NodeConfig struct { var NodeConfigDefault = NodeConfig{ Conf: genericconf.ConfConfigDefault, Node: arbnode.ConfigDefault, - L1: conf.L1ConfigDefault, - L2: conf.L2ConfigDefault, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", Persistent: conf.PersistentConfigDefault, @@ -614,8 +614,8 @@ func (c *NodeConfig) ResolveDirectoryNames() error { if err != nil { return err } - c.L1.ResolveDirectoryNames(c.Persistent.Chain) - c.L2.ResolveDirectoryNames(c.Persistent.Chain) + c.ParentChain.ResolveDirectoryNames(c.Persistent.Chain) + c.Chain.ResolveDirectoryNames(c.Persistent.Chain) return nil } @@ -659,7 +659,7 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { - if err := c.L1.Validate(); err != nil { + if err := c.ParentChain.Validate(); err != nil { return err } return c.Node.Validate() @@ -733,10 +733,10 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa } // Don't pass around wallet contents with normal configuration - l1Wallet := nodeConfig.L1.Wallet - l2DevWallet := nodeConfig.L2.DevWallet - nodeConfig.L1.Wallet = genericconf.WalletConfigDefault - nodeConfig.L2.DevWallet = genericconf.WalletConfigDefault + l1Wallet := nodeConfig.ParentChain.Wallet + l2DevWallet := nodeConfig.Chain.DevWallet + nodeConfig.ParentChain.Wallet = genericconf.WalletConfigDefault + nodeConfig.Chain.DevWallet = genericconf.WalletConfigDefault err = nodeConfig.Validate() if err != nil { diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 2bff942a44..0105f94138 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -63,7 +63,7 @@ func startup() error { ctx := context.Background() relayConfig, err := relay.ParseRelay(ctx, os.Args[1:]) - if err != nil || len(relayConfig.Node.Feed.Input.URLs) == 0 || relayConfig.Node.Feed.Input.URLs[0] == "" || relayConfig.L2.ChainId == 0 { + if err != nil || len(relayConfig.Node.Feed.Input.URLs) == 0 || relayConfig.Node.Feed.Input.URLs[0] == "" || relayConfig.Chain.ID == 0 { confighelpers.PrintErrorAndExit(err, printSampleUsage) } diff --git a/cmd/util/keystore.go b/cmd/util/keystore.go index 56749f9722..52a18a42b5 100644 --- a/cmd/util/keystore.go +++ b/cmd/util/keystore.go @@ -79,7 +79,7 @@ func openKeystore(ks *keystore.KeyStore, description string, walletConfig *gener if !creatingNew && walletConfig.OnlyCreateKey { return nil, fmt.Errorf("wallet key already created, backup key (%s) and remove --%s.wallet.only-create-key to run normally", walletConfig.Pathname, description) } - passOpt := walletConfig.Password() + passOpt := walletConfig.Pwd() var password string if passOpt != nil { password = *passOpt diff --git a/cmd/util/keystore_test.go b/cmd/util/keystore_test.go index 17a0498d68..1ee579de28 100644 --- a/cmd/util/keystore_test.go +++ b/cmd/util/keystore_test.go @@ -29,7 +29,7 @@ func createWallet(t *testing.T, pathname string) { walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { @@ -69,7 +69,7 @@ func 
TestExistingKeystoreNoCreate(t *testing.T) { walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { diff --git a/das/aggregator.go b/das/aggregator.go index 33ce5ad489..3b34f12767 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -82,10 +82,10 @@ func NewServiceDetails(service DataAvailabilityServiceWriter, pubKey blsSignatur } func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services []ServiceDetails) (*Aggregator, error) { - if config.L1NodeURL == "none" { + if config.ParentChainNodeURL == "none" { return NewAggregatorWithSeqInboxCaller(config, services, nil) } - l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL) + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) if err != nil { return nil, err } @@ -118,7 +118,7 @@ func NewAggregatorWithSeqInboxCaller( seqInboxCaller *bridgegen.SequencerInboxCaller, ) (*Aggregator, error) { - keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.AggregatorConfig.AssumedHonest)) + keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest)) if err != nil { return nil, err } @@ -129,11 +129,11 @@ func NewAggregatorWithSeqInboxCaller( } return &Aggregator{ - config: config.AggregatorConfig, + config: config.RPCAggregator, services: services, requestTimeout: config.RequestTimeout, - requiredServicesForStore: len(services) + 1 - config.AggregatorConfig.AssumedHonest, - maxAllowedServiceStoreFailures: config.AggregatorConfig.AssumedHonest - 1, + requiredServicesForStore: len(services) + 1 - config.RPCAggregator.AssumedHonest, + maxAllowedServiceStoreFailures: config.RPCAggregator.AssumedHonest - 1, keysetHash: keysetHash, keysetBytes: keysetBytes, bpVerifier: bpVerifier, diff --git a/das/aggregator_test.go b/das/aggregator_test.go index 1b6c60c675..776af3975b 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -34,10 +34,10 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ PrivKey: privKey, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } storageServices = append(storageServices, NewMemoryBackedStorageService(ctx)) @@ -49,7 +49,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { backends = append(backends, *details) } - aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{AggregatorConfig: AggregatorConfig{AssumedHonest: 1}, L1NodeURL: "none"}, backends) + aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends) Require(t, err) rawMsg := []byte("It's time for you to see the fnords.") @@ -187,10 +187,10 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ PrivKey: privKey, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } storageServices = append(storageServices, NewMemoryBackedStorageService(ctx)) @@ -205,9 +205,9 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { aggregator, err := NewAggregator( ctx, DataAvailabilityConfig{ - AggregatorConfig: AggregatorConfig{AssumedHonest: assumedHonest}, - L1NodeURL: "none", - 
RequestTimeout: time.Millisecond * 2000, + RPCAggregator: AggregatorConfig{AssumedHonest: assumedHonest}, + ParentChainNodeURL: "none", + RequestTimeout: time.Millisecond * 2000, }, backends) Require(t, err) diff --git a/das/das.go b/das/das.go index a5d5c8d560..208a12cc83 100644 --- a/das/das.go +++ b/das/das.go @@ -40,22 +40,22 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCacheConfig BigCacheConfig `koanf:"local-cache"` - RedisCacheConfig RedisConfig `koanf:"redis-cache"` + LocalCache BigCacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorageConfig LocalDBStorageConfig `koanf:"local-db-storage"` - LocalFileStorageConfig LocalFileStorageConfig `koanf:"local-file-storage"` - S3StorageServiceConfig S3StorageServiceConfig `koanf:"s3-storage"` - IpfsStorageServiceConfig IpfsStorageServiceConfig `koanf:"ipfs-storage"` - RegularSyncStorageConfig RegularSyncStorageConfig `koanf:"regular-sync-storage"` + LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` + LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` + S3Storage S3StorageServiceConfig `koanf:"s3-storage"` + IpfsStorage IpfsStorageServiceConfig `koanf:"ipfs-storage"` + RegularSyncStorage RegularSyncStorageConfig `koanf:"regular-sync-storage"` - KeyConfig KeyConfig `koanf:"key"` + Key KeyConfig `koanf:"key"` - AggregatorConfig AggregatorConfig `koanf:"rpc-aggregator"` - RestfulClientAggregatorConfig RestfulClientAggregatorConfig `koanf:"rest-aggregator"` + RPCAggregator AggregatorConfig `koanf:"rpc-aggregator"` + RestAggregator RestfulClientAggregatorConfig `koanf:"rest-aggregator"` - L1NodeURL string `koanf:"parent-chain-node-url"` - L1ConnectionAttempts int `koanf:"parent-chain-connection-attempts"` + ParentChainNodeURL string `koanf:"parent-chain-node-url"` + ParentChainConnectionAttempts int `koanf:"parent-chain-connection-attempts"` SequencerInboxAddress string `koanf:"sequencer-inbox-address"` ExtraSignatureCheckingPublicKey string `koanf:"extra-signature-checking-public-key"` @@ -66,8 +66,8 @@ type DataAvailabilityConfig struct { var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RequestTimeout: 5 * time.Second, Enable: false, - RestfulClientAggregatorConfig: DefaultRestfulClientAggregatorConfig, - L1ConnectionAttempts: 15, + RestAggregator: DefaultRestfulClientAggregatorConfig, + ParentChainConnectionAttempts: 15, PanicOnError: false, } @@ -132,8 +132,8 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { IpfsStorageServiceConfigAddOptions(prefix+".ipfs-storage", f) RestfulClientAggregatorConfigAddOptions(prefix+".rest-aggregator", f) - f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.L1NodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.L1ConnectionAttempts, "layer 1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.ParentChainConnectionAttempts, "layer 
1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 address of SequencerInbox contract") } diff --git a/das/das_test.go b/das/das_test.go index 7318afac19..416744535b 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -32,18 +32,18 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServicesFirst []*IterableStorageService @@ -124,18 +124,18 @@ func testDASMissingMessage(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServices []*IterableStorageService diff --git a/das/db_storage_service.go b/das/db_storage_service.go index fb89b1cf30..b9af530b9e 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -20,11 +20,11 @@ import ( ) type LocalDBStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalDBStorageConfig = LocalDBStorageConfig{} @@ -33,8 +33,8 @@ func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database") f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageServices, "enable db storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageServices, "enable db storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular 
sync storage") } type DBStorageService struct { diff --git a/das/factory.go b/das/factory.go index 96df5b474d..0e6b292005 100644 --- a/das/factory.go +++ b/das/factory.go @@ -27,59 +27,59 @@ func CreatePersistentStorageService( ) (StorageService, *LifecycleManager, error) { storageServices := make([]StorageService, 0, 10) var lifecycleManager LifecycleManager - if config.LocalDBStorageConfig.Enable { - s, err := NewDBStorageService(ctx, config.LocalDBStorageConfig.DataDir, config.LocalDBStorageConfig.DiscardAfterTimeout) + if config.LocalDBStorage.Enable { + s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout) if err != nil { return nil, nil, err } - if config.LocalDBStorageConfig.SyncFromStorageServices { + if config.LocalDBStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalDBStorageConfig.SyncToStorageServices { + if config.LocalDBStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.LocalFileStorageConfig.Enable { - s, err := NewLocalFileStorageService(config.LocalFileStorageConfig.DataDir) + if config.LocalFileStorage.Enable { + s, err := NewLocalFileStorageService(config.LocalFileStorage.DataDir) if err != nil { return nil, nil, err } - if config.LocalFileStorageConfig.SyncFromStorageServices { + if config.LocalFileStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalFileStorageConfig.SyncToStorageServices { + if config.LocalFileStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.S3StorageServiceConfig.Enable { - s, err := NewS3StorageService(config.S3StorageServiceConfig) + if config.S3Storage.Enable { + s, err := NewS3StorageService(config.S3Storage) if err != nil { return nil, nil, err } lifecycleManager.Register(s) - if config.S3StorageServiceConfig.SyncFromStorageServices { + if config.S3Storage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.S3StorageServiceConfig.SyncToStorageServices { + if config.S3Storage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } storageServices = append(storageServices, s) } - if config.IpfsStorageServiceConfig.Enable { - s, err := NewIpfsStorageService(ctx, config.IpfsStorageServiceConfig) + if config.IpfsStorage.Enable { + s, err := NewIpfsStorageService(ctx, config.IpfsStorage) if err != nil { return nil, nil, err } @@ -114,23 +114,23 @@ func WrapStorageWithCache( // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. 
var err error - if config.RedisCacheConfig.Enable { - storageService, err = NewRedisStorageService(config.RedisCacheConfig, storageService) + if config.RedisCache.Enable { + storageService, err = NewRedisStorageService(config.RedisCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err } - if config.RedisCacheConfig.SyncFromStorageServices { + if config.RedisCache.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(storageService)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) storageService = iterableStorageService } - if config.RedisCacheConfig.SyncToStorageServices { + if config.RedisCache.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, storageService) } } - if config.LocalCacheConfig.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCacheConfig, storageService) + if config.LocalCache.Enable { + storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err @@ -151,11 +151,11 @@ func CreateBatchPosterDAS( } // Check config requirements - if !config.AggregatorConfig.Enable || !config.RestfulClientAggregatorConfig.Enable { + if !config.RPCAggregator.Enable || !config.RestAggregator.Enable { return nil, nil, nil, errors.New("--node.data-availability.rpc-aggregator.enable and rest-aggregator.enable must be set when running a Batch Poster in AnyTrust mode") } - if config.IpfsStorageServiceConfig.Enable { + if config.IpfsStorage.Enable { return nil, nil, nil, errors.New("--node.data-availability.ipfs-storage.enable may not be set when running a Nitro AnyTrust node in Batch Poster mode") } // Done checking config requirements @@ -173,7 +173,7 @@ func CreateBatchPosterDAS( } } - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, err } @@ -200,10 +200,10 @@ func CreateDAComponentsForDaserver( } // Check config requirements - if !config.LocalDBStorageConfig.Enable && - !config.LocalFileStorageConfig.Enable && - !config.S3StorageServiceConfig.Enable && - !config.IpfsStorageServiceConfig.Enable { + if !config.LocalDBStorage.Enable && + !config.LocalFileStorage.Enable && + !config.S3Storage.Enable && + !config.IpfsStorage.Enable { return nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage|ipfs-storage) must be enabled.") } // Done checking config requirements @@ -222,15 +222,15 @@ func CreateDAComponentsForDaserver( // The REST aggregator is used as the fallback if requested data is not present // in the storage service. 
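[Note: the comment above states that the REST aggregator backs up the storage service on a miss. A minimal stand-alone Go sketch of that read path follows; Reader, mapReader, and fallbackReader are simplified hypothetical stand-ins, not the actual DAS reader interfaces.]

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// Reader is a stand-in for a DAS reader.
type Reader interface {
	Get(hash string) ([]byte, error)
}

type mapReader map[string][]byte

func (m mapReader) Get(h string) ([]byte, error) {
	if v, ok := m[h]; ok {
		return v, nil
	}
	return nil, errNotFound
}

// fallbackReader tries local storage first and falls back to the
// REST aggregator only when the data is not present locally.
type fallbackReader struct {
	storage Reader // local storage service, tried first
	restAgg Reader // REST aggregator, used only on a miss
}

func (f fallbackReader) Get(h string) ([]byte, error) {
	if v, err := f.storage.Get(h); err == nil {
		return v, nil
	}
	return f.restAgg.Get(h)
}

func main() {
	r := fallbackReader{
		storage: mapReader{},                              // empty: forces a miss
		restAgg: mapReader{"h1": []byte("batch data")},    // fallback has the data
	}
	v, err := r.Get("h1")
	fmt.Println(string(v), err) // "batch data <nil>"
}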
- if config.RestfulClientAggregatorConfig.Enable { - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + if config.RestAggregator.Enable { + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, nil, err } restAgg.Start(ctx) dasLifecycleManager.Register(restAgg) - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 @@ -266,7 +266,7 @@ func CreateDAComponentsForDaserver( var daReader DataAvailabilityServiceReader = storageService var daHealthChecker DataAvailabilityServiceHealthChecker = storageService - if config.KeyConfig.KeyDir != "" || config.KeyConfig.PrivKey != "" { + if config.Key.KeyDir != "" || config.Key.PrivKey != "" { var seqInboxCaller *bridgegen.SequencerInboxCaller if seqInboxAddress != nil { seqInbox, err := bridgegen.NewSequencerInbox(*seqInboxAddress, (*l1Reader).Client()) @@ -280,7 +280,7 @@ func CreateDAComponentsForDaserver( seqInboxCaller = nil } - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, nil, nil, nil, err } @@ -296,8 +296,8 @@ func CreateDAComponentsForDaserver( } } - if config.RegularSyncStorageConfig.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { - regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorageConfig) + if config.RegularSyncStorage.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { + regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorage) regularlySyncStorage.Start(ctx) } @@ -322,15 +322,15 @@ func CreateDAReaderForNode( } // Check config requirements - if config.AggregatorConfig.Enable { + if config.RPCAggregator.Enable { return nil, nil, errors.New("node.data-availability.rpc-aggregator is only for Batch Poster mode") } - if !config.RestfulClientAggregatorConfig.Enable && !config.IpfsStorageServiceConfig.Enable { + if !config.RestAggregator.Enable && !config.IpfsStorage.Enable { return nil, nil, fmt.Errorf("--node.data-availability.enable was set but neither of --node.data-availability.(rest-aggregator|ipfs-storage) were enabled. 
When running a Nitro Anytrust node in non-Batch Poster mode, some way to get the batch data is required.") } - if config.RestfulClientAggregatorConfig.SyncToStorageConfig.Eager { + if config.RestAggregator.SyncToStorage.Eager { return nil, nil, errors.New("--node.data-availability.rest-aggregator.sync-to-storage.eager can't be used with a Nitro node, only lazy syncing can be used.") } // Done checking config requirements @@ -341,9 +341,9 @@ func CreateDAReaderForNode( } var daReader DataAvailabilityServiceReader - if config.RestfulClientAggregatorConfig.Enable { + if config.RestAggregator.Enable { var restAgg *SimpleDASReaderAggregator - restAgg, err = NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err = NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, err } @@ -351,7 +351,7 @@ func CreateDAReaderForNode( dasLifecycleManager.Register(restAgg) if storageService != nil { - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 9fd831f480..5fa5306e39 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -22,10 +22,10 @@ import ( ) type LocalFileStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ @@ -35,8 +35,8 @@ var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ func LocalFileStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalFileStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a directory of files, one per batch") f.String(prefix+".data-dir", DefaultLocalFileStorageConfig.DataDir, "local data directory") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageServices, "enable local storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageServices, "enable local storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageService, "enable local storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageService, "enable local storage to be used as a sink for regular sync storage") } type LocalFileStorageService struct { diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index a005c70a44..3449a8e78c 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -24,27 +24,27 @@ import ( ) type RedisConfig struct { - Enable bool `koanf:"enable"` - RedisUrl string `koanf:"redis-url"` - Expiration time.Duration `koanf:"redis-expiration"` - KeyConfig string `koanf:"redis-key-config"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool 
`koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + Url string `koanf:"url"` + Expiration time.Duration `koanf:"expiration"` + KeyConfig string `koanf:"key-config"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultRedisConfig = RedisConfig{ - RedisUrl: "", + Url: "", Expiration: time.Hour, KeyConfig: "", } func RedisConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultRedisConfig.Enable, "enable Redis caching of sequencer batch data") - f.String(prefix+".redis-url", DefaultRedisConfig.RedisUrl, "Redis url") - f.Duration(prefix+".redis-expiration", DefaultRedisConfig.Expiration, "Redis expiration") - f.String(prefix+".redis-key-config", DefaultRedisConfig.KeyConfig, "Redis key config") - f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable Redis to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable Redis to be used as a sink for regular sync storage") + f.String(prefix+".url", DefaultRedisConfig.Url, "Redis url") + f.Duration(prefix+".expiration", DefaultRedisConfig.Expiration, "Redis expiration") + f.String(prefix+".key-config", DefaultRedisConfig.KeyConfig, "Redis key config") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable Redis to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable Redis to be used as a sink for regular sync storage") } type RedisStorageService struct { @@ -55,7 +55,7 @@ type RedisStorageService struct { } func NewRedisStorageService(redisConfig RedisConfig, baseStorageService StorageService) (StorageService, error) { - redisClient, err := redisutil.RedisClientFromURL(redisConfig.RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(redisConfig.Url) if err != nil { return nil, err } diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 2481358cf6..55f3ecd82c 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -23,7 +23,7 @@ func TestRedisStorageService(t *testing.T) { redisService, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, baseStorageService) @@ -75,7 +75,7 @@ func TestRedisStorageService(t *testing.T) { redisServiceWithEmptyBaseStorage, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, emptyBaseStorageService) diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index cc455250d3..134c4229c8 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -28,7 +28,7 @@ type BackendConfig struct { } func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggr } func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client 
arbutil.L1Interface, seqInboxAddress common.Address) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -44,7 +44,7 @@ func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil. } func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } diff --git a/das/rpc_test.go b/das/rpc_test.go index 6dcb8457c2..044ba597be 100644 --- a/das/rpc_test.go +++ b/das/rpc_test.go @@ -35,15 +35,15 @@ func TestRPC(t *testing.T) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*IterableStorageService @@ -51,7 +51,7 @@ func TestRPC(t *testing.T) { storageService, lifecycleManager, err := CreatePersistentStorageService(ctx, &config, &syncFromStorageServices, &syncToStorageServices) testhelpers.RequireImpl(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() testhelpers.RequireImpl(t, err) localDas, err := NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, "") testhelpers.RequireImpl(t, err) @@ -71,7 +71,7 @@ func TestRPC(t *testing.T) { backendsJsonByte, err := json.Marshal([]BackendConfig{beConfig}) testhelpers.RequireImpl(t, err) aggConf := DataAvailabilityConfig{ - AggregatorConfig: AggregatorConfig{ + RPCAggregator: AggregatorConfig{ AssumedHonest: 1, Backends: string(backendsJsonByte), }, diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 18a9ce1475..1a3ae94114 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -34,15 +34,15 @@ type S3Downloader interface { } type S3StorageServiceConfig struct { - Enable bool `koanf:"enable"` - AccessKey string `koanf:"access-key"` - Bucket string `koanf:"bucket"` - ObjectPrefix string `koanf:"object-prefix"` - Region string `koanf:"region"` - SecretKey string `koanf:"secret-key"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + AccessKey string `koanf:"access-key"` + Bucket string `koanf:"bucket"` + ObjectPrefix string `koanf:"object-prefix"` + Region string `koanf:"region"` + SecretKey string `koanf:"secret-key"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultS3StorageServiceConfig = S3StorageServiceConfig{} @@ -55,8 +55,8 @@ func S3ConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".region", DefaultS3StorageServiceConfig.Region, "S3 region") f.String(prefix+".secret-key", DefaultS3StorageServiceConfig.SecretKey, "S3 secret key") f.Bool(prefix+".discard-after-timeout", DefaultS3StorageServiceConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - 
f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable s3 to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable s3 to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable s3 to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable s3 to be used as a sink for regular sync storage") } type S3StorageService struct { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 1a244ab640..5d612574bf 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -86,14 +86,14 @@ type SignAfterStoreDASWriter struct { } func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConfig, storageService StorageService) (*SignAfterStoreDASWriter, error) { - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, err } - if config.L1NodeURL == "none" { + if config.ParentChainNodeURL == "none" { return NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, config.ExtraSignatureCheckingPublicKey) } - l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL) + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) if err != nil { return nil, err } diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index b2499b680a..eb82a33837 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -25,28 +25,28 @@ import ( // RestfulDasClients, so the configuration and factory function are given more // specific names. 
type RestfulClientAggregatorConfig struct { - Enable bool `koanf:"enable"` - Urls []string `koanf:"urls"` - OnlineUrlList string `koanf:"online-url-list"` - OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` - Strategy string `koanf:"strategy"` - StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` - WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` - MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` - SimpleExploreExploitStrategyConfig SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` - SyncToStorageConfig SyncToStorageConfig `koanf:"sync-to-storage"` + Enable bool `koanf:"enable"` + Urls []string `koanf:"urls"` + OnlineUrlList string `koanf:"online-url-list"` + OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` + Strategy string `koanf:"strategy"` + StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` + WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` + MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` + SimpleExploreExploitStrategy SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` + SyncToStorage SyncToStorageConfig `koanf:"sync-to-storage"` } var DefaultRestfulClientAggregatorConfig = RestfulClientAggregatorConfig{ - Urls: []string{}, - OnlineUrlList: "", - OnlineUrlListFetchInterval: 1 * time.Hour, - Strategy: "simple-explore-exploit", - StrategyUpdateInterval: 10 * time.Second, - WaitBeforeTryNext: 2 * time.Second, - MaxPerEndpointStats: 20, - SimpleExploreExploitStrategyConfig: DefaultSimpleExploreExploitStrategyConfig, - SyncToStorageConfig: DefaultSyncToStorageConfig, + Urls: []string{}, + OnlineUrlList: "", + OnlineUrlListFetchInterval: 1 * time.Hour, + Strategy: "simple-explore-exploit", + StrategyUpdateInterval: 10 * time.Second, + WaitBeforeTryNext: 2 * time.Second, + MaxPerEndpointStats: 20, + SimpleExploreExploitStrategy: DefaultSimpleExploreExploitStrategyConfig, + SyncToStorage: DefaultSyncToStorageConfig, } type SimpleExploreExploitStrategyConfig struct { @@ -120,8 +120,8 @@ func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggreg switch strings.ToLower(config.Strategy) { case "simple-explore-exploit": a.strategy = &simpleExploreExploitStrategy{ - exploreIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploreIterations), - exploitIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploitIterations), + exploreIterations: uint32(config.SimpleExploreExploitStrategy.ExploreIterations), + exploitIterations: uint32(config.SimpleExploreExploitStrategy.ExploitIterations), } case "testing-sequential": a.strategy = &testingSequentialStrategy{} diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 7c67dbec68..143365bd23 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -57,32 +57,32 @@ func init() { } type SyncToStorageConfig struct { - CheckAlreadyExists bool `koanf:"check-already-exists"` - Eager bool `koanf:"eager"` - EagerLowerBoundBlock uint64 `koanf:"eager-lower-bound-block"` - RetentionPeriod time.Duration `koanf:"retention-period"` - DelayOnError time.Duration `koanf:"delay-on-error"` - IgnoreWriteErrors bool `koanf:"ignore-write-errors"` - L1BlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` - StateDir string `koanf:"state-dir"` + CheckAlreadyExists bool `koanf:"check-already-exists"` + Eager bool `koanf:"eager"` + EagerLowerBoundBlock uint64 
`koanf:"eager-lower-bound-block"` + RetentionPeriod time.Duration `koanf:"retention-period"` + DelayOnError time.Duration `koanf:"delay-on-error"` + IgnoreWriteErrors bool `koanf:"ignore-write-errors"` + ParentChainBlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` + StateDir string `koanf:"state-dir"` } var DefaultSyncToStorageConfig = SyncToStorageConfig{ - CheckAlreadyExists: true, - Eager: false, - EagerLowerBoundBlock: 0, - RetentionPeriod: time.Duration(math.MaxInt64), - DelayOnError: time.Second, - IgnoreWriteErrors: true, - L1BlocksPerRead: 100, - StateDir: "", + CheckAlreadyExists: true, + Eager: false, + EagerLowerBoundBlock: 0, + RetentionPeriod: time.Duration(math.MaxInt64), + DelayOnError: time.Second, + IgnoreWriteErrors: true, + ParentChainBlocksPerRead: 100, + StateDir: "", } func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".check-already-exists", DefaultSyncToStorageConfig.CheckAlreadyExists, "check if the data already exists in this DAS's storage. Must be disabled for fast sync with an IPFS backend") f.Bool(prefix+".eager", DefaultSyncToStorageConfig.Eager, "eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily") f.Uint64(prefix+".eager-lower-bound-block", DefaultSyncToStorageConfig.EagerLowerBoundBlock, "when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state") - f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.L1BlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") + f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.ParentChainBlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") f.Duration(prefix+".retention-period", DefaultSyncToStorageConfig.RetentionPeriod, "period to retain synced data (defaults to forever)") f.Duration(prefix+".delay-on-error", DefaultSyncToStorageConfig.DelayOnError, "time to wait if encountered an error before retrying") f.Bool(prefix+".ignore-write-errors", DefaultSyncToStorageConfig.IgnoreWriteErrors, "log only on failures to write when syncing; otherwise treat it as an error") @@ -346,9 +346,9 @@ func (s *l1SyncService) readMore(ctx context.Context) error { } } } - if highBlockNr > s.lowBlockNr+s.config.L1BlocksPerRead { + if highBlockNr > s.lowBlockNr+s.config.ParentChainBlocksPerRead { s.catchingUp = true - highBlockNr = s.lowBlockNr + s.config.L1BlocksPerRead + highBlockNr = s.lowBlockNr + s.config.ParentChainBlocksPerRead if finalizedHighBlockNr > highBlockNr { finalizedHighBlockNr = highBlockNr } diff --git a/relay/relay.go b/relay/relay.go index f4fc33d9e3..81d931c0ce 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -52,7 +52,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { clients, err := broadcastclients.NewBroadcastClients( func() *broadcastclient.Config { return &config.Node.Feed.Input }, - config.L2.ChainId, + config.Chain.ID, 0, &q, confirmedSequenceNumberListener, @@ -70,7 +70,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { return nil, errors.New("relay attempted to sign feed message") } return &Relay{ - broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.L2.ChainId, feedErrChan, dataSignerErr), + broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.Chain.ID, 
feedErrChan, dataSignerErr), broadcastClients: clients, confirmedSequenceNumberChan: confirmedSequenceNumberListener, messageChan: q.queue, @@ -141,7 +141,7 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` - L2 L2Config `koanf:"chain"` + Chain L2Config `koanf:"chain"` LogLevel int `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` @@ -154,7 +154,7 @@ type Config struct { var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, - L2: L2ConfigDefault, + Chain: L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", Metrics: false, @@ -191,15 +191,15 @@ func NodeConfigAddOptions(prefix string, f *flag.FlagSet) { } type L2Config struct { - ChainId uint64 `koanf:"id"` + ID uint64 `koanf:"id"` } var L2ConfigDefault = L2Config{ - ChainId: 0, + ID: 0, } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainId, "L2 chain ID") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID") } func ParseRelay(_ context.Context, args []string) (*Config, error) { diff --git a/staker/block_validator.go b/staker/block_validator.go index 109f9d82b2..333a096813 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -81,7 +81,7 @@ type BlockValidator struct { type BlockValidatorConfig struct { Enable bool `koanf:"enable"` ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` - ValidationPoll time.Duration `koanf:"check-validations-poll" reload:"hot"` + ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload @@ -107,7 +107,7 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation") rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer) - f.Duration(prefix+".check-validations-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") + f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)") f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)") f.String(prefix+".current-module-root", DefaultBlockValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash)") diff --git a/staker/staker.go b/staker/staker.go index 1fe1b83fcf..a35f5088c1 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -69,20 +69,20 @@ func L1PostingStrategyAddOptions(prefix string, f *flag.FlagSet) { } type L1ValidatorConfig struct { - Enable bool `koanf:"enable"` - Strategy string `koanf:"strategy"` - StakerInterval time.Duration `koanf:"staker-interval"` - MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` - L1PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` - DisableChallenge bool `koanf:"disable-challenge"` - ConfirmationBlocks 
int64 `koanf:"confirmation-blocks"` - UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` - OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` - StartFromStaked bool `koanf:"start-validation-from-staked"` - ContractWalletAddress string `koanf:"contract-wallet-address"` - GasRefunderAddress string `koanf:"gas-refunder-address"` - Dangerous DangerousConfig `koanf:"dangerous"` - L1Wallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + Enable bool `koanf:"enable"` + Strategy string `koanf:"strategy"` + StakerInterval time.Duration `koanf:"staker-interval"` + MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` + PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` + DisableChallenge bool `koanf:"disable-challenge"` + ConfirmationBlocks int64 `koanf:"confirmation-blocks"` + UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` + OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + ContractWalletAddress string `koanf:"contract-wallet-address"` + GasRefunderAddress string `koanf:"gas-refunder-address"` + Dangerous DangerousConfig `koanf:"dangerous"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` strategy StakerStrategy gasRefunder common.Address @@ -132,25 +132,25 @@ func (c *L1ValidatorConfig) Validate() error { } var DefaultL1ValidatorConfig = L1ValidatorConfig{ - Enable: true, - Strategy: "Watchtower", - StakerInterval: time.Minute, - MakeAssertionInterval: time.Hour, - L1PostingStrategy: L1PostingStrategy{}, - DisableChallenge: false, - ConfirmationBlocks: 12, - UseSmartContractWallet: false, - OnlyCreateWalletContract: false, - StartFromStaked: true, - ContractWalletAddress: "", - GasRefunderAddress: "", - Dangerous: DefaultDangerousConfig, - L1Wallet: DefaultValidatorL1WalletConfig, + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Minute, + MakeAssertionInterval: time.Hour, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 12, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ Pathname: "validator-wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, @@ -166,11 +166,11 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int64(prefix+".confirmation-blocks", DefaultL1ValidatorConfig.ConfirmationBlocks, "confirmation blocks") f.Bool(prefix+".use-smart-contract-wallet", DefaultL1ValidatorConfig.UseSmartContractWallet, "use a smart contract wallet instead of an EOA address") f.Bool(prefix+".only-create-wallet-contract", DefaultL1ValidatorConfig.OnlyCreateWalletContract, "only create smart wallet contract and exit") - f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartValidationFromStaked, "assume staked nodes are valid") f.String(prefix+".contract-wallet-address", 
DefaultL1ValidatorConfig.ContractWalletAddress, "validator smart contract wallet public address") f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") DangerousConfigAddOptions(prefix+".dangerous", f) - genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.L1Wallet.Pathname) + genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) } type DangerousConfig struct { @@ -242,7 +242,7 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config.StartFromStaked && blockValidator != nil { + if config.StartValidationFromStaked && blockValidator != nil { stakedNotifiers = append(stakedNotifiers, blockValidator) } return &Staker{ @@ -252,7 +252,7 @@ func NewStaker( confirmedNotifiers: confirmedNotifiers, baseCallOpts: callOpts, config: config, - highGasBlocksBuffer: big.NewInt(config.L1PostingStrategy.HighGasDelayBlocks), + highGasBlocksBuffer: big.NewInt(config.PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, inboxReader: statelessBlockValidator.inboxReader, statelessBlockValidator: statelessBlockValidator, @@ -269,7 +269,7 @@ func (s *Staker) Initialize(ctx context.Context) error { if walletAddressOrZero != (common.Address{}) { s.updateStakerBalanceMetric(ctx) } - if s.blockValidator != nil && s.config.StartFromStaked { + if s.blockValidator != nil && s.config.StartValidationFromStaked { latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero) if err != nil { return err @@ -450,7 +450,7 @@ func (s *Staker) shouldAct(ctx context.Context) bool { log.Warn("error getting gas price", "err", err) } else { gasPriceFloat = float64(gasPrice.Int64()) / 1e9 - if gasPriceFloat >= s.config.L1PostingStrategy.HighGasThreshold { + if gasPriceFloat >= s.config.PostingStrategy.HighGasThreshold { gasPriceHigh = true } } @@ -475,14 +475,14 @@ func (s *Staker) shouldAct(ctx context.Context) bool { // Clamp `s.highGasBlocksBuffer` to between 0 and HighGasDelayBlocks if s.highGasBlocksBuffer.Sign() < 0 { s.highGasBlocksBuffer.SetInt64(0) - } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.L1PostingStrategy.HighGasDelayBlocks)) > 0 { - s.highGasBlocksBuffer.SetInt64(s.config.L1PostingStrategy.HighGasDelayBlocks) + } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.PostingStrategy.HighGasDelayBlocks)) > 0 { + s.highGasBlocksBuffer.SetInt64(s.config.PostingStrategy.HighGasDelayBlocks) } if gasPriceHigh && s.highGasBlocksBuffer.Sign() > 0 { log.Warn( "not acting yet as gas price is high", "gasPrice", gasPriceFloat, - "highGasPriceConfig", s.config.L1PostingStrategy.HighGasThreshold, + "highGasPriceConfig", s.config.PostingStrategy.HighGasThreshold, "highGasBuffer", s.highGasBlocksBuffer, ) return false diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 6f6c041c41..11bf92608b 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -76,7 +76,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Require(t, err) seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) conf.BatchPoster.Enable = true - conf.BatchPoster.MaxBatchSize = len(firstTxData) * 2 + conf.BatchPoster.MaxSize = len(firstTxData) * 2 startL1Block, err := l1client.BlockNumber(ctx) Require(t, err) for i := 0; i < parallelBatchPosters; i++ { @@ -175,7 +175,7 @@ func 
TestBatchPosterKeepsUp(t *testing.T) { conf := arbnode.ConfigDefaultL1Test() conf.BatchPoster.CompressionLevel = brotli.BestCompression - conf.BatchPoster.MaxBatchPostDelay = time.Hour + conf.BatchPoster.MaxDelay = time.Hour conf.RPC.RPCTxFeeCap = 1000. l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) defer requireClose(t, l1stack) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 7fe1a65969..fa3d902b18 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -46,7 +46,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops var delayEvery int if workloadLoops > 1 { - l1NodeConfigA.BatchPoster.MaxBatchPostDelay = time.Millisecond * 500 + l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500 delayEvery = workloadLoops / 3 } @@ -59,7 +59,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() validatorConfig.BlockValidator.Enable = true validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability - validatorConfig.DataAvailability.AggregatorConfig.Enable = false + validatorConfig.DataAvailability.RPCAggregator.Enable = false AddDefaultValNode(t, ctx, validatorConfig, !arbitrator) l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil) defer nodeB.StopAndWait() diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 4311ca6540..872147320c 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -831,19 +831,19 @@ func setupConfigWithDAS( dasConfig := &das.DataAvailabilityConfig{ Enable: enableDas, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: das.LocalDBStorageConfig{ + LocalDBStorage: das.LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, RequestTimeout: 5 * time.Second, - L1NodeURL: "none", + ParentChainNodeURL: "none", SequencerInboxAddress: "none", PanicOnError: true, DisableSignatureChecking: true, @@ -872,12 +872,12 @@ func setupConfigWithDAS( PubKeyBase64Encoded: blsPubToBase64(dasSignerKey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" } return chainConfig, l1NodeConfigA, lifecycleManager, dbPath, dasSignerKey diff --git a/system_tests/das_test.go b/system_tests/das_test.go index d813253670..01bd96663c 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ 
-45,15 +45,15 @@ func startLocalDASServer( config := das.DataAvailabilityConfig{ Enable: true, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*das.IterableStorageService @@ -64,7 +64,7 @@ func startLocalDASServer( Require(t, err) seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client) Require(t, err) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() Require(t, err) daWriter, err := das.NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, seqInboxCaller, storageService, "") Require(t, err) @@ -132,11 +132,11 @@ func TestDASRekey(t *testing.T) { // Setup DAS config l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) @@ -145,11 +145,11 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} + l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigB.DataAvailability.RestAggregator.Enable = true + l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} - l1NodeConfigB.DataAvailability.L1NodeURL = "none" + l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) @@ -179,7 +179,7 @@ func TestDASRekey(t *testing.T) { l2blockchain, err := execution.GetBlockChain(l2chainDb, nil, chainConfig, arbnode.ConfigDefaultL2Test().TxLookupLimit) Require(t, err) - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigB) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, 
feedErrChan) Require(t, err) Require(t, nodeA.Start(ctx)) @@ -247,18 +247,18 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCacheConfig: das.TestBigCacheConfig, + LocalCache: das.TestBigCacheConfig, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: fileDataDir, }, - LocalDBStorageConfig: das.LocalDBStorageConfig{ + LocalDBStorage: das.LocalDBStorageConfig{ Enable: true, DataDir: dbDataDir, }, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, @@ -293,11 +293,11 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { PubKeyBase64Encoded: blsPubToBase64(pubkey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" dataSigner := signature.DataSignerFromPrivateKey(l1info.Accounts["Sequencer"].PrivateKey) @@ -321,16 +321,16 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { // AggregatorConfig set up below - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigB.DataAvailability.L1NodeURL = "none" + l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigB.DataAvailability.RestAggregator.Enable = true + l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 0e5cca319a..93c928d145 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -48,7 +48,7 @@ func TestStaticForwarder(t *testing.T) { nodeConfigB.Sequencer.Enable = false nodeConfigB.DelayedSequencer.Enable = false nodeConfigB.Forwarder.RedisUrl = "" - nodeConfigB.ForwardingTargetImpl = ipcPath + nodeConfigB.ForwardingTarget = ipcPath nodeConfigB.BatchPoster.Enable = false clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, nil) @@ -104,7 +104,7 @@ func fallbackSequencer( nodeConfig := 
arbnode.ConfigDefaultL1Test() nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = opts.ipcPath + nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, stackConfig) } @@ -128,7 +128,7 @@ func createForwardingNode( nodeConfig.Sequencer.Enable = false nodeConfig.DelayedSequencer.Enable = false nodeConfig.Forwarder.RedisUrl = redisUrl - nodeConfig.ForwardingTargetImpl = fallbackPath + nodeConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) @@ -151,7 +151,7 @@ func createSequencer( nodeConfig.BatchPoster.Enable = true nodeConfig.SeqCoordinator.Enable = true nodeConfig.SeqCoordinator.RedisUrl = redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = ipcPath + nodeConfig.SeqCoordinator.MyUrl = ipcPath return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) } diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 2209e82d93..b1f50c9436 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -61,7 +61,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) createStartNode := func(nodeNum int) { - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[nodeNum] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, false) nodes[nodeNum] = node } @@ -277,7 +277,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[0] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, params.ArbitrumDevTestChainConfig(), nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -302,10 +302,10 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { nodeConfigDup := *nodeConfig nodeConfig = &nodeConfigDup - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[1] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { - nodeConfig.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - nodeConfig.SeqCoordinator.Signing.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} + nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} } clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil) defer nodeB.StopAndWait() diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 0ae72e384f..d509d20aa1 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -89,7 +89,7 @@ func TestRelayedSequencerFeed(t *testing.T) { port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port config.Node.Feed.Input = *newBroadcastClientConfigTest(port) config.Node.Feed.Output = *newBroadcasterConfigTest() - config.L2.ChainId = bigChainId.Uint64() + config.Chain.ID = bigChainId.Uint64() feedErrChan := make(chan error, 10) currentRelay, err := relay.NewRelay(&config, feedErrChan) @@ -145,7 +145,7 @@ func testLyingSequencer(t *testing.T, 
dasModeStr string) { nodeConfigC := arbnode.ConfigDefaultL1Test() nodeConfigC.BatchPoster.Enable = false nodeConfigC.DataAvailability = nodeConfigA.DataAvailability - nodeConfigC.DataAvailability.AggregatorConfig.Enable = false + nodeConfigC.DataAvailability.RPCAggregator.Enable = false nodeConfigC.Feed.Output = *newBroadcasterConfigTest() l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, nil) defer nodeC.StopAndWait() @@ -157,7 +157,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigB.Feed.Output.Enable = false nodeConfigB.Feed.Input = *newBroadcastClientConfigTest(port) nodeConfigB.DataAvailability = nodeConfigA.DataAvailability - nodeConfigB.DataAvailability.AggregatorConfig.Enable = false + nodeConfigB.DataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil) defer nodeB.StopAndWait() diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 165b01b35a..72de2aa50a 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -26,7 +26,7 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) defer nodeB.StopAndWait() diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index 3987e5cf7b..0cac9d6442 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -48,7 +48,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) defer nodeB.StopAndWait() diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index f9d0705f59..9a165beee9 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -26,7 +26,7 @@ type ArbitratorSpawnerConfig struct { Workers int `koanf:"workers" reload:"hot"` OutputPath string `koanf:"output-path" reload:"hot"` Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only - ExecRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` + ExecRunTimeout time.Duration `koanf:"exec-run-timeout" reload:"hot"` } type ArbitratorSpawnerConfigFecher func() *ArbitratorSpawnerConfig From 46c960079603c418972c4a1bb76897cff1043357 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 16 Aug 2023 17:59:10 +0200 Subject: [PATCH 012/117] Revert chain-id flag to id, fix tests to use urls instead of url flag --- cmd/conf/chain.go | 6 +++--- cmd/nitro/config_test.go | 4 ++-- cmd/nitro/nitro.go | 14 +++++++------- system_tests/validation_mock_test.go | 2 +- validator/server_api/valiation_api.go | 4 ++-- validator/server_arb/validator_spawner.go | 18 +++++++++--------- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git 
a/cmd/conf/chain.go b/cmd/conf/chain.go index 4eed9678ad..505957f45e 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -12,7 +12,7 @@ import ( ) type L1Config struct { - ChainID uint64 `koanf:"chain-id"` + ID uint64 `koanf:"id"` Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` Wallet genericconf.WalletConfig `koanf:"wallet"` } @@ -25,7 +25,7 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ } var L1ConfigDefault = L1Config{ - ChainID: 0, + ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, } @@ -39,7 +39,7 @@ var DefaultL1WalletConfig = genericconf.WalletConfig{ } func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L1ConfigDefault.ChainID, "if set other than 0, will be used to validate database and L1 connection") + f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) } diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index 67cd6ac055..4b99b798ee 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -85,7 +85,7 @@ func TestReloads(t *testing.T) { // check that non-reloadable fields fail assignment update.Metrics = !update.Metrics testUnsafe() - update.Chain.ID++ + update.ParentChain.ID++ testUnsafe() update.Node.Sequencer.Forwarder.ConnectionTimeout++ testUnsafe() @@ -122,7 +122,7 @@ func TestLiveNodeConfig(t *testing.T) { // check that an invalid reload gets rejected update = config.ShallowClone() - update.Chain.ID++ + update.ParentChain.ID++ if liveConfig.Set(update) == nil { Fail(t, "failed to reject invalid update") } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 9e9bc1362e..678a397769 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -249,7 +249,7 @@ func mainImpl() int { if nodeConfig.Node.Staker.ParentChainWallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.ParentChainWallet == defaultBatchPosterL1WalletConfig { if sequencerNeedsKey || validatorNeedsKey || l1Wallet.OnlyCreateKey { - l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) + l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() log.Crit("error opening parent chain wallet", "path", l1Wallet.Pathname, "account", l1Wallet.Account, "err", err) @@ -265,7 +265,7 @@ func mainImpl() int { log.Crit("--parent-chain.wallet cannot be set if either --node.staker.l1-wallet or --node.batch-poster.l1-wallet are set") } if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { - l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) + l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) @@ -275,7 +275,7 @@ func mainImpl() int { } } if validatorNeedsKey || 
nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { - l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ChainID)) + l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) @@ -328,11 +328,11 @@ func mainImpl() int { if err != nil { log.Crit("couldn't read L1 chainid", "err", err) } - if l1ChainId.Uint64() != nodeConfig.ParentChain.ChainID { - log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.ParentChain.ChainID) + if l1ChainId.Uint64() != nodeConfig.ParentChain.ID { + log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.ParentChain.ID) } - log.Info("connected to l1 chain", "l1url", nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ChainID) + log.Info("connected to l1 chain", "l1url", nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ID) rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { @@ -768,7 +768,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c chainDefaults["node.forwarding-target"] = chainInfo.SequencerUrl } if chainInfo.FeedUrl != "" { - chainDefaults["node.feed.input.url"] = chainInfo.FeedUrl + chainDefaults["node.feed.input.urls"] = chainInfo.FeedUrl } if chainInfo.DasIndexUrl != "" { chainDefaults["node.data-availability.enable"] = true diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index bfa2d67839..86e082ff5d 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -325,7 +325,7 @@ func TestExecutionKeepAlive(t *testing.T) { defer cancel() _, validationDefault := createMockValidationNode(t, ctx, nil) shortTimeoutConfig := server_arb.DefaultArbitratorSpawnerConfig - shortTimeoutConfig.ExecRunTimeout = time.Second + shortTimeoutConfig.ExecutionRunTimeout = time.Second _, validationShortTO := createMockValidationNode(t, ctx, &shortTimeoutConfig) configFetcher := StaticFetcherFrom(t, &rpcclient.TestClientConfig) diff --git a/validator/server_api/valiation_api.go b/validator/server_api/valiation_api.go index 9e5191ec81..ca5aafcee2 100644 --- a/validator/server_api/valiation_api.go +++ b/validator/server_api/valiation_api.go @@ -91,7 +91,7 @@ func (a *ExecServerAPI) LatestWasmModuleRoot(ctx context.Context) (common.Hash, } func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { - oldestKept := time.Now().Add(-1 * a.config().ExecRunTimeout) + oldestKept := time.Now().Add(-1 * a.config().ExecutionRunTimeout) a.runIdLock.Lock() defer a.runIdLock.Unlock() for id, entry := range a.runs { @@ -99,7 +99,7 @@ func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { delete(a.runs, id) } } - return a.config().ExecRunTimeout / 5 + return a.config().ExecutionRunTimeout / 5 } func (a *ExecServerAPI) Start(ctx_in context.Context) { diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index 9a165beee9..ab462c45b5 100644 --- 
a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -23,24 +23,24 @@ import ( ) type ArbitratorSpawnerConfig struct { - Workers int `koanf:"workers" reload:"hot"` - OutputPath string `koanf:"output-path" reload:"hot"` - Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only - ExecRunTimeout time.Duration `koanf:"exec-run-timeout" reload:"hot"` + Workers int `koanf:"workers" reload:"hot"` + OutputPath string `koanf:"output-path" reload:"hot"` + Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only + ExecutionRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` } type ArbitratorSpawnerConfigFecher func() *ArbitratorSpawnerConfig var DefaultArbitratorSpawnerConfig = ArbitratorSpawnerConfig{ - Workers: 0, - OutputPath: "./target/output", - Execution: DefaultMachineCacheConfig, - ExecRunTimeout: time.Minute * 15, + Workers: 0, + OutputPath: "./target/output", + Execution: DefaultMachineCacheConfig, + ExecutionRunTimeout: time.Minute * 15, } func ArbitratorSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".workers", DefaultArbitratorSpawnerConfig.Workers, "number of concurrent validation threads") - f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecRunTimeout, "timeout before discarding execution run") + f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecutionRunTimeout, "timeout before discarding execution run") f.String(prefix+".output-path", DefaultArbitratorSpawnerConfig.OutputPath, "path to write machines to") MachineCacheConfigConfigAddOptions(prefix+".execution", f) } From 1c31be29371c5cc9260ed6c842165686d6ce5499 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 16 Aug 2023 18:15:14 +0200 Subject: [PATCH 013/117] Rename url to urls in RelayConfig test --- cmd/relay/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/relay/config_test.go b/cmd/relay/config_test.go index cdaf763e6b..5fb5cab876 100644 --- a/cmd/relay/config_test.go +++ b/cmd/relay/config_test.go @@ -13,7 +13,7 @@ import ( ) func TestRelayConfig(t *testing.T) { - args := strings.Split("--node.feed.output.port 9652 --node.feed.input.url ws://sequencer:9642/feed", " ") + args := strings.Split("--node.feed.output.port 9652 --node.feed.input.urls ws://sequencer:9642/feed", " ") _, err := relay.ParseRelay(context.Background(), args) testhelpers.RequireImpl(t, err) } From fd54b19f6b5bcb4b9d01fba0317f2ea6613d5dae Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 16 Aug 2023 18:24:26 +0200 Subject: [PATCH 014/117] Match struct field names to flags (instead of other way around) to avoid breaking changes --- arbnode/node.go | 16 ++++++++-------- broadcastclient/broadcastclient.go | 22 +++++++++++----------- broadcastclient/broadcastclient_test.go | 4 ++-- broadcastclients/broadcastclients.go | 4 ++-- cmd/nitro/nitro.go | 4 ++-- cmd/relay/config_test.go | 2 +- cmd/relay/relay.go | 2 +- system_tests/seqfeed_test.go | 4 ++-- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 5f2a99592f..17d49ff0b4 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -297,7 +297,7 @@ type Config struct { Forwarder execution.ForwarderConfig `koanf:"forwarder"` TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` - RecordingDB 
arbitrum.RecordingDatabaseConfig `koanf:"recording-db"` + RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` Staker staker.L1ValidatorConfig `koanf:"staker"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` @@ -309,7 +309,7 @@ type Config struct { TxLookupLimit uint64 `koanf:"tx-lookup-limit"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceManagement resourcemanager.Config `koanf:"resource-management" reload:"hot"` + ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` } func (c *Config) Validate() error { @@ -373,7 +373,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) - arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-db", f) + arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) broadcastclient.FeedConfigAddOptions(prefix+".feed", f, feedInputEnable, feedOutputEnable) staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) @@ -384,7 +384,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) MaintenanceConfigAddOptions(prefix+".maintenance", f) - resourcemanager.ConfigAddOptions(prefix+".resource-management", f) + resourcemanager.ConfigAddOptions(prefix+".resource-mgmt", f) archiveMsg := fmt.Sprintf("retain past block state (deprecated, please use %v.caching.archive)", prefix) f.Bool(prefix+".archive", ConfigDefault.Archive, archiveMsg) @@ -401,7 +401,7 @@ var ConfigDefault = Config{ ForwardingTarget: "", TxPreChecker: execution.DefaultTxPreCheckerConfig, BlockValidator: staker.DefaultBlockValidatorConfig, - RecordingDB: arbitrum.DefaultRecordingDatabaseConfig, + RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig, Feed: broadcastclient.FeedConfigDefault, Staker: staker.DefaultL1ValidatorConfig, SeqCoordinator: DefaultSeqCoordinatorConfig, @@ -412,7 +412,7 @@ var ConfigDefault = Config{ TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second Caching: execution.DefaultCachingConfig, TransactionStreamer: DefaultTransactionStreamerConfig, - ResourceManagement: resourcemanager.DefaultConfig, + ResourceMgmt: resourcemanager.DefaultConfig, } func ConfigDefaultL1Test() *Config { @@ -447,7 +447,7 @@ func ConfigDefaultL2Test() *Config { config.Sequencer = execution.TestSequencerConfig config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig - config.Feed.Input.Verifier.Dangerous.AcceptMissing = true + config.Feed.Input.Verify.Dangerous.AcceptMissing = true config.Feed.Output.Signed = false config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true @@ -613,7 +613,7 @@ func createNodeImpl( sequencerConfigFetcher := func() *execution.SequencerConfig { return &configFetcher.Get().Sequencer } txprecheckConfigFetcher := func() *execution.TxPreCheckerConfig { return 
&configFetcher.Get().TxPreChecker } exec, err := execution.CreateExecutionNode(stack, chainDb, l2BlockChain, l1Reader, syncMonitor, - config.ForwardingTargetF(), &config.Forwarder, config.RPC, &config.RecordingDB, + config.ForwardingTargetF(), &config.Forwarder, config.RPC, &config.RecordingDatabase, sequencerConfigFetcher, txprecheckConfigFetcher) if err != nil { return nil, err diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 9bfee17a5a..c4277c2ef6 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -68,13 +68,13 @@ type Config struct { RequireChainId bool `koanf:"require-chain-id" reload:"hot"` RequireFeedVersion bool `koanf:"require-feed-version" reload:"hot"` Timeout time.Duration `koanf:"timeout" reload:"hot"` - URLs []string `koanf:"urls"` - Verifier signature.VerifierConfig `koanf:"verifier"` + URL []string `koanf:"url"` + Verify signature.VerifierConfig `koanf:"verify"` EnableCompression bool `koanf:"enable-compression" reload:"hot"` } func (c *Config) Enable() bool { - return len(c.URLs) > 0 && c.URLs[0] != "" + return len(c.URL) > 0 && c.URL[0] != "" } type ConfigFetcher func() *Config @@ -85,8 +85,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".require-chain-id", DefaultConfig.RequireChainId, "require chain id to be present on connect") f.Bool(prefix+".require-feed-version", DefaultConfig.RequireFeedVersion, "require feed version to be present on connect") f.Duration(prefix+".timeout", DefaultConfig.Timeout, "duration to wait before timing out connection to sequencer feed") - f.StringSlice(prefix+".urls", DefaultConfig.URLs, "URL of sequencer feed source") - signature.FeedVerifierConfigAddOptions(prefix+".verifier", f) + f.StringSlice(prefix+".url", DefaultConfig.URL, "URL of sequencer feed source") + signature.FeedVerifierConfigAddOptions(prefix+".verify", f) f.Bool(prefix+".enable-compression", DefaultConfig.EnableCompression, "enable per message deflate compression support") } @@ -95,8 +95,8 @@ var DefaultConfig = Config{ ReconnectMaximumBackoff: time.Second * 64, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{""}, Timeout: 20 * time.Second, EnableCompression: true, } @@ -106,8 +106,8 @@ var DefaultTestConfig = Config{ ReconnectMaximumBackoff: 0, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{""}, Timeout: 200 * time.Millisecond, EnableCompression: true, } @@ -156,7 +156,7 @@ func NewBroadcastClient( bpVerifier contracts.BatchPosterVerifierInterface, adjustCount func(int32), ) (*BroadcastClient, error) { - sigVerifier, err := signature.NewVerifier(&config().Verifier, bpVerifier) + sigVerifier, err := signature.NewVerifier(&config().Verify, bpVerifier) if err != nil { return nil, err } @@ -480,7 +480,7 @@ func (bc *BroadcastClient) StopAndWait() { } func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *broadcaster.BroadcastFeedMessage) error { - if bc.config().Verifier.Dangerous.AcceptMissing && bc.sigVerifier == nil { + if bc.config().Verify.Dangerous.AcceptMissing && bc.sigVerifier == nil { // Verifier disabled return nil } diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 871d9d8d8a..5912749bf0 100644 --- 
a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -202,10 +202,10 @@ func newTestBroadcastClient(config Config, listenerAddress net.Addr, chainId uin port := listenerAddress.(*net.TCPAddr).Port var bpv contracts.BatchPosterVerifierInterface if validAddr != nil { - config.Verifier.AcceptSequencer = true + config.Verify.AcceptSequencer = true bpv = contracts.NewMockBatchPosterVerifier(*validAddr) } else { - config.Verifier.AcceptSequencer = false + config.Verify.AcceptSequencer = false } return NewBroadcastClient(func() *Config { return &config }, fmt.Sprintf("ws://127.0.0.1:%d/", port), chainId, currentMessageCount, txStreamer, confirmedSequenceNumberListener, feedErrChan, bpv, func(_ int32) {}) } diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index baf7cf6394..9fc2e6348c 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -31,7 +31,7 @@ func NewBroadcastClients( bpVerifier contracts.BatchPosterVerifierInterface, ) (*BroadcastClients, error) { config := configFetcher() - urlCount := len(config.URLs) + urlCount := len(config.URL) if urlCount <= 0 { return nil, nil } @@ -39,7 +39,7 @@ func NewBroadcastClients( clients := BroadcastClients{} clients.clients = make([]*broadcastclient.BroadcastClient, 0, urlCount) var lastClientErr error - for _, address := range config.URLs { + for _, address := range config.URL { client, err := broadcastclient.NewBroadcastClient( configFetcher, address, diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 678a397769..035e129a88 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -369,7 +369,7 @@ func mainImpl() int { nodeConfig.Node.TxLookupLimit = 0 } - resourcemanager.Init(&nodeConfig.Node.ResourceManagement) + resourcemanager.Init(&nodeConfig.Node.ResourceMgmt) var sameProcessValidationNodeEnabled bool if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { @@ -768,7 +768,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c chainDefaults["node.forwarding-target"] = chainInfo.SequencerUrl } if chainInfo.FeedUrl != "" { - chainDefaults["node.feed.input.urls"] = chainInfo.FeedUrl + chainDefaults["node.feed.input.url"] = chainInfo.FeedUrl } if chainInfo.DasIndexUrl != "" { chainDefaults["node.data-availability.enable"] = true diff --git a/cmd/relay/config_test.go b/cmd/relay/config_test.go index 5fb5cab876..cdaf763e6b 100644 --- a/cmd/relay/config_test.go +++ b/cmd/relay/config_test.go @@ -13,7 +13,7 @@ import ( ) func TestRelayConfig(t *testing.T) { - args := strings.Split("--node.feed.output.port 9652 --node.feed.input.urls ws://sequencer:9642/feed", " ") + args := strings.Split("--node.feed.output.port 9652 --node.feed.input.url ws://sequencer:9642/feed", " ") _, err := relay.ParseRelay(context.Background(), args) testhelpers.RequireImpl(t, err) } diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 0105f94138..552838308d 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -63,7 +63,7 @@ func startup() error { ctx := context.Background() relayConfig, err := relay.ParseRelay(ctx, os.Args[1:]) - if err != nil || len(relayConfig.Node.Feed.Input.URLs) == 0 || relayConfig.Node.Feed.Input.URLs[0] == "" || relayConfig.Chain.ID == 0 { + if err != nil || len(relayConfig.Node.Feed.Input.URL) == 0 || relayConfig.Node.Feed.Input.URL[0] == "" || relayConfig.Chain.ID 
== 0 { confighelpers.PrintErrorAndExit(err, printSampleUsage) } diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index d509d20aa1..23c0e44c02 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -27,9 +27,9 @@ func newBroadcasterConfigTest() *wsbroadcastserver.BroadcasterConfig { func newBroadcastClientConfigTest(port int) *broadcastclient.Config { return &broadcastclient.Config{ - URLs: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, + URL: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, Timeout: 200 * time.Millisecond, - Verifier: signature.VerifierConfig{ + Verify: signature.VerifierConfig{ Dangerous: signature.DangerousVerifierConfig{ AcceptMissing: true, }, From 03176699fbd032e78f53c3245087dee3787422a7 Mon Sep 17 00:00:00 2001 From: Jason-Wanxt <61458343+Jason-Wanxt@users.noreply.github.com> Date: Thu, 17 Aug 2023 12:03:59 +0800 Subject: [PATCH 015/117] fix command-line typo --- arbnode/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/node.go b/arbnode/node.go index 2f5e4a69b2..4e23de37be 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -369,7 +369,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed DelayedSequencerConfigAddOptions(prefix+".delayed-sequencer", f) BatchPosterConfigAddOptions(prefix+".batch-poster", f) MessagePrunerConfigAddOptions(prefix+".message-pruner", f) - f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") + f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (if not sequencer)") execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) From a45dac53a0e6f48cec0c971b81bf3e580c765c05 Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 17 Aug 2023 17:54:00 +0200 Subject: [PATCH 016/117] Introduce koanf linter that checks that field names match koanf tags --- .gitignore | 1 + .gitmodules | 3 + .golangci.yml | 7 +++ Makefile | 4 +- go.mod | 17 +++--- go.sum | 34 ++++++----- linter/golangci-lint | 1 + linter/koanf/koanf.go | 116 +++++++++++++++++++++++++++++++++++++ linter/koanf/koanf_test.go | 31 ++++++++++ 9 files changed, 189 insertions(+), 25 deletions(-) create mode 160000 linter/golangci-lint create mode 100644 linter/koanf/koanf.go create mode 100644 linter/koanf/koanf_test.go diff --git a/.gitignore b/.gitignore index f0eb5c2ec3..8937c1b5d1 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ yarn-error.log local/ testdata system_tests/test-data/* +linter/koanf/koanf.so \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index 7c78791c78..11d9caa2aa 100644 --- a/.gitmodules +++ b/.gitmodules @@ -20,3 +20,6 @@ [submodule "nitro-testnode"] path = nitro-testnode url = https://github.com/OffchainLabs/nitro-testnode.git +[submodule "linter/golangci-lint"] + path = linter/golangci-lint + url = https://github.com/golangci/golangci-lint diff --git a/.golangci.yml b/.golangci.yml index e794cdb844..952a52a0c8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,6 +23,7 @@ linters: - nilerr # ensure errors aren't mishandled - staticcheck # check for suspicious constructs - unused # check for unused constructs + - koanf linters-settings: errcheck: @@ -51,3 +52,9 @@ 
linters-settings: disable: - shadow - fieldalignment + + + custom: + koanf: + path: linter/koanf/koanf.so + description: Koanf configuration linter diff --git a/Makefile b/Makefile index 205025dfe9..71c051087a 100644 --- a/Makefile +++ b/Makefile @@ -304,7 +304,9 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - golangci-lint run --fix + make build -C linter/golangci-lint + go build -buildmode=plugin -o linter/koanf/koanf.so linter/koanf/koanf.go + ./linter/golangci-lint/golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/go.mod b/go.mod index 5adfd19388..526f0a819f 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/ethereum/go-ethereum v1.10.26 + github.com/fatih/structtag v1.2.0 github.com/google/go-cmp v0.5.9 github.com/hashicorp/golang-lru/v2 v2.0.1 github.com/ipfs/go-cid v0.3.2 @@ -31,7 +32,8 @@ require ( github.com/multiformats/go-multihash v0.2.1 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/term v0.6.0 + golang.org/x/term v0.11.0 + golang.org/x/tools v0.12.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -256,8 +258,7 @@ require ( go.uber.org/zap v1.24.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/mod v0.12.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect google.golang.org/grpc v1.46.0 // indirect @@ -309,11 +310,11 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - golang.org/x/crypto v0.6.0 - golang.org/x/net v0.8.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.12.0 + golang.org/x/net v0.14.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.11.0 + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index 58155db124..43240135c5 100644 --- a/go.sum +++ b/go.sum @@ -326,6 +326,8 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -1737,8 +1739,8 @@ golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1774,8 +1776,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1835,8 +1837,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1857,8 +1859,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 
h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1951,12 +1953,12 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1966,8 +1968,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2035,8 +2037,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.7.0 
h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/linter/golangci-lint b/linter/golangci-lint new file mode 160000 index 0000000000..8000abaf0e --- /dev/null +++ b/linter/golangci-lint @@ -0,0 +1 @@ +Subproject commit 8000abaf0e6e28e8179864f0317349cecab47c05 diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go new file mode 100644 index 0000000000..4aaddaa495 --- /dev/null +++ b/linter/koanf/koanf.go @@ -0,0 +1,116 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + "strings" + "unicode" + + "github.com/fatih/structtag" + + "golang.org/x/tools/go/analysis" +) + +func New(conf any) ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +var Analyzer = &analysis.Analyzer{ + Name: "koanfcheck", + Doc: "check for koanf misconfigurations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testkoanfcheck", + Doc: "check for koanf misconfigurations (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// koanfError indicates the position of an error in configuration. +type koanfError struct { + Pos token.Position + Message string +} + +// Result is returned from the checkStruct function, and holds all the +// configuration errors. 
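+//
+// For example, a field declared as
+//
+//	MaxDelay time.Duration `koanf:"max-delay"`
+//
+// passes the check, since normalize("max-delay") yields "maxDelay", which
+// matches the field name case-insensitively, while a pre-rename field like
+//
+//	MyUrlImpl string `koanf:"my-url"`
+//
+// would be reported, because "myUrl" does not match "MyUrlImpl".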
+type Result struct { + Errors []koanfError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ret Result + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + var res Result + switch v := node.(type) { + case *ast.StructType: + res = checkStruct(pass, v) + default: + } + for _, err := range res.Errors { + ret.Errors = append(ret.Errors, err) + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: err.Message, + Category: "koanf", + }) + } + } + return true + }, + ) + } + return ret, nil +} + +func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { + var res Result + for _, f := range s.Fields.List { + if f.Tag == nil { + continue + } + tags, err := structtag.Parse(strings.Trim((f.Tag.Value), "`")) + if err != nil { + continue + } + tag, err := tags.Get("koanf") + if err != nil { + continue + } + tagName := normalize(tag.Name) + fieldName := f.Names[0].Name + if !strings.EqualFold(tagName, fieldName) { + res.Errors = append(res.Errors, koanfError{ + Pos: pass.Fset.Position(f.Pos()), + Message: fmt.Sprintf("field name: %q doesn't match tag name: %q\n", fieldName, tagName), + }) + } + } + return res +} + +func normalize(s string) string { + ans := s[:1] + for i := 1; i < len(s); i++ { + c := rune(s[i]) + if !isAlphanumeric(c) { + continue + } + if !isAlphanumeric(rune(s[i-1])) && unicode.IsLower(c) { + c = unicode.ToUpper(c) + } + ans += string(c) + } + return ans +} + +func isAlphanumeric(c rune) bool { + return unicode.IsLetter(c) || unicode.IsDigit(c) +} diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go new file mode 100644 index 0000000000..2e3e68b0f4 --- /dev/null +++ b/linter/koanf/koanf_test.go @@ -0,0 +1,31 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +func TestAll(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get wd: %s", err) + } + testdata := filepath.Join(filepath.Dir(wd), "testdata") + res := analysistest.Run(t, testdata, analyzerForTests, "a") + if cnt := countErrors(res); cnt != 1 { + t.Errorf("analysistest.Run() got %v errors, expected 1", cnt) + } +} + +func countErrors(errs []*analysistest.Result) int { + cnt := 0 + for _, e := range errs { + if r, ok := e.Result.(Result); ok { + cnt += len(r.Errors) + } + } + return cnt +} From 7547edb550f6bca2761d2b06d761e6cf201cb7bf Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 17 Aug 2023 17:56:28 +0200 Subject: [PATCH 017/117] Add empty line at the end of gitignore --- .gitignore | 2 +- .golangci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 8937c1b5d1..02cc86192b 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,4 @@ yarn-error.log local/ testdata system_tests/test-data/* -linter/koanf/koanf.so \ No newline at end of file +linter/koanf/koanf.so diff --git a/.golangci.yml b/.golangci.yml index 952a52a0c8..8f2fdda037 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,7 +23,7 @@ linters: - nilerr # ensure errors aren't mishandled - staticcheck # check for suspicious constructs - unused # check for unused constructs - - koanf + - koanf # check for koanf configurations linters-settings: errcheck: From eba8989a80857d89573223c385ea5f4dabbe66a8 Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 17 Aug 2023 17:57:31 +0200 Subject: [PATCH 018/117] drop extra empty line in golangci.yml --- .golangci.yml | 1 - 1 file changed, 1 deletion(-) 
diff --git a/.golangci.yml b/.golangci.yml index 8f2fdda037..47f471b5a1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -53,7 +53,6 @@ linters-settings: - shadow - fieldalignment - custom: koanf: path: linter/koanf/koanf.so From 2d89a05745c0990daa42b5352b606a3995ecfe03 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 17 Aug 2023 19:16:44 -0600 Subject: [PATCH 019/117] In the data poster, properly allocate fee spending across transactions --- arbnode/dataposter/data_poster.go | 53 +++++++++++++++++++++---------- util/arbmath/math.go | 5 +++ 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 1e7b74834e..e4fe798a9c 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -201,7 +201,7 @@ func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, e const minRbfIncrease = arbmath.OneInBips * 11 / 10 -func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { config := p.config() latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { @@ -210,6 +210,11 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFee if latestHeader.BaseFee == nil { return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } + softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) + softConfNonce, err := p.client.NonceAt(ctx, p.sender, softConfBlock) + if err != nil { + return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) + } newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) @@ -252,14 +257,29 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFee newFeeCap = maxFeeCap } - balanceFeeCap := new(big.Int).Div(p.balance, new(big.Int).SetUint64(gasLimit)) + latestBalance := p.balance + balanceForTx := new(big.Int).Set(latestBalance) + if !config.UseNoOpStorage { + // We reserve half the balance for the first transaction, and then split the remaining balance for all after that. + // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. 
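+ // For example (illustrative numbers): with a balance of 8 ETH, a gas limit
+ // of 1,000,000, and MaxMempoolTransactions set to 5, the first
+ // (soft-confirmed-nonce) transaction may spend up to 4 ETH, and each later
+ // transaction up to 4/(5-1) = 1 ETH, i.e. a balanceFeeCap of
+ // 1e18/1e6 = 1e12 wei (1000 gwei) per gas.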
+ balanceForTx.Div(balanceForTx, common.Big2) + if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + // balanceForTx /= config.MaxMempoolTransactions-1 + balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + } + } + balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { log.Error( "lack of L1 balance prevents posting transaction with desired fee cap", - "balance", p.balance, + "balance", latestBalance, + "maxTransactions", config.MaxMempoolTransactions, + "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, "desiredFeeCap", newFeeCap, "balanceFeeCap", balanceFeeCap, + "nonce", nonce, + "softConfNonce", softConfNonce, ) newFeeCap = balanceFeeCap } @@ -284,7 +304,7 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, err := p.feeAndTipCaps(ctx, gasLimit, nil, nil, dataCreatedAt, 0) + feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0) if err != nil { return nil, err } @@ -342,7 +362,7 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti // The mutex must be held by the caller. func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) + newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) if err != nil { return err } @@ -459,7 +479,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas) } const minWait = time.Second * 10 @@ -499,7 +519,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { // replacing them by fee. 
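// Note (illustrative): a replacement only goes out once its new fee caps are
// at least minRbfIncrease times the stuck transaction's (11/10 in bips, i.e.
// a 10% bump), matching common mempool replace-by-fee acceptance rules.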
queueContents, err := p.queue.FetchContents(ctx, unconfirmedNonce, maxTxsToRbf) if err != nil { - log.Warn("Failed to get tx queue contents", "err", err) + log.Error("Failed to get tx queue contents", "err", err) return minWait } for index, tx := range queueContents { @@ -564,10 +584,10 @@ type DataPosterConfig struct { UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` UseLevelDB bool `koanf:"use-leveldb" reload:"hot"` UseNoOpStorage bool `koanf:"use-noop-storage" reload:"hot"` - MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` - EnableLevelDB bool `koanf:"enable-leveldb" reload:"hot"` } // ConfigFetcher function type is used instead of directly passing config so @@ -582,7 +602,8 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Float64(prefix+".target-price-gwei", DefaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") - f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled") f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseLevelDB, "uses noop storage, it doesn't store anything") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) @@ -593,12 +614,12 @@ var DefaultDataPosterConfig = DataPosterConfig{ WaitForL1Finality: true, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 64, + MaxMempoolTransactions: 10, MinTipCapGwei: 0.05, + MaxTipCapGwei: 5, + NonceRbfSoftConfs: 2, UseLevelDB: false, UseNoOpStorage: false, - MaxTipCapGwei: 5, - EnableLevelDB: false, } var TestDataPosterConfig = DataPosterConfig{ @@ -607,10 +628,10 @@ var TestDataPosterConfig = DataPosterConfig{ WaitForL1Finality: false, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 64, + MaxMempoolTransactions: 10, MinTipCapGwei: 0.05, + MaxTipCapGwei: 5, + NonceRbfSoftConfs: 1, UseLevelDB: false, UseNoOpStorage: false, - MaxTipCapGwei: 5, - EnableLevelDB: false, } diff --git a/util/arbmath/math.go b/util/arbmath/math.go index a9758db1c0..467ee58a14 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -175,6 +175,11 @@ func BigAddByUint(augend *big.Int, addend uint64) *big.Int { return new(big.Int).Add(augend, UintToBig(addend)) } +// BigSubByUint subtracts a uint from a huge +func BigSubByUint(minuend *big.Int, subtrahend uint64) *big.Int { + return new(big.Int).Sub(minuend, UintToBig(subtrahend)) +} + // BigMulByFrac multiply a huge by a rational func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int { value = new(big.Int).Set(value) From 33790dee755740c01c34f02a7f8cad126b2f973d Mon Sep 
17 00:00:00 2001 From: Joshua Colvin Date: Thu, 17 Aug 2023 19:03:04 -0700 Subject: [PATCH 020/117] Improve help messages for relay options --- relay/relay.go | 2 +- wsbroadcastserver/wsbroadcastserver.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/relay/relay.go b/relay/relay.go index 81d931c0ce..bb07251190 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -175,7 +175,7 @@ func ConfigAddOptions(f *flag.FlagSet) { f.Bool("pprof", ConfigDefault.PProf, "enable pprof") genericconf.PProfAddOptions("pprof-cfg", f) NodeConfigAddOptions("node", f) - f.Int("queue", ConfigDefault.Queue, "size of relay queue") + f.Int("queue", ConfigDefault.Queue, "queue for incoming messages from sequencer") } type NodeConfig struct { diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 913eae81f3..014995cee0 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -83,7 +83,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".port", DefaultBroadcasterConfig.Port, "port to bind the relay feed output to") f.Duration(prefix+".ping", DefaultBroadcasterConfig.Ping, "duration for ping interval") f.Duration(prefix+".client-timeout", DefaultBroadcasterConfig.ClientTimeout, "duration to wait before timing out connections to client") - f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size") + f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size for HTTP to WS upgrade") f.Int(prefix+".workers", DefaultBroadcasterConfig.Workers, "number of threads to reserve for HTTP to WS upgrade") f.Int(prefix+".max-send-queue", DefaultBroadcasterConfig.MaxSendQueue, "maximum number of messages allowed to accumulate before client is disconnected") f.Bool(prefix+".require-version", DefaultBroadcasterConfig.RequireVersion, "don't connect if client version not present") From 8b7ff56f92a2b0332f2bc50ab8132c830f5cb57e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 17 Aug 2023 20:47:03 -0600 Subject: [PATCH 021/117] Add config option for mempool balance allocation --- arbnode/dataposter/data_poster.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index e4fe798a9c..73a0e92ce8 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -259,7 +259,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u latestBalance := p.balance balanceForTx := new(big.Int).Set(latestBalance) - if !config.UseNoOpStorage { + if config.AllocateMempoolBalance && !config.UseNoOpStorage { // We reserve half the balance for the first transaction, and then split the remaining balance for all after that. // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. 
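Taken on its own, the allocation rule those two comment lines describe works out to the following sketch (hedged: spendable and txsAfterFirst are illustrative names; only the halving for the first transaction appears in the hunk, and the even split of the remainder is an assumption about the elided code):

```go
package main

import (
	"fmt"
	"math/big"
)

// spendable sketches the rule: the first pending transaction may spend half
// the balance; each later mempool slot gets an equal share of the other half.
func spendable(balance *big.Int, isFirst bool, txsAfterFirst uint64) *big.Int {
	half := new(big.Int).Div(balance, big.NewInt(2))
	if isFirst || txsAfterFirst == 0 {
		return half
	}
	return new(big.Int).Div(half, new(big.Int).SetUint64(txsAfterFirst))
}

func main() {
	bal := big.NewInt(1_000_000_000)
	fmt.Println(spendable(bal, true, 9))  // 500000000 for the first tx
	fmt.Println(spendable(bal, false, 9)) // 55555555 for each later slot
}
```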
balanceForTx.Div(balanceForTx, common.Big2) @@ -586,6 +586,7 @@ type DataPosterConfig struct { MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` + AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseLevelDB bool `koanf:"use-leveldb" reload:"hot"` UseNoOpStorage bool `koanf:"use-noop-storage" reload:"hot"` } @@ -604,6 +605,7 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") + f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled") f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseLevelDB, "uses noop storage, it doesn't store anything") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) @@ -617,7 +619,8 @@ var DefaultDataPosterConfig = DataPosterConfig{ MaxMempoolTransactions: 10, MinTipCapGwei: 0.05, MaxTipCapGwei: 5, - NonceRbfSoftConfs: 2, + NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, UseLevelDB: false, UseNoOpStorage: false, } @@ -632,6 +635,7 @@ var TestDataPosterConfig = DataPosterConfig{ MinTipCapGwei: 0.05, MaxTipCapGwei: 5, NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, UseLevelDB: false, UseNoOpStorage: false, } From a5c667343a62d839baa5384141001bd3b182778f Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 18 Aug 2023 15:11:13 +0200 Subject: [PATCH 022/117] Run custom linter as go binary instead of plugin for golangci-lint, drop golangci-lint submodule --- .gitignore | 1 - .gitmodules | 3 --- .golangci.yml | 6 ------ Makefile | 4 +--- linter/golangci-lint | 1 - linter/koanf/koanf.go | 6 +++++- 6 files changed, 6 insertions(+), 15 deletions(-) delete mode 160000 linter/golangci-lint diff --git a/.gitignore b/.gitignore index 02cc86192b..f0eb5c2ec3 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,3 @@ yarn-error.log local/ testdata system_tests/test-data/* -linter/koanf/koanf.so diff --git a/.gitmodules b/.gitmodules index 11d9caa2aa..7c78791c78 100644 --- a/.gitmodules +++ b/.gitmodules @@ -20,6 +20,3 @@ [submodule "nitro-testnode"] path = nitro-testnode url = https://github.com/OffchainLabs/nitro-testnode.git -[submodule "linter/golangci-lint"] - path = linter/golangci-lint - url = https://github.com/golangci/golangci-lint diff --git a/.golangci.yml b/.golangci.yml index 47f471b5a1..e794cdb844 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,7 +23,6 @@ linters: - nilerr # ensure errors aren't mishandled - staticcheck # check for suspicious constructs - unused # check for unused constructs - - koanf # check for koanf configurations linters-settings: errcheck: @@ -52,8 +51,3 @@ linters-settings: disable: - shadow - fieldalignment - - custom: - koanf: - path: linter/koanf/koanf.so - description: Koanf configuration linter diff --git a/Makefile b/Makefile index 
71c051087a..896bdd6a6e 100644 --- a/Makefile +++ b/Makefile @@ -304,9 +304,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - make build -C linter/golangci-lint - go build -buildmode=plugin -o linter/koanf/koanf.so linter/koanf/koanf.go - ./linter/golangci-lint/golangci-lint run --fix + go run linter/koanf/koanf.go ./... yarn --cwd contracts solhint @touch $@ diff --git a/linter/golangci-lint b/linter/golangci-lint deleted file mode 160000 index 8000abaf0e..0000000000 --- a/linter/golangci-lint +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8000abaf0e6e28e8179864f0317349cecab47c05 diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go index 4aaddaa495..bc94a9c20e 100644 --- a/linter/koanf/koanf.go +++ b/linter/koanf/koanf.go @@ -9,8 +9,8 @@ import ( "unicode" "github.com/fatih/structtag" - "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" ) func New(conf any) ([]*analysis.Analyzer, error) { @@ -114,3 +114,7 @@ func normalize(s string) string { func isAlphanumeric(c rune) bool { return unicode.IsLetter(c) || unicode.IsDigit(c) } + +func main() { + singlechecker.Main(Analyzer) +} From ed4942d38adc5afe1918740d35c3e67d1204f372 Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 18 Aug 2023 16:02:53 +0200 Subject: [PATCH 023/117] Rename testdata to testsdata because testdata is already in .gitignore --- linter/koanf/koanf_test.go | 2 +- linter/testsdata/src/a/a.go | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 linter/testsdata/src/a/a.go diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go index 2e3e68b0f4..5582e61605 100644 --- a/linter/koanf/koanf_test.go +++ b/linter/koanf/koanf_test.go @@ -13,7 +13,7 @@ func TestAll(t *testing.T) { if err != nil { t.Fatalf("Failed to get wd: %s", err) } - testdata := filepath.Join(filepath.Dir(wd), "testdata") + testdata := filepath.Join(filepath.Dir(wd), "testsdata") res := analysistest.Run(t, testdata, analyzerForTests, "a") if cnt := countErrors(res); cnt != 1 { t.Errorf("analysistest.Run() got %v errors, expected 1", cnt) diff --git a/linter/testsdata/src/a/a.go b/linter/testsdata/src/a/a.go new file mode 100644 index 0000000000..ddf77b6ed1 --- /dev/null +++ b/linter/testsdata/src/a/a.go @@ -0,0 +1,11 @@ +package a + +type Config struct { + L2 int `koanf:"chain"` + LogLevel int `koanf:"log-level"` + LogType int `koanf:"log-type"` + Metrics int `koanf:"metrics"` + PProf int `koanf:"pprof"` + Node int `koanf:"node"` + Queue int `koanf:"queue"` +} From 89aa1131d3b04d85714d345aa5a326d47e021667 Mon Sep 17 00:00:00 2001 From: Nodar Date: Fri, 18 Aug 2023 17:01:00 +0200 Subject: [PATCH 024/117] Introduce linter for detecting pointer comparisons --- .gitignore | 1 - Makefile | 1 + go.mod | 3 +- go.sum | 2 + linter/pointercheck/pointer.go | 99 +++++++++++++++++++ linter/pointercheck/pointer_test.go | 31 ++++++ .../testdata/src/pointercheck/pointercheck.go | 34 +++++++ 7 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 linter/pointercheck/pointer.go create mode 100644 linter/pointercheck/pointer_test.go create mode 100644 linter/testdata/src/pointercheck/pointercheck.go diff --git a/.gitignore b/.gitignore index f0eb5c2ec3..60df842f0e 100644 --- a/.gitignore +++ b/.gitignore @@ -19,5 +19,4 @@ solgen/go/ target/ yarn-error.log local/ -testdata system_tests/test-data/* diff --git a/Makefile b/Makefile 
index 205025dfe9..d180a8cfff 100644 --- a/Makefile +++ b/Makefile @@ -304,6 +304,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make + go run linter/pointercheck/pointer.go ./... golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/go.mod b/go.mod index 5adfd19388..b0e0b09d08 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/ethereum/go-ethereum v1.10.26 + github.com/fatih/structtag v1.2.0 github.com/google/go-cmp v0.5.9 github.com/hashicorp/golang-lru/v2 v2.0.1 github.com/ipfs/go-cid v0.3.2 @@ -32,6 +33,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 golang.org/x/term v0.6.0 + golang.org/x/tools v0.7.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -257,7 +259,6 @@ require ( go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect google.golang.org/grpc v1.46.0 // indirect diff --git a/go.sum b/go.sum index 58155db124..e24fe60926 100644 --- a/go.sum +++ b/go.sum @@ -326,6 +326,8 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= diff --git a/linter/pointercheck/pointer.go b/linter/pointercheck/pointer.go new file mode 100644 index 0000000000..2054921029 --- /dev/null +++ b/linter/pointercheck/pointer.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" +) + +func New(conf any) ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +var Analyzer = &analysis.Analyzer{ + Name: "pointercheck", + Doc: "check for pointer comparison", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testpointercheck", + Doc: "check for pointer comparison (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// pointerCmpError indicates the position of pointer comparison. 
+type pointerCmpError struct {
+	Pos     token.Position
+	Message string
+}
+
+// Result is returned from the checkExpr function, and holds all the
+// pointer comparison errors.
+type Result struct {
+	Errors []pointerCmpError
+}
+
+func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
+	var ret Result
+	for _, f := range pass.Files {
+		ast.Inspect(f, func(node ast.Node) bool {
+			var res *Result
+			switch e := node.(type) {
+			case *ast.BinaryExpr:
+				res = checkExpr(pass, e)
+			default:
+			}
+			if res == nil {
+				return true
+			}
+			for _, err := range res.Errors {
+				ret.Errors = append(ret.Errors, err)
+				if !dryRun {
+					pass.Report(analysis.Diagnostic{
+						Pos:      pass.Fset.File(f.Pos()).Pos(err.Pos.Offset),
+						Message:  err.Message,
+						Category: "pointercheck",
+					})
+				}
+			}
+			return true
+		},
+		)
+	}
+	return ret, nil
+}
+
+func checkExpr(pass *analysis.Pass, e *ast.BinaryExpr) *Result {
+	if e.Op != token.EQL && e.Op != token.NEQ {
+		return nil
+	}
+	ret := &Result{}
+	if ptrIdent(pass, e.X) && ptrIdent(pass, e.Y) {
+		ret.Errors = append(ret.Errors, pointerCmpError{
+			Pos:     pass.Fset.Position(e.OpPos),
+			Message: fmt.Sprintf("comparison of two pointers in expression %q", e),
+		})
+	}
+	return ret
+}
+
+func ptrIdent(pass *analysis.Pass, e ast.Expr) bool {
+	if _, ok := e.(*ast.Ident); ok {
+		et := pass.TypesInfo.Types[e].Type
+		_, isPtr := (et).(*types.Pointer)
+		return isPtr
+	}
+	return false
+}
+
+func main() {
+	singlechecker.Main(Analyzer)
+}
diff --git a/linter/pointercheck/pointer_test.go b/linter/pointercheck/pointer_test.go
new file mode 100644
index 0000000000..6ed74a9685
--- /dev/null
+++ b/linter/pointercheck/pointer_test.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+func TestAll(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Failed to get wd: %s", err)
+	}
+	testdata := filepath.Join(filepath.Dir(wd), "testdata")
+	res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck")
+	if cnt := countErrors(res); cnt != 5 {
+		t.Errorf("analysistest.Run() got %v errors, expected 5", cnt)
+	}
+}
+
+func countErrors(errs []*analysistest.Result) int {
+	cnt := 0
+	for _, e := range errs {
+		if r, ok := e.Result.(Result); ok {
+			cnt += len(r.Errors)
+		}
+	}
+	return cnt
+}
diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linter/testdata/src/pointercheck/pointercheck.go
new file mode 100644
index 0000000000..55ebd18293
--- /dev/null
+++ b/linter/testdata/src/pointercheck/pointercheck.go
@@ -0,0 +1,34 @@
+package pointercheck
+
+import "fmt"
+
+type A struct {
+	x, y int
+}
+
+// pointerCmp compares pointers, sometimes inside nested expressions.
+func pointerCmp() {
+	a, b := &A{}, &A{}
+	// Simple comparisons.
+	if a != b {
+		fmt.Println("Not Equal")
+	}
+	if a == b {
+		fmt.Println("Equals")
+	}
+	// Nested binary expressions.
+	if (2 > 1) && (a != b) {
+		fmt.Println("Still not equal")
+	}
+	if (174%15 > 3) && (2 > 1 && (1+2 > 2 || a != b)) {
+		fmt.Println("Who knows at this point")
+	}
+	// Nested and inside unary operator.
+	if 10 > 5 && !(2 > 1 || a == b) {
+		fmt.Println("Not equal")
+	}
+	c, d := 1, 2
+	if &c != &d {
+		fmt.Println("Not equal")
+	}
+}

From c93e79a45f37bc729e2d83a70991f4078964b0f4 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Fri, 18 Aug 2023 17:18:21 +0200
Subject: [PATCH 025/117] rename 'testsdata' to 'testdata', drop it from
 .gitignore

---
 .gitignore                                | 1 -
 linter/koanf/koanf_test.go                | 2 +-
 linter/{testsdata => testdata}/src/a/a.go | 0
 3 files changed, 1 insertion(+), 2 deletions(-)
 rename linter/{testsdata => testdata}/src/a/a.go (100%)

diff --git a/.gitignore b/.gitignore
index f0eb5c2ec3..60df842f0e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,5 +19,4 @@ solgen/go/
 target/
 yarn-error.log
 local/
-testdata
 system_tests/test-data/*
diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go
index 5582e61605..2e3e68b0f4 100644
--- a/linter/koanf/koanf_test.go
+++ b/linter/koanf/koanf_test.go
@@ -13,7 +13,7 @@ func TestAll(t *testing.T) {
	if err != nil {
		t.Fatalf("Failed to get wd: %s", err)
	}
-	testdata := filepath.Join(filepath.Dir(wd), "testsdata")
+	testdata := filepath.Join(filepath.Dir(wd), "testdata")
	res := analysistest.Run(t, testdata, analyzerForTests, "a")
	if cnt := countErrors(res); cnt != 1 {
		t.Errorf("analysistest.Run() got %v errors, expected 1", cnt)
diff --git a/linter/testsdata/src/a/a.go b/linter/testdata/src/a/a.go
similarity index 100%
rename from linter/testsdata/src/a/a.go
rename to linter/testdata/src/a/a.go

From 00dfaed58a2037cdc5c15275eaebe8d73305c71a Mon Sep 17 00:00:00 2001
From: Nodar
Date: Fri, 18 Aug 2023 17:40:02 +0200
Subject: [PATCH 026/117] Drop pointer comparisons

---
 arbnode/dataposter/data_poster.go    | 2 +-
 arbnode/execution/executionengine.go | 2 +-
 arbnode/execution/tx_pre_checker.go  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go
index 1e7b74834e..713eca6dd5 100644
--- a/arbnode/dataposter/data_poster.go
+++ b/arbnode/dataposter/data_poster.go
@@ -321,7 +321,7 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr
 }

 func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error {
-	if prevTx != newTx {
+	if prevTx == nil || (newTx != nil && newTx.FullTx.Hash() != prevTx.FullTx.Hash()) {
		if err := p.saveTx(ctx, prevTx, newTx); err != nil {
			return err
		}
diff --git a/arbnode/execution/executionengine.go b/arbnode/execution/executionengine.go
index d8029650d7..71610b308c 100644
--- a/arbnode/execution/executionengine.go
+++ b/arbnode/execution/executionengine.go
@@ -599,7 +599,7 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) {
		s.latestBlockMutex.Lock()
		block := s.latestBlock
		s.latestBlockMutex.Unlock()
-		if block != lastBlock && block != nil {
+		if block != nil && (lastBlock == nil || block.TxHash() != lastBlock.TxHash()) {
			log.Info(
				"created block",
				"l2Block", block.Number(),
diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go
index 01cef6d7a4..4a0645e97b 100644
--- a/arbnode/execution/tx_pre_checker.go
+++ b/arbnode/execution/tx_pre_checker.go
@@ -170,7 +170,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
		oldHeader = previousHeader
		blocksTraversed++
	}
-	if oldHeader != header {
+	if oldHeader == nil || (header != nil && oldHeader.Hash() != header.Hash()) {
		secondOldStatedb, err := bc.StateAt(oldHeader.Root)
		if err != nil {
			return fmt.Errorf("failed to get old state: %w", err)
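These three rewrites are the motivation for the pointercheck linter above: `!=` on pointers compares identity, not contents, so two structurally identical values never compare equal. A small runnable sketch of the pitfall and of the content-based comparison the patch switches to (QueuedTx is a stand-in type, not nitro's storage.QueuedTransaction):

```go
package main

import "fmt"

// QueuedTx stands in for storage.QueuedTransaction; Hash plays the
// role of FullTx.Hash() in the patched sendTx above.
type QueuedTx struct{ Hash string }

// changed mirrors the patched condition: treat the tx as changed when there
// was no previous tx, or when the new tx's hash differs from the previous one.
func changed(prevTx, newTx *QueuedTx) bool {
	return prevTx == nil || (newTx != nil && newTx.Hash != prevTx.Hash)
}

func main() {
	a := &QueuedTx{Hash: "0xabc"}
	b := &QueuedTx{Hash: "0xabc"} // identical contents, distinct allocation
	fmt.Println(a != b)        // true: pointer comparison is misleading here
	fmt.Println(changed(a, b)) // false: contents match, nothing to re-save
}
```

From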
810887cd35aa10a590def3db03945a3ea308debe Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 18 Aug 2023 11:56:03 -0500 Subject: [PATCH 027/117] add sequencer coordinator management UI tool --- Dockerfile | 1 + Makefile | 5 +- .../rediscoordinator/redis_coordinator.go | 77 ++++++ .../seq-coordinator-manager.go | 249 ++++++++++++++++++ go.mod | 8 +- go.sum | 26 ++ 6 files changed, 364 insertions(+), 2 deletions(-) create mode 100644 cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go create mode 100644 cmd/seq-coordinator-manager/seq-coordinator-manager.go diff --git a/Dockerfile b/Dockerfile index 367d76d4b1..c1a28760c4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -202,6 +202,7 @@ WORKDIR /home/user COPY --from=node-builder /workspace/target/bin/nitro /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/ +COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/ COPY --from=machine-versions /workspace/machines /home/user/target/machines USER root RUN export DEBIAN_FRONTEND=noninteractive && \ diff --git a/Makefile b/Makefile index 205025dfe9..1358f961e9 100644 --- a/Makefile +++ b/Makefile @@ -88,7 +88,7 @@ push: lint test-go .make/fmt all: build build-replay-env test-gen-proofs @touch .make/all -build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val) +build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager) @printf $(done) build-node-deps: $(go_source) build-prover-header build-prover-lib build-jit .make/solgen .make/cbrotli-lib @@ -185,6 +185,9 @@ $(output_root)/bin/seq-coordinator-invalidate: $(DEP_PREDICATE) build-node-deps $(output_root)/bin/nitro-val: $(DEP_PREDICATE) build-node-deps go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/nitro-val" +$(output_root)/bin/seq-coordinator-manager: $(DEP_PREDICATE) build-node-deps + go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/seq-coordinator-manager" + # recompile wasm, but don't change timestamp unless files differ $(replay_wasm): $(DEP_PREDICATE) $(go_source) .make/solgen mkdir -p `dirname $(replay_wasm)` diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go new file mode 100644 index 0000000000..db3724240e --- /dev/null +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -0,0 +1,77 @@ +package rediscoordinator + +import ( + "context" + "errors" + "strings" + + "github.com/go-redis/redis/v8" + "github.com/offchainlabs/nitro/util/redisutil" +) + +type RedisCoordinator struct { + Client redis.UniversalClient +} + +func NewRedisCoordinator(redisURL string) (*RedisCoordinator, error) { + redisClient, err := redisutil.RedisClientFromURL(redisURL) + if err != nil { + return nil, err + } + + return &RedisCoordinator{ + Client: redisClient, + }, nil +} + +func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, map[string]int, error) { + prioritiesMap := make(map[string]int) + prioritiesString, err := rc.Client.Get(ctx, redisutil.PRIORITIES_KEY).Result() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return []string{}, prioritiesMap, err + } + priorities := strings.Split(prioritiesString, ",") + for _, url := range priorities { + prioritiesMap[url]++ + } + return priorities, 
prioritiesMap, nil +} + +func (rc *RedisCoordinator) GetLivelinessMap(ctx context.Context) (map[string]int, error) { + livelinessMap := make(map[string]int) + livelinessList, _, err := rc.Client.Scan(ctx, 0, redisutil.WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() + if err != nil { + return livelinessMap, err + } + for _, elem := range livelinessList { + url := strings.TrimPrefix(elem, redisutil.WANTS_LOCKOUT_KEY_PREFIX) + livelinessMap[url]++ + } + return livelinessMap, nil +} + +func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + prioritiesString := strings.Join(priorities, ",") + err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + } + return err +} + +// CurrentChosenSequencer retrieves the current chosen sequencer holding the lock +func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, error) { + current, err := c.Client.Get(ctx, redisutil.CHOSENSEQ_KEY).Result() + if errors.Is(err, redis.Nil) { + return "", nil + } + if err != nil { + return "", err + } + return current, nil +} diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go new file mode 100644 index 0000000000..5844e2e7e4 --- /dev/null +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -0,0 +1,249 @@ +package main + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/enescakir/emoji" + "github.com/ethereum/go-ethereum/log" + "github.com/gdamore/tcell/v2" + "github.com/offchainlabs/nitro/cmd/seq-coordinator-manager/rediscoordinator" + "github.com/rivo/tview" +) + +// Tview +var pages = tview.NewPages() +var app = tview.NewApplication() + +// Lists +var prioritySeqList = tview.NewList().ShowSecondaryText(false) +var nonPrioritySeqList = tview.NewList().ShowSecondaryText(false) + +// Forms +var addSeqForm = tview.NewForm() +var priorityForm = tview.NewForm() +var nonPriorityForm = tview.NewForm() + +// Sequencer coordinator managment UI data store +type manager struct { + redisCoordinator *rediscoordinator.RedisCoordinator + prioritiesMap map[string]int + livelinessMap map[string]int + priorityList []string + nonPriorityList []string +} + +func main() { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + args := os.Args[1:] + if len(args) != 1 { + fmt.Fprintf(os.Stderr, "Usage: redis-seq-manager [redis-url]\n") + os.Exit(1) + } + redisURL := args[0] + redisCoordinator, err := rediscoordinator.NewRedisCoordinator(redisURL) + if err != nil { + panic(err) + } + + seqManager := &manager{ + redisCoordinator: redisCoordinator, + prioritiesMap: make(map[string]int), + livelinessMap: make(map[string]int), + } + + seqManager.refreshAllLists(ctx) + seqManager.populateLists(ctx) + + prioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + nonPriorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n) + for i := 0; i < n; i++ { + priorities[i] = strconv.Itoa(i) + } + + target := index + priorityForm.Clear(true) + priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true) + priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + priorityForm.AddButton("Save", func() { + if target != index { + seqManager.updatePriorityList(ctx, 
index, target) + } + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + }) + + nonPrioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + priorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n+1) + for i := 0; i < n+1; i++ { + priorities[i] = strconv.Itoa(i) + } + + target := index + nonPriorityForm.Clear(true) + nonPriorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true) + nonPriorityForm.AddDropDown("Set priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + nonPriorityForm.AddButton("Save", func() { + seqManager.priorityList = append(seqManager.priorityList, seqManager.nonPriorityList[index]) + index = len(seqManager.priorityList) - 1 + seqManager.updatePriorityList(ctx, index, target) + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + }) + + // UI design + flex := tview.NewFlex() + priorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Priority List-----") + nonPriorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Not in priority list but online-----") + instructions := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("(r) to refresh \n(a) to add sequencer\n(q) to quit") + + flex.SetDirection(tview.FlexRow). + AddItem(priorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(prioritySeqList, 0, 2, true). + AddItem(priorityForm, 0, 3, false), 0, 12, false). + AddItem(nonPriorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(nonPrioritySeqList, 0, 2, true). + AddItem(nonPriorityForm, 0, 3, false), 0, 12, false). 
+ AddItem(instructions, 0, 2, false).SetBorder(true) + + flex.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Rune() == 114 { + seqManager.refreshAllLists(ctx) + priorityForm.Clear(true) + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + } else if event.Rune() == 97 { + addSeqForm.Clear(true) + seqManager.addSeqPriorityForm(ctx) + pages.SwitchToPage("Add Sequencer") + } else if event.Rune() == 113 { + app.Stop() + } + return event + }) + + pages.AddPage("Menu", flex, true, true) + pages.AddPage("Add Sequencer", addSeqForm, true, false) + + if err := app.SetRoot(pages, true).EnableMouse(true).Run(); err != nil { + panic(err) + } +} + +// updatePriorityList updates the list by changing the position of seq present at `index` to target +func (sm *manager) updatePriorityList(ctx context.Context, index int, target int) { + for i := index - 1; i >= target; i-- { + sm.priorityList[i], sm.priorityList[i+1] = sm.priorityList[i+1], sm.priorityList[i] + } + for i := index + 1; i <= target; i++ { + sm.priorityList[i], sm.priorityList[i-1] = sm.priorityList[i-1], sm.priorityList[i] + } + err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) + if err != nil { + log.Warn("Failed to update priority, reverting change", "sequencer", sm.priorityList[target], "err", err) + } + sm.refreshAllLists(ctx) +} + +// populateLists populates seq's in priority list and seq's that are online but not in priority +func (sm *manager) populateLists(ctx context.Context) { + prioritySeqList.Clear() + chosen, err := sm.redisCoordinator.CurrentChosenSequencer(ctx) + if err != nil { + panic(err) + } + for index, seqURL := range sm.priorityList { + sec := "" + if seqURL == chosen { + sec = fmt.Sprintf(" %vchosen", emoji.LeftArrow) + } + status := fmt.Sprintf("%v ", emoji.RedCircle) + if _, ok := sm.livelinessMap[seqURL]; ok { + status = fmt.Sprintf("%v ", emoji.GreenCircle) + } + prioritySeqList.AddItem(status+seqURL+sec, "", rune(48+index), nil).SetSecondaryTextColor(tcell.ColorPurple) + } + + nonPrioritySeqList.Clear() + status := fmt.Sprintf("%v ", emoji.GreenCircle) + for _, seqURL := range sm.nonPriorityList { + nonPrioritySeqList.AddItem(status+seqURL, "", rune(45), nil) + } +} + +// addSeqPriorityForm returns a form with fields to add a new sequencer to priority list +func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { + URL := "" + addSeqForm.AddInputField("Sequencer URL", "", 0, nil, func(url string) { + URL = url + }) + addSeqForm.AddButton("Cancel", func() { + priorityForm.Clear(true) + sm.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + addSeqForm.AddButton("Add", func() { + // check if url is valid, i.e it doesnt already exist in the priority list + if _, ok := sm.prioritiesMap[URL]; !ok && URL != "" { + sm.priorityList = append(sm.priorityList, URL) + err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) + if err != nil { + log.Warn("Failed to add sequencer to the priority list", URL) + } + sm.refreshAllLists(ctx) + } + sm.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + return addSeqForm +} + +// refreshAllLists gets the current status of all the lists displayed in the UI +func (sm *manager) refreshAllLists(ctx context.Context) { + sequencerURLList, mapping, err := sm.redisCoordinator.GetPriorities(ctx) + if err != nil { + panic(err) + } + sm.priorityList = sequencerURLList + sm.prioritiesMap = mapping + + mapping, err = sm.redisCoordinator.GetLivelinessMap(ctx) + if err != nil { + panic(err) + 
} + sm.livelinessMap = mapping + + urlList := []string{} + for url := range sm.livelinessMap { + if _, ok := sm.prioritiesMap[url]; !ok { + urlList = append(urlList, url) + } + } + sm.nonPriorityList = urlList +} diff --git a/go.mod b/go.mod index 5adfd19388..e8fa503196 100644 --- a/go.mod +++ b/go.mod @@ -86,11 +86,14 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/enescakir/emoji v1.0.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect + github.com/gdamore/encoding v1.0.0 // indirect + github.com/gdamore/tcell/v2 v2.6.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -188,6 +191,7 @@ require ( github.com/libp2p/go-reuseport v0.2.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect @@ -224,6 +228,8 @@ require ( github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect + github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 // indirect + github.com/rivo/uniseg v0.4.3 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect @@ -298,7 +304,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect diff --git a/go.sum b/go.sum index 58155db124..5f03dee5b3 100644 --- a/go.sum +++ b/go.sum @@ -310,6 +310,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/ github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -348,6 +350,10 @@ github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZ github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff 
h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.6.0 h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg= +github.com/gdamore/tcell/v2 v2.6.0/go.mod h1:be9omFATkdr0D9qewWW3d+MEvl5dha+Etb5y65J2H8Y= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -1140,6 +1146,8 @@ github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1174,6 +1182,8 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -1443,6 +1453,11 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 h1:ZyM/+FYnpbZsFWuCohniM56kRoHRB4r5EuIzXEYkpxo= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703/go.mod h1:nVwGv4MP47T0jvlk7KuTTjjuSmrGO4JF0iaiNt4bufE= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1631,6 +1646,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1774,6 +1790,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1835,6 +1852,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1857,6 +1875,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1948,13 +1967,18 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1966,6 +1990,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2035,6 +2060,7 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 9979eb1083cde85bbcc5ff24bee50c2bf6273f94 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 18 Aug 2023 11:35:42 -0600 Subject: [PATCH 028/117] Fix removed config option --- arbnode/dataposter/data_poster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 73a0e92ce8..979cbe3b7e 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -603,6 +603,7 @@ func DataPosterConfigAddOptions(prefix 
string, f *pflag.FlagSet) { f.Float64(prefix+".target-price-gwei", DefaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") + f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") From ef30f3187e85ba9d80b63a7a06c9e68ef4c30539 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 21 Aug 2023 10:25:56 -0500 Subject: [PATCH 029/117] add keyboard-only support with additional functionalities --- .../seq-coordinator-manager.go | 93 +++++++++++++++---- 1 file changed, 75 insertions(+), 18 deletions(-) diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index 5844e2e7e4..f39a810e9b 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -70,17 +70,37 @@ func main() { target := index priorityForm.Clear(true) - priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true) + priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:\nStatus:\nBlockNumber:", 0, 2, false, true) priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) { target = selection }) - priorityForm.AddButton("Save", func() { + priorityForm.AddButton("Update", func() { if target != index { seqManager.updatePriorityList(ctx, index, target) } + priorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Cancel", func() { + priorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Remove", func() { + url := seqManager.priorityList[0] + delete(seqManager.prioritiesMap, url) + seqManager.updatePriorityList(ctx, index, 0) + seqManager.priorityList = seqManager.priorityList[1:] + + priorityForm.Clear(true) seqManager.populateLists(ctx) pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) }) + priorityForm.SetFocus(1) + app.SetFocus(priorityForm) }) nonPrioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { @@ -98,14 +118,30 @@ func main() { nonPriorityForm.AddDropDown("Set priority to ->", priorities, index, func(priority string, selection int) { target = selection }) - nonPriorityForm.AddButton("Save", func() { - seqManager.priorityList = append(seqManager.priorityList, seqManager.nonPriorityList[index]) + nonPriorityForm.AddButton("Update", func() { + key := seqManager.nonPriorityList[index] + seqManager.priorityList = append(seqManager.priorityList, key) + seqManager.prioritiesMap[key]++ + index = len(seqManager.priorityList) - 1 
seqManager.updatePriorityList(ctx, index, target) + nonPriorityForm.Clear(true) seqManager.populateLists(ctx) pages.SwitchToPage("Menu") + if len(seqManager.nonPriorityList) > 0 { + app.SetFocus(nonPrioritySeqList) + } else { + app.SetFocus(prioritySeqList) + } + }) + nonPriorityForm.AddButton("Cancel", func() { + nonPriorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(nonPrioritySeqList) }) + nonPriorityForm.SetFocus(1) + app.SetFocus(nonPriorityForm) }) // UI design @@ -118,18 +154,18 @@ func main() { SetText("-----Not in priority list but online-----") instructions := tview.NewTextView(). SetTextColor(tcell.ColorYellow). - SetText("(r) to refresh \n(a) to add sequencer\n(q) to quit") + SetText("(r) to refresh\n(s) to save all changes\n(c) to switch between lists\n(a) to add sequencer\n(q) to quit\n(tab) to navigate") flex.SetDirection(tview.FlexRow). AddItem(priorityHeading, 0, 1, false). AddItem(tview.NewFlex(). AddItem(prioritySeqList, 0, 2, true). - AddItem(priorityForm, 0, 3, false), 0, 12, false). + AddItem(priorityForm, 0, 3, true), 0, 12, true). AddItem(nonPriorityHeading, 0, 1, false). AddItem(tview.NewFlex(). AddItem(nonPrioritySeqList, 0, 2, true). - AddItem(nonPriorityForm, 0, 3, false), 0, 12, false). - AddItem(instructions, 0, 2, false).SetBorder(true) + AddItem(nonPriorityForm, 0, 3, true), 0, 12, true). + AddItem(instructions, 0, 3, false).SetBorder(true) flex.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { if event.Rune() == 114 { @@ -138,10 +174,24 @@ func main() { nonPriorityForm.Clear(true) seqManager.populateLists(ctx) pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + } else if event.Rune() == 115 { + seqManager.pushUpdates(ctx) + priorityForm.Clear(true) + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) } else if event.Rune() == 97 { addSeqForm.Clear(true) seqManager.addSeqPriorityForm(ctx) pages.SwitchToPage("Add Sequencer") + } else if event.Rune() == 99 { + if prioritySeqList.HasFocus() { + app.SetFocus(nonPrioritySeqList) + } else { + app.SetFocus(prioritySeqList) + } } else if event.Rune() == 113 { app.Stop() } @@ -164,11 +214,14 @@ func (sm *manager) updatePriorityList(ctx context.Context, index int, target int for i := index + 1; i <= target; i++ { sm.priorityList[i], sm.priorityList[i-1] = sm.priorityList[i-1], sm.priorityList[i] } - err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) - if err != nil { - log.Warn("Failed to update priority, reverting change", "sequencer", sm.priorityList[target], "err", err) + + urlList := []string{} + for url := range sm.livelinessMap { + if _, ok := sm.prioritiesMap[url]; !ok { + urlList = append(urlList, url) + } } - sm.refreshAllLists(ctx) + sm.nonPriorityList = urlList } // populateLists populates seq's in priority list and seq's that are online but not in priority @@ -187,7 +240,7 @@ func (sm *manager) populateLists(ctx context.Context) { if _, ok := sm.livelinessMap[seqURL]; ok { status = fmt.Sprintf("%v ", emoji.GreenCircle) } - prioritySeqList.AddItem(status+seqURL+sec, "", rune(48+index), nil).SetSecondaryTextColor(tcell.ColorPurple) + prioritySeqList.AddItem(status+seqURL+sec, "", int32(48+index), nil).SetSecondaryTextColor(tcell.ColorPurple) } nonPrioritySeqList.Clear() @@ -212,11 +265,6 @@ func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { // check if url is valid, i.e it doesnt already exist in the priority list if _, ok := sm.prioritiesMap[URL]; !ok && URL != "" 
{ sm.priorityList = append(sm.priorityList, URL) - err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) - if err != nil { - log.Warn("Failed to add sequencer to the priority list", URL) - } - sm.refreshAllLists(ctx) } sm.populateLists(ctx) pages.SwitchToPage("Menu") @@ -224,6 +272,15 @@ func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { return addSeqForm } +// pushUpdates pushes the local changes to the redis server +func (sm *manager) pushUpdates(ctx context.Context) { + err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) + if err != nil { + log.Warn("Failed to push local changes to the priority list") + } + sm.refreshAllLists(ctx) +} + // refreshAllLists gets the current status of all the lists displayed in the UI func (sm *manager) refreshAllLists(ctx context.Context) { sequencerURLList, mapping, err := sm.redisCoordinator.GetPriorities(ctx) From 916b5cec36ba77c1cf67d61926c2d96d0374cd69 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 21 Aug 2023 17:54:48 +0200 Subject: [PATCH 030/117] Fix flag initializations in sample_hmac.go, init.go and data_poster.go --- arbnode/dataposter/data_poster.go | 2 +- cmd/nitro/init.go | 2 +- util/signature/simple_hmac.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 979cbe3b7e..dfcb1f4ba1 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -608,7 +608,7 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled") - f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseLevelDB, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 6480526897..bdba7c1210 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -86,7 +86,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.") f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.") - f.Bool(prefix+".empty", InitConfigDefault.DevInit, "init with empty state") + f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state") f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 
0 disables.") diff --git a/util/signature/simple_hmac.go b/util/signature/simple_hmac.go index b1c683742b..4899b5c22c 100644 --- a/util/signature/simple_hmac.go +++ b/util/signature/simple_hmac.go @@ -58,7 +58,7 @@ func SimpleHmacDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { func SimpleHmacConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".signing-key", EmptySimpleHmacConfig.SigningKey, "a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it") - f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.SigningKey, "a fallback key used for message verification") + f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.FallbackVerificationKey, "a fallback key used for message verification") SimpleHmacDangerousConfigAddOptions(prefix+".dangerous", f) } From f95dd315b720dc149f46821691e482fabb97f938 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 21 Aug 2023 18:00:21 +0200 Subject: [PATCH 031/117] Implement linter detection of non-matching flag and filed name --- linter/koanf/koanf.go | 100 +++++++++++++++++++++++++++++++++++++ linter/koanf/koanf_test.go | 4 +- linter/testdata/src/a/a.go | 27 ++++++++++ 3 files changed, 129 insertions(+), 2 deletions(-) diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go index bc94a9c20e..8dbb392cb4 100644 --- a/linter/koanf/koanf.go +++ b/linter/koanf/koanf.go @@ -51,6 +51,8 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { switch v := node.(type) { case *ast.StructType: res = checkStruct(pass, v) + case *ast.FuncDecl: + res = checkFlagDefs(pass, v) default: } for _, err := range res.Errors { @@ -70,6 +72,104 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { return ret, nil } +func containsFlagSet(params []*ast.Field) bool { + for _, p := range params { + se, ok := p.Type.(*ast.StarExpr) + if !ok { + continue + } + sle, ok := se.X.(*ast.SelectorExpr) + if !ok { + continue + } + if sle.Sel.Name == "FlagSet" { + return true + } + } + return false +} + +// checkFlagDefs checks flag definitions in the function. +// Result contains list of errors where flag name doesn't match field name. +func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl) Result { + // Ignore functions that does not get flagset as parameter. + if !containsFlagSet(f.Type.Params.List) { + return Result{} + } + var res Result + for _, s := range f.Body.List { + es, ok := s.(*ast.ExprStmt) + if !ok { + continue + } + callE, ok := es.X.(*ast.CallExpr) + if !ok { + continue + } + if len(callE.Args) != 3 { + continue + } + sl, ok := extractStrLit(callE.Args[0]) + if !ok { + continue + } + s, ok := selector(callE.Args[1]) + if !ok { + continue + } + if normSL := normalize(sl); !strings.EqualFold(normSL, s) { + res.Errors = append(res.Errors, koanfError{ + Pos: pass.Fset.Position(f.Pos()), + Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s), + }) + } + + } + return res +} + +func selector(e ast.Expr) (string, bool) { + n, ok := e.(ast.Node) + if !ok { + return "", false + } + se, ok := n.(*ast.SelectorExpr) + if !ok { + return "", false + } + return se.Sel.Name, true +} + +// Extracts literal from expression that is either: +// - string literal or +// - sum of variable and string literal. +// E.g. +// strLitFromSum(`"max-size"`) = "max-size" +// - strLitFromSum(`prefix + ".enable"“) = ".enable". 
+func extractStrLit(e ast.Expr) (string, bool) { + if s, ok := strLit(e); ok { + return s, true + } + if be, ok := e.(*ast.BinaryExpr); ok { + if be.Op == token.ADD { + if s, ok := strLit(be.Y); ok { + // Drop the prefix dot. + return s[1:], true + } + } + } + return "", false +} + +func strLit(e ast.Expr) (string, bool) { + if s, ok := e.(*ast.BasicLit); ok { + if s.Kind == token.STRING { + return strings.Trim(s.Value, "\""), true + } + } + return "", false +} + func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { var res Result for _, f := range s.Fields.List { diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go index 2e3e68b0f4..e3ad5e6043 100644 --- a/linter/koanf/koanf_test.go +++ b/linter/koanf/koanf_test.go @@ -15,8 +15,8 @@ func TestAll(t *testing.T) { } testdata := filepath.Join(filepath.Dir(wd), "testdata") res := analysistest.Run(t, testdata, analyzerForTests, "a") - if cnt := countErrors(res); cnt != 1 { - t.Errorf("analysistest.Run() got %v errors, expected 1", cnt) + if cnt := countErrors(res); cnt != 3 { + t.Errorf("analysistest.Run() got %v errors, expected 3", cnt) } } diff --git a/linter/testdata/src/a/a.go b/linter/testdata/src/a/a.go index ddf77b6ed1..86b7739108 100644 --- a/linter/testdata/src/a/a.go +++ b/linter/testdata/src/a/a.go @@ -1,6 +1,11 @@ package a +import ( + "flag" +) + type Config struct { + // Field name doesn't match koanf tag. L2 int `koanf:"chain"` LogLevel int `koanf:"log-level"` LogType int `koanf:"log-type"` @@ -9,3 +14,25 @@ type Config struct { Node int `koanf:"node"` Queue int `koanf:"queue"` } + +type BatchPosterConfig struct { + Enable bool `koanf:"enable"` + MaxSize int `koanf:"max-size" reload:"hot"` +} + +// Flag names don't match field names from default config. +// Contains 2 errors. 
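// (Editor's note: the two mismatches below are ".enabled" vs. the field
// Enable, normalized "enabled" != "enable", and "max-sz" vs. the field
// MaxSize, normalized "maxsz" != "maxsize". Together with the mismatched
// koanf tag on Config.L2 above, that yields the 3 errors the updated
// koanf_test.go now expects.)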
+func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enabled", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
+	f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+}
+
+func ConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
+	f.Int("max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+}
+
+var DefaultBatchPosterConfig = BatchPosterConfig{
+	Enable:  false,
+	MaxSize: 100000,
+}

From f8d72581bd06fd1221c75a70f92c4be8bb70dc03 Mon Sep 17 00:00:00 2001
From: ganeshvanahalli
Date: Mon, 21 Aug 2023 12:15:57 -0500
Subject: [PATCH 032/117] typo fix

---
 cmd/seq-coordinator-manager/seq-coordinator-manager.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go
index f39a810e9b..b8a4a47ac7 100644
--- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go
+++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go
@@ -41,7 +41,7 @@ func main() {
 	args := os.Args[1:]
 	if len(args) != 1 {
-		fmt.Fprintf(os.Stderr, "Usage: redis-seq-manager [redis-url]\n")
+		fmt.Fprintf(os.Stderr, "Usage: seq-coordinator-manager [redis-url]\n")
 		os.Exit(1)
 	}
 	redisURL := args[0]
@@ -70,7 +70,7 @@ func main() {
 			target := index
 
 			priorityForm.Clear(true)
-			priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:\nStatus:\nBlockNumber:", 0, 2, false, true)
+			priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true)
 			priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) {
 				target = selection
 			})

From 309bbb42d3d5733637eb993c67187b090787d6e2 Mon Sep 17 00:00:00 2001
From: ganeshvanahalli
Date: Mon, 21 Aug 2023 13:16:35 -0500
Subject: [PATCH 033/117] remove additional details and update list numbering
 to accommodate longer lists

---
 .../seq-coordinator-manager.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go
index b8a4a47ac7..eb28f6023c 100644
--- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go
+++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go
@@ -70,7 +70,6 @@ func main() {
 			target := index
 
 			priorityForm.Clear(true)
-			priorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true)
 			priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) {
 				target = selection
 			})
@@ -99,7 +98,7 @@ func main() {
 				pages.SwitchToPage("Menu")
 				app.SetFocus(prioritySeqList)
 			})
-			priorityForm.SetFocus(1)
+			priorityForm.SetFocus(0)
 			app.SetFocus(priorityForm)
 		})
 
@@ -114,7 +113,6 @@ func main() {
 			target := index
 
 			nonPriorityForm.Clear(true)
-			nonPriorityForm.AddTextView("Additional details:", "Status:\nBlockNumber:", 0, 2, false, true)
 			nonPriorityForm.AddDropDown("Set priority to ->", priorities, index, func(priority string, selection int) {
 				target = selection
 			})
@@ -140,7 +138,7 @@ func main() {
 				pages.SwitchToPage("Menu")
 				app.SetFocus(nonPrioritySeqList)
 			})
-			nonPriorityForm.SetFocus(1)
+			nonPriorityForm.SetFocus(0)
 			app.SetFocus(nonPriorityForm)
 		})
 
@@ -236,17 +234,17 @@ func (sm *manager) populateLists(ctx context.Context) {
 		if seqURL == chosen {
 			sec = fmt.Sprintf(" %vchosen", emoji.LeftArrow)
 		}
-		status := fmt.Sprintf("%v ", 
emoji.RedCircle) + status := fmt.Sprintf("(%d) %v ", index, emoji.RedCircle) if _, ok := sm.livelinessMap[seqURL]; ok { - status = fmt.Sprintf("%v ", emoji.GreenCircle) + status = fmt.Sprintf("(%d) %v ", index, emoji.GreenCircle) } - prioritySeqList.AddItem(status+seqURL+sec, "", int32(48+index), nil).SetSecondaryTextColor(tcell.ColorPurple) + prioritySeqList.AddItem(status+seqURL+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple) } nonPrioritySeqList.Clear() - status := fmt.Sprintf("%v ", emoji.GreenCircle) + status := fmt.Sprintf("(-) %v ", emoji.GreenCircle) for _, seqURL := range sm.nonPriorityList { - nonPrioritySeqList.AddItem(status+seqURL, "", rune(45), nil) + nonPrioritySeqList.AddItem(status+seqURL, "", rune(0), nil) } } From eba3d33cb3f79d424e3ed564c86523ee7bb53438 Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 22 Aug 2023 16:39:00 +0200 Subject: [PATCH 034/117] Fix linter error where we implicitly alias memory in for loop --- arbos/arbosState/initialize.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index e98ab08485..9f24d96765 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -189,7 +189,8 @@ func initializeRetryables(statedb *state.StateDB, rs *retryables.RetryableState, for _, r := range retryablesList { var to *common.Address if r.To != (common.Address{}) { - to = &r.To + addr := r.To + to = &addr } statedb.AddBalance(retryables.RetryableEscrowAddress(r.Id), r.Callvalue) _, err := rs.CreateRetryable(r.Id, r.Timeout, r.From, to, r.Callvalue, r.Beneficiary, r.Calldata) From d0e6c7c8dbb0e73322ce1a9e0251cf9eb884dacb Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 22 Aug 2023 17:38:54 +0200 Subject: [PATCH 035/117] Detect pointers in selectors --- linter/pointercheck/pointer.go | 9 +++++---- linter/pointercheck/pointer_test.go | 4 ++-- linter/testdata/src/pointercheck/pointercheck.go | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/linter/pointercheck/pointer.go b/linter/pointercheck/pointer.go index 2054921029..6500b01222 100644 --- a/linter/pointercheck/pointer.go +++ b/linter/pointercheck/pointer.go @@ -78,16 +78,17 @@ func checkExpr(pass *analysis.Pass, e *ast.BinaryExpr) *Result { ret := &Result{} if ptrIdent(pass, e.X) && ptrIdent(pass, e.Y) { ret.Errors = append(ret.Errors, pointerCmpError{ - Pos: pass.Fset.Position(e.OpPos), - Message: fmt.Sprintf("comparison of two pointers in expression %q", e), + Pos: pass.Fset.Position(e.Pos()), + Message: fmt.Sprintf("comparison of two pointers in expression %v", e), }) } return ret } func ptrIdent(pass *analysis.Pass, e ast.Expr) bool { - if _, ok := e.(*ast.Ident); ok { - et := pass.TypesInfo.Types[e].Type + switch tp := e.(type) { + case *ast.Ident, *ast.SelectorExpr: + et := pass.TypesInfo.Types[tp].Type _, isPtr := (et).(*types.Pointer) return isPtr } diff --git a/linter/pointercheck/pointer_test.go b/linter/pointercheck/pointer_test.go index 6ed74a9685..290e3826de 100644 --- a/linter/pointercheck/pointer_test.go +++ b/linter/pointercheck/pointer_test.go @@ -15,8 +15,8 @@ func TestAll(t *testing.T) { } testdata := filepath.Join(filepath.Dir(wd), "testdata") res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck") - if cnt := countErrors(res); cnt != 5 { - t.Errorf("analysistest.Run() got %v errors, expected 5", cnt) + if cnt := countErrors(res); cnt != 6 { + t.Errorf("analysistest.Run() got %v errors, expected 6", cnt) } } diff --git 
a/linter/testdata/src/pointercheck/pointercheck.go b/linter/testdata/src/pointercheck/pointercheck.go index 55ebd18293..f63fdd1743 100644 --- a/linter/testdata/src/pointercheck/pointercheck.go +++ b/linter/testdata/src/pointercheck/pointercheck.go @@ -32,3 +32,19 @@ func pointerCmp() { fmt.Println("Not equal") } } + +func legitCmps() { + a, b := &A{}, &A{} + if a.x == b.x { + fmt.Println("Allowed") + } +} + +type cache struct { + dirty *A +} + +// matches does pointer comparison. +func (c *cache) matches(a *A) bool { + return c.dirty == a +} From 90c1d460a6031220d8a65a91d284f7157f847c4b Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 22 Aug 2023 17:40:25 +0200 Subject: [PATCH 036/117] compare block hash instead of txHash when comparing block pointers --- arbnode/execution/executionengine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/execution/executionengine.go b/arbnode/execution/executionengine.go index 71610b308c..da01e27983 100644 --- a/arbnode/execution/executionengine.go +++ b/arbnode/execution/executionengine.go @@ -599,7 +599,7 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) { s.latestBlockMutex.Lock() block := s.latestBlock s.latestBlockMutex.Unlock() - if block != nil && (lastBlock == nil || block.TxHash() != lastBlock.TxHash()) { + if block != nil && (lastBlock == nil || block.Hash() != lastBlock.Hash()) { log.Info( "created block", "l2Block", block.Number(), From 182794c72cddf50b8d221c3b7192aa884ed72ee7 Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 22 Aug 2023 18:03:20 +0200 Subject: [PATCH 037/117] Drop pointer comparison for selectors --- arbnode/execution/sequencer.go | 2 +- arbos/arbostypes/incomingmessage.go | 11 +++++++++-- util/headerreader/header_reader.go | 9 ++++++++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go index ea818beb6b..7065bd87d9 100644 --- a/arbnode/execution/sequencer.go +++ b/arbnode/execution/sequencer.go @@ -184,7 +184,7 @@ func (c *nonceCache) matches(header *types.Header) bool { if c.dirty != nil { // The header is updated as the block is built, // so instead of checking its hash, we do a pointer comparison. 
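// (Editor's sketch of the pitfall patches 035-037 address, assuming
// go-ethereum's types.Header: pointer equality only says that two variables
// reference the same allocation, so logically identical headers compare
// unequal, and the comparison breaks as soon as one side is a copy.
//
//	a := &types.Header{Number: big.NewInt(1)}
//	b := &types.Header{Number: big.NewInt(1)}
//	same := a == b                // false: distinct allocations, same contents
//	equal := a.Hash() == b.Hash() // true: identical derived hash
//
// Hence the pointer comparison removed below is replaced with
// headerreader.HeadersEqual, which compares hashes and also handles nil
// operands safely.)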
-		return c.dirty == header
+		return headerreader.HeadersEqual(c.dirty, header)
 	}
 	return c.block == header.ParentHash
 }

diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go
index e9a5466d46..60f53cdaab 100644
--- a/arbos/arbostypes/incomingmessage.go
+++ b/arbos/arbostypes/incomingmessage.go
@@ -127,14 +127,21 @@ func (msg *L1IncomingMessage) Equals(other *L1IncomingMessage) bool {
 	return msg.Header.Equals(other.Header) && bytes.Equal(msg.L2msg, other.L2msg)
 }
 
+func hashesEqual(ha, hb *common.Hash) bool {
+	if (ha == nil) != (hb == nil) {
+		return false
+	}
+	return (ha == nil) && (hb == nil) || *ha == *hb
+}
+
 func (h *L1IncomingMessageHeader) Equals(other *L1IncomingMessageHeader) bool {
 	// These are all non-pointer types so it's safe to use the == operator
 	return h.Kind == other.Kind &&
 		h.Poster == other.Poster &&
 		h.BlockNumber == other.BlockNumber &&
 		h.Timestamp == other.Timestamp &&
-		h.RequestId == other.RequestId &&
-		h.L1BaseFee == other.L1BaseFee
+		hashesEqual(h.RequestId, other.RequestId) &&
+		arbmath.BigEquals(h.L1BaseFee, other.L1BaseFee)
 }
 
 func ComputeBatchGasCost(data []byte) uint64 {

diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go
index 28fef8ee07..87cc6d69b0 100644
--- a/util/headerreader/header_reader.go
+++ b/util/headerreader/header_reader.go
@@ -393,6 +393,13 @@ func headerIndicatesFinalitySupport(header *types.Header) bool {
 	return false
 }
 
+func HeadersEqual(ha, hb *types.Header) bool {
+	if (ha == nil) != (hb == nil) {
+		return false
+	}
+	return (ha == nil) && (hb == nil) || ha.Hash() == hb.Hash()
+}
+
 func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.Header, error) {
 	c.mutex.Lock()
 	defer c.mutex.Unlock()
@@ -400,7 +407,7 @@ func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.H
 	if err != nil {
 		return nil, err
 	}
-	if currentHead == c.headWhenCached {
+	if HeadersEqual(currentHead, c.headWhenCached) {
 		return c.header, nil
 	}
 	if !s.config().UseFinalityData || !headerIndicatesFinalitySupport(currentHead) {

From 93cb20b2f51698164e4b5a9f217a1b0868b2a423 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Tue, 22 Aug 2023 18:07:57 +0200
Subject: [PATCH 038/117] Use headerreader.HeadersEqual in execution to
 compare headers

---
 arbnode/execution/tx_pre_checker.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go
index 4a0645e97b..5dae32a192 100644
--- a/arbnode/execution/tx_pre_checker.go
+++ b/arbnode/execution/tx_pre_checker.go
@@ -18,6 +18,7 @@ import (
 	"github.com/offchainlabs/nitro/arbos/arbosState"
 	"github.com/offchainlabs/nitro/arbos/l1pricing"
 	"github.com/offchainlabs/nitro/util/arbmath"
+	"github.com/offchainlabs/nitro/util/headerreader"
 	flag "github.com/spf13/pflag"
 )
 
@@ -170,7 +171,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
 		oldHeader = previousHeader
 		blocksTraversed++
 	}
-	if oldHeader == nil || (header != nil && oldHeader.Hash() != header.Hash()) {
+	if headerreader.HeadersEqual(oldHeader, header) {
 		secondOldStatedb, err := bc.StateAt(oldHeader.Root)
 		if err != nil {
 			return fmt.Errorf("failed to get old state: %w", err)

From 12811e290e002cea2a1aafa30a14d96c7c4ae8a1 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Tue, 22 Aug 2023 18:55:31 +0200
Subject: [PATCH 039/117] Add system_tests/testdata and arbos/testdata to
 .gitignore so that fuzzing output will be ignored

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff
--git a/.gitignore b/.gitignore index 60df842f0e..f9b920e4fe 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ target/ yarn-error.log local/ system_tests/test-data/* +system_tests/testdata/* +arbos/testdata/* \ No newline at end of file From 9099b51689b53eb5249bef134e77470e74bb79ea Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 22 Aug 2023 18:56:35 +0200 Subject: [PATCH 040/117] Add blank line to .gitignore, otherwise github complains --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f9b920e4fe..8a628e29c4 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,4 @@ yarn-error.log local/ system_tests/test-data/* system_tests/testdata/* -arbos/testdata/* \ No newline at end of file +arbos/testdata/* From 6555a39b15c1b296a9455abf2b7ab9e4b4569a54 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Aug 2023 13:34:36 -0600 Subject: [PATCH 041/117] Bump go-ethereum pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index d312afd03b..88b1bd3b54 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit d312afd03bba77aa2b4ea36e80b7308cd6528e80 +Subproject commit 88b1bd3b5465725c6ecaec8b4d124c175d75268d From e1b438cf1cf0c42f0fe78f93bea9cf9cbb8a6bd1 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 22 Aug 2023 13:18:58 -0700 Subject: [PATCH 042/117] Update go-ethereum submod to latest merge-v1.12.0 --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 0685207850..c905292f8a 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 068520785011e63b5760c2195676674647269337 +Subproject commit c905292f8af601f7fca261e65a7d4bc144261e29 From cf355d96b2dd5a60bc6305d7d13e027855396910 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Aug 2023 15:31:33 -0600 Subject: [PATCH 043/117] Fix running a watchtower staker without a wallet --- arbnode/node.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index a4025429c6..42435838ca 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -543,8 +543,13 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error { return nil } -func ValidatorDataposter(db ethdb.Database, l1Reader *headerreader.HeaderReader, - transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor) (*dataposter.DataPoster, error) { +func ValidatorDataposter( + db ethdb.Database, l1Reader *headerreader.HeaderReader, + transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor, +) (*dataposter.DataPoster, error) { + if transactOpts == nil { + return nil, nil + } cfg := cfgFetcher.Get() mdRetriever := func(ctx context.Context, blockNum *big.Int) ([]byte, error) { return nil, nil From 0b72ffe08125012051720dbdf61ef6f3274810a5 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Aug 2023 16:52:32 -0600 Subject: [PATCH 044/117] Add sepolia-rollup testnet chain information --- cmd/chaininfo/arbitrum_chain_info.json | 51 +++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 5352f9760f..224cd469c0 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -214,5 +214,54 @@ "GenesisBlockNum": 0 } } + }, + { + "chain-id": 421614, + "parent-chain-id": 11155111, + "chain-name": "sepolia-rollup", + 
"sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc", + "feed-url": "https://sepolia-rollup.arbitrum.io/feed", + "chain-config": + { + "chainId": 421614, + "homesteadBlock": 0, + "daoForkBlock": null, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "clique": + { + "period": 0, + "epoch": 0 + }, + "arbitrum": + { + "EnableArbOS": true, + "AllowDebugPrecompiles": false, + "DataAvailabilityCommittee": false, + "InitialArbOSVersion": 10, + "InitialChainOwner": "0x71B61c2E250AFa05dFc36304D6c91501bE0965D8", + "GenesisBlockNum": 0 + } + }, + "rollup": + { + "bridge": "0x38f918D0E9F1b721EDaA41302E399fa1B79333a9", + "inbox": "0xaAe29B0366299461418F5324a79Afc425BE5ae21", + "sequencer-inbox": "0x6c97864CE4bEf387dE0b3310A44230f7E3F1be0D", + "rollup": "0xd80810638dbDF9081b72C1B33c65375e807281C8", + "validator-utils": "0x1f6860C3cac255fFFa72B7410b1183c3a0D261e0", + "validator-wallet-creator": "0x894fC71fA0A666352824EC954B401573C861D664", + "deployed-at": 4139226 + } } -] \ No newline at end of file +] From b9ad5a1baa5c9118cfeb099a3c64d085e4445fa6 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 22 Aug 2023 21:26:05 -0600 Subject: [PATCH 045/117] Fix sepolia-rollup feed connection scheme --- cmd/chaininfo/arbitrum_chain_info.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 224cd469c0..f5fa56102c 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -220,7 +220,7 @@ "parent-chain-id": 11155111, "chain-name": "sepolia-rollup", "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc", - "feed-url": "https://sepolia-rollup.arbitrum.io/feed", + "feed-url": "wss://sepolia-rollup.arbitrum.io/feed", "chain-config": { "chainId": 421614, From d464cc7215349573b3a0ab0ec75cb81064e970ee Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 23 Aug 2023 14:25:52 +0200 Subject: [PATCH 046/117] Add parenthesis in condition to make it more explicit, do inequality of headers instead of equality in execution package --- arbnode/execution/tx_pre_checker.go | 2 +- arbos/arbostypes/incomingmessage.go | 2 +- util/headerreader/header_reader.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go index 5dae32a192..ee494fba3d 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/arbnode/execution/tx_pre_checker.go @@ -171,7 +171,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty oldHeader = previousHeader blocksTraversed++ } - if headerreader.HeadersEqual(oldHeader, header) { + if !headerreader.HeadersEqual(oldHeader, header) { secondOldStatedb, err := bc.StateAt(oldHeader.Root) if err != nil { return fmt.Errorf("failed to get old state: %w", err) diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go index 60f53cdaab..04ce8ebe2e 100644 --- a/arbos/arbostypes/incomingmessage.go +++ b/arbos/arbostypes/incomingmessage.go @@ -131,7 +131,7 @@ func hashesEqual(ha, hb *common.Hash) bool { if (ha == nil) != (hb == nil) { return false } - return (ha == nil) && (hb == nil) || *ha == *hb + return (ha == nil && hb == nil) || 
*ha == *hb
 }
 
 func (h *L1IncomingMessageHeader) Equals(other *L1IncomingMessageHeader) bool {

diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go
index 87cc6d69b0..739a96b438 100644
--- a/util/headerreader/header_reader.go
+++ b/util/headerreader/header_reader.go
@@ -397,7 +397,7 @@ func HeadersEqual(ha, hb *types.Header) bool {
 	if (ha == nil) != (hb == nil) {
 		return false
 	}
-	return (ha == nil) && (hb == nil) || ha.Hash() == hb.Hash()
+	return (ha == nil && hb == nil) || ha.Hash() == hb.Hash()
 }
 
 func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.Header, error) {

From c217d9f705181fbfb35a3fe3ed9ee3e5e8d782f4 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Wed, 23 Aug 2023 14:31:00 +0200
Subject: [PATCH 047/117] update method comment in sequencer.go

---
 arbnode/execution/sequencer.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go
index 5bb097ace6..402958399d 100644
--- a/arbnode/execution/sequencer.go
+++ b/arbnode/execution/sequencer.go
@@ -182,8 +182,8 @@ func newNonceCache(size int) *nonceCache {
 
 func (c *nonceCache) matches(header *types.Header) bool {
 	if c.dirty != nil {
-		// The header is updated as the block is built,
-		// so instead of checking its hash, we do a pointer comparison.
+		// Note, even though the content of the header changes, c.dirty points to the
+		// same header, hence the hashes will be the same and this check will pass.
 		return headerreader.HeadersEqual(c.dirty, header)
 	}
 	return c.block == header.ParentHash
 }

From 85faf666a4572d7a698056504efc31eda5aa0b72 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Wed, 23 Aug 2023 14:33:15 +0200
Subject: [PATCH 048/117] Drop nil check of newTx in dataposter

---
 arbnode/dataposter/data_poster.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go
index dd8366208f..3d936485ec 100644
--- a/arbnode/dataposter/data_poster.go
+++ b/arbnode/dataposter/data_poster.go
@@ -341,7 +341,7 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr
 }
 
 func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error {
-	if prevTx == nil || (newTx != nil && newTx.FullTx.Hash() != prevTx.FullTx.Hash()) {
+	if prevTx == nil || (newTx.FullTx.Hash() != prevTx.FullTx.Hash()) {
 		if err := p.saveTx(ctx, prevTx, newTx); err != nil {
 			return err
 		}

From 121cc8bfee933c856230b9f11d4d9fdb40a4cb0f Mon Sep 17 00:00:00 2001
From: Nodar
Date: Wed, 23 Aug 2023 15:04:26 +0200
Subject: [PATCH 049/117] Merge with master

---
 go.mod | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/go.mod b/go.mod
index 2cae3a9cf2..10991bc3a2 100644
--- a/go.mod
+++ b/go.mod
@@ -33,6 +33,7 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/wealdtech/go-merkletree v1.0.0
 	golang.org/x/term v0.6.0
+	golang.org/x/tools v0.7.0
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
 )
 
@@ -258,7 +259,6 @@ require (
 	go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
 	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
 	golang.org/x/mod v0.9.0 // indirect
-	golang.org/x/tools v0.7.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
 	google.golang.org/grpc v1.46.0 // indirect
@@ -312,7 +312,7 @@ require (
 	golang.org/x/crypto v0.6.0
 	golang.org/x/net v0.8.0 // indirect
 	golang.org/x/sync v0.1.0
// indirect - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect From 7b45b0352a48c14d3ebefa84a32f5086a395a2ee Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 10:52:14 -0600 Subject: [PATCH 050/117] Move ExtraGas to L1ValidatorConfig and use it for eoa wallets --- arbnode/node.go | 7 ++++--- staker/block_validator.go | 4 ---- staker/eoa_validator_wallet.go | 7 +++++-- staker/staker.go | 3 +++ staker/validator_wallet.go | 8 ++++---- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 42435838ca..abc4367a39 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -299,7 +299,7 @@ type Config struct { BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker"` + Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` @@ -817,6 +817,7 @@ func createNodeImpl( if err != nil { return nil, err } + getExtraGas := func() uint64 { return configFetcher.Get().Staker.ExtraGas } var wallet staker.ValidatorWalletInterface if config.Staker.UseSmartContractWallet || txOptsValidator == nil { var existingWalletAddress *common.Address @@ -828,7 +829,7 @@ func createNodeImpl( tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) existingWalletAddress = &tmpAddress } - wallet, err = staker.NewContractValidatorWallet(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, config.BlockValidator.ExtraGas) + wallet, err = staker.NewContractValidatorWallet(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas) if err != nil { return nil, err } @@ -836,7 +837,7 @@ func createNodeImpl( if len(config.Staker.ContractWalletAddress) > 0 { return nil, errors.New("validator contract wallet specified but flag to use a smart contract wallet was not specified") } - wallet, err = staker.NewEoaValidatorWallet(dp, deployInfo.Rollup, l1client, txOptsValidator) + wallet, err = staker.NewEoaValidatorWallet(dp, deployInfo.Rollup, l1client, txOptsValidator, getExtraGas) if err != nil { return nil, err } diff --git a/staker/block_validator.go b/staker/block_validator.go index 333a096813..5da4365143 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -91,7 +91,6 @@ type BlockValidatorConfig struct { DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` RedisUrl string `koanf:"redis-url"` RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` - ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` } func (c *BlockValidatorConfig) Validate() error { @@ -113,7 +112,6 @@ func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".current-module-root", DefaultBlockValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or 
provide hash)") f.String(prefix+".pending-upgrade-module-root", DefaultBlockValidatorConfig.PendingUpgradeModuleRoot, "pending upgrade wasm module root to additionally validate (hash, 'latest' or empty)") f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error") - f.Uint64(prefix+".extra-gas", DefaultBlockValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) f.String(prefix+".redis-url", DefaultBlockValidatorConfig.RedisUrl, "redis url for block validator") @@ -137,7 +135,6 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{ DataPoster: dataposter.DefaultDataPosterConfig, RedisUrl: "", RedisLock: redislock.DefaultCfg, - ExtraGas: 50000, } var TestBlockValidatorConfig = BlockValidatorConfig{ @@ -153,7 +150,6 @@ var TestBlockValidatorConfig = BlockValidatorConfig{ DataPoster: dataposter.TestDataPosterConfig, RedisUrl: "", RedisLock: redislock.DefaultCfg, - ExtraGas: 50000, } var DefaultBlockValidatorDangerousConfig = BlockValidatorDangerousConfig{ diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go index f514969434..e3d149c99f 100644 --- a/staker/eoa_validator_wallet.go +++ b/staker/eoa_validator_wallet.go @@ -29,17 +29,19 @@ type EoaValidatorWallet struct { challengeManagerAddress common.Address dataPoster *dataposter.DataPoster txCount atomic.Uint64 + getExtraGas func() uint64 } var _ ValidatorWalletInterface = (*EoaValidatorWallet)(nil) -func NewEoaValidatorWallet(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts) (*EoaValidatorWallet, error) { +func NewEoaValidatorWallet(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts, getExtraGas func() uint64) (*EoaValidatorWallet, error) { return &EoaValidatorWallet{ auth: auth, client: l1Client, rollupAddress: rollupAddress, dataPoster: dataPoster, txCount: atomic.Uint64{}, + getExtraGas: getExtraGas, }, nil } @@ -126,7 +128,8 @@ func (w *EoaValidatorWallet) ExecuteTransactions(ctx context.Context, builder *V log.Warn("Precondition failure, dataposter nonce is higher than validator transactio count", "dataposter nonce", nonce, "validator tx count", w.txCount.Load()) } tx := builder.transactions[0] // we ignore future txs and only execute the first - trans, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *tx.To(), tx.Data(), tx.Gas(), tx.Value()) + gas := tx.Gas() + w.getExtraGas() + trans, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *tx.To(), tx.Data(), gas, tx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } diff --git a/staker/staker.go b/staker/staker.go index a35f5088c1..eb39bcb134 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -81,6 +81,7 @@ type L1ValidatorConfig struct { StartValidationFromStaked bool `koanf:"start-validation-from-staked"` ContractWalletAddress string `koanf:"contract-wallet-address"` GasRefunderAddress string `koanf:"gas-refunder-address"` + ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` Dangerous DangerousConfig `koanf:"dangerous"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` @@ -144,6 +145,7 @@ var DefaultL1ValidatorConfig = L1ValidatorConfig{ StartValidationFromStaked: true, ContractWalletAddress: 
"", GasRefunderAddress: "", + ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, } @@ -169,6 +171,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartValidationFromStaked, "assume staked nodes are valid") f.String(prefix+".contract-wallet-address", DefaultL1ValidatorConfig.ContractWalletAddress, "validator smart contract wallet public address") f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") + f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) } diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go index 6c940d8550..3e9c3b5425 100644 --- a/staker/validator_wallet.go +++ b/staker/validator_wallet.go @@ -76,13 +76,13 @@ type ContractValidatorWallet struct { rollupAddress common.Address challengeManagerAddress common.Address dataPoster *dataposter.DataPoster - extraGas uint64 + getExtraGas func() uint64 } var _ ValidatorWalletInterface = (*ContractValidatorWallet)(nil) func NewContractValidatorWallet(dp *dataposter.DataPoster, address *common.Address, walletFactoryAddr, rollupAddress common.Address, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, rollupFromBlock int64, onWalletCreated func(common.Address), - extraGas uint64) (*ContractValidatorWallet, error) { + getExtraGas func() uint64) (*ContractValidatorWallet, error) { var con *rollupgen.ValidatorWallet if address != nil { var err error @@ -105,7 +105,7 @@ func NewContractValidatorWallet(dp *dataposter.DataPoster, address *common.Addre rollup: rollup, rollupFromBlock: rollupFromBlock, dataPoster: dp, - extraGas: extraGas, + getExtraGas: getExtraGas, } // Go complains if we make an address variable before wallet and copy it in wallet.address.Store(address) @@ -344,7 +344,7 @@ func (v *ContractValidatorWallet) estimateGas(ctx context.Context, value *big.In if err != nil { return 0, fmt.Errorf("estimating gas: %w", err) } - return g + v.extraGas, nil + return g + v.getExtraGas(), nil } func (v *ContractValidatorWallet) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) { From 4c81dcc7d41f4b8527f0d97f0233ae1417f6046a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 12:28:00 -0600 Subject: [PATCH 051/117] Fix wallet initialization in system tests --- system_tests/staker_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 468463d58f..b47e3bb5d8 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -134,7 +134,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, 10000) + valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() 
uint64 { return 10000 }) Require(t, err) if honestStakerInactive { valConfig.Strategy = "Defensive" @@ -182,7 +182,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletB, err := staker.NewEoaValidatorWallet(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB) + valWalletB, err := staker.NewEoaValidatorWallet(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB, func() uint64 { return 0 }) Require(t, err) valConfig.Strategy = "MakeNodes" statelessB, err := staker.NewStatelessBlockValidator( @@ -221,7 +221,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletC, err := staker.NewContractValidatorWallet(dpC, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, nil, 0, func(common.Address) {}, 10000) + valWalletC, err := staker.NewContractValidatorWallet(dpC, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, nil, 0, func(common.Address) {}, func() uint64 { return 10000 }) Require(t, err) valConfig.Strategy = "Watchtower" stakerC, err := staker.NewStaker( From ccfb0065a03ee026743f7cfa81cc72e4f26725ab Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 23 Aug 2023 17:13:28 -0500 Subject: [PATCH 052/117] Initialize dev wallet as a chain owner in nitro-testnode --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index 14f24a1bad..7ad12c0f1b 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 14f24a1bad2625412602d06156156c380bd589d2 +Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e From ffe45b8690bc6be3b17b631b898139de5680f7da Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 16:50:14 -0600 Subject: [PATCH 053/117] Check that the nonce is correct in the data poster --- arbnode/dataposter/data_poster.go | 52 ++++++++++++++++++++++--------- staker/eoa_validator_wallet.go | 40 ++++-------------------- staker/staker.go | 26 ++++++++++++++++ staker/validator_wallet.go | 6 ++++ 4 files changed, 75 insertions(+), 49 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 9451fbbafe..89b36e3e51 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -157,46 +157,59 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return nil } -// GetNextNonceAndMeta retrieves generates next nonce, validates that a -// transaction can be posted with that nonce, and fetches "Meta" either last -// queued iterm (if queue isn't empty) or retrieves with last block. -func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { +// Requires the caller hold the mutex. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. +func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { config := p.config() - p.mutex.Lock() - defer p.mutex.Unlock() // Ensure latest finalized block state is available. 
blockNum, err := p.client.BlockNumber(ctx)
 	if err != nil {
-		return 0, nil, err
+		return 0, nil, false, err
 	}
 	lastQueueItem, err := p.queue.FetchLast(ctx)
 	if err != nil {
-		return 0, nil, err
+		return 0, nil, false, err
 	}
 	if lastQueueItem != nil {
 		nextNonce := lastQueueItem.Data.Nonce + 1
 		if err := p.canPostWithNonce(ctx, nextNonce); err != nil {
-			return 0, nil, err
+			return 0, nil, false, err
 		}
-		return nextNonce, lastQueueItem.Meta, nil
+		return nextNonce, lastQueueItem.Meta, true, nil
 	}
 	if err := p.updateNonce(ctx); err != nil {
 		if !p.queue.IsPersistent() && config.WaitForL1Finality {
-			return 0, nil, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err)
+			return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err)
 		}
 		// Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue.
 		nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1))
 		log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err)
 		nonce, err := p.client.NonceAt(ctx, p.sender, nonceQueryBlock)
 		if err != nil {
-			return 0, nil, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err)
+			return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err)
 		}
 		p.lastBlock = nonceQueryBlock
 		p.nonce = nonce
 	}
-	meta, err := p.metadataRetriever(ctx, p.lastBlock)
-	return p.nonce, meta, err
+	return p.nonce, nil, false, nil
+}
+
+// GetNextNonceAndMeta generates the next nonce, validates that a transaction
+// can be posted with that nonce, and fetches "Meta" either from the last
+// queued item (if the queue isn't empty) or from the metadata retriever at the last block.
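// (Editor's illustration, not part of the original patch: the intended call
// pattern after this change. PostTransaction now re-derives the expected
// next nonce itself and rejects anything else, so a caller must thread the
// nonce from GetNextNonceAndMeta straight through; to, calldata, gasLimit
// and value below are placeholders:
//
//	nonce, meta, err := p.GetNextNonceAndMeta(ctx)
//	// handle err, build the transaction payload...
//	tx, err := p.PostTransaction(ctx, time.Now(), nonce, meta, to, calldata, gasLimit, value)
//
// Posting with a stale or skipped nonce now fails fast instead of queueing
// transactions out of order.)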
+func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + if err != nil { + return 0, nil, err + } + if !hasMeta { + meta, err = p.metadataRetriever(ctx, p.lastBlock) + } + return nonce, meta, err } const minRbfIncrease = arbmath.OneInBips * 11 / 10 @@ -299,7 +312,16 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() - err := p.updateBalance(ctx) + + expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) + if err != nil { + return nil, err + } + if nonce != expectedNonce { + return nil, fmt.Errorf("data poster expected next transaction to have nonce %v but was requested to post transaction with nonce %v", expectedNonce, nonce) + } + + err = p.updateBalance(ctx) if err != nil { return nil, fmt.Errorf("failed to update data poster balance: %w", err) } diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go index e3d149c99f..a91510cbbe 100644 --- a/staker/eoa_validator_wallet.go +++ b/staker/eoa_validator_wallet.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" @@ -88,44 +87,13 @@ func (w *EoaValidatorWallet) TestTransactions(context.Context, []*types.Transact return nil } -// Polls until the nonce from dataposter catches up with transactions posted -// by validator wallet. -func (w *EoaValidatorWallet) pollForNonce(ctx context.Context) (uint64, error) { - var nonce uint64 - flag := true - for flag { - var err error - select { - // TODO: consider adding config for eoa validator wallet and pull this - // polling time from there. - case <-time.After(100 * time.Millisecond): - nonce, _, err = w.dataPoster.GetNextNonceAndMeta(ctx) - if err != nil { - return 0, fmt.Errorf("get next nonce and meta: %w", err) - } - if nonce >= w.txCount.Load() { - flag = false - break - } - log.Warn("Dataposter nonce too low", "nonce", nonce, "validator tx count", w.txCount.Load()) - case <-ctx.Done(): - return 0, ctx.Err() - } - } - return nonce, nil -} - func (w *EoaValidatorWallet) ExecuteTransactions(ctx context.Context, builder *ValidatorTxBuilder, _ common.Address) (*types.Transaction, error) { if len(builder.transactions) == 0 { return nil, nil } - nonce, err := w.pollForNonce(ctx) + nonce, err := w.L1Client().NonceAt(ctx, w.auth.From, nil) if err != nil { - return nil, fmt.Errorf("polling for dataposter nonce to catch up: %w", err) - } - if nonce > w.txCount.Load() { - // If this happens, it probably means the dataposter is used by another client, besides validator. 
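// (Editor's note: the nonce polling and its precondition warning removed
// below are superseded by the staker-side check added later in this same
// patch: confirmDataPosterIsReady in staker/staker.go compares the data
// poster's next nonce against the on-chain nonce up front and refuses to
// act on any mismatch.)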
- log.Warn("Precondition failure, dataposter nonce is higher than validator transactio count", "dataposter nonce", nonce, "validator tx count", w.txCount.Load()) + return nil, err } tx := builder.transactions[0] // we ignore future txs and only execute the first gas := tx.Gas() + w.getExtraGas() @@ -163,3 +131,7 @@ func (b *EoaValidatorWallet) StopAndWait() { b.StopWaiter.StopAndWait() b.dataPoster.StopAndWait() } + +func (b *EoaValidatorWallet) DataPoster() *dataposter.DataPoster { + return b.dataPoster +} diff --git a/staker/staker.go b/staker/staker.go index eb39bcb134..6ee561b867 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -493,7 +493,33 @@ func (s *Staker) shouldAct(ctx context.Context) bool { return true } +func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { + dp := s.wallet.DataPoster() + if dp == nil { + return nil + } + dataPosterNonce, _, err := dp.GetNextNonceAndMeta(ctx) + if err != nil { + return err + } + latestNonce, err := s.l1Reader.Client().NonceAt(ctx, dp.Sender(), nil) + if err != nil { + return err + } + if dataPosterNonce > latestNonce { + return fmt.Errorf("data poster nonce %v is ahead of on-chain nonce %v -- probably waiting for a pending transaction to be included in a block", dataPosterNonce, latestNonce) + } + if dataPosterNonce < latestNonce { + return fmt.Errorf("data poster nonce %v is behind on-chain nonce %v -- is something else making transactions on this address?", dataPosterNonce, latestNonce) + } + return nil +} + func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { + err := s.confirmDataPosterIsReady(ctx) + if err != nil { + return nil, err + } if s.config.strategy != WatchtowerStrategy { whitelisted, err := s.IsWhitelisted(ctx) if err != nil { diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go index 3e9c3b5425..133a808eac 100644 --- a/staker/validator_wallet.go +++ b/staker/validator_wallet.go @@ -61,6 +61,8 @@ type ValidatorWalletInterface interface { AuthIfEoa() *bind.TransactOpts Start(context.Context) StopAndWait() + // May be nil + DataPoster() *dataposter.DataPoster } type ContractValidatorWallet struct { @@ -418,6 +420,10 @@ func (b *ContractValidatorWallet) StopAndWait() { b.StopWaiter.StopAndWait() } +func (b *ContractValidatorWallet) DataPoster() *dataposter.DataPoster { + return b.dataPoster +} + func GetValidatorWalletContract( ctx context.Context, validatorWalletFactoryAddr common.Address, From 4a314b74563a9014494f6f46b021992d42371166 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 16:50:19 -0600 Subject: [PATCH 054/117] Fix flaky tests --- system_tests/forwarder_test.go | 3 ++- system_tests/retryable_test.go | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 93c928d145..0a954719d8 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -127,6 +127,7 @@ func createForwardingNode( nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.Sequencer.Enable = false nodeConfig.DelayedSequencer.Enable = false + nodeConfig.BatchPoster.Enable = false nodeConfig.Forwarder.RedisUrl = redisUrl nodeConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false @@ -148,7 +149,7 @@ func createSequencer( ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.BatchPoster.Enable = true + nodeConfig.BatchPoster.Enable = false nodeConfig.SeqCoordinator.Enable = true 
nodeConfig.SeqCoordinator.RedisUrl = redisUrl nodeConfig.SeqCoordinator.MyUrl = ipcPath diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 29bfdd6e6f..b1dd32d1dc 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -319,7 +319,7 @@ func TestSubmissionGasCosts(t *testing.T) { usefulGas := params.TxGas excessGasLimit := uint64(808) - maxSubmissionFee := big.NewInt(1e13) + maxSubmissionFee := big.NewInt(1e14) retryableGas := arbmath.UintToBig(usefulGas + excessGasLimit) // will only burn the intrinsic cost retryableL2CallValue := big.NewInt(1e4) retryableCallData := []byte{} @@ -358,8 +358,10 @@ func TestSubmissionGasCosts(t *testing.T) { if redeemReceipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "first retry tx failed") } + redeemBlock, err := l2client.HeaderByNumber(ctx, redeemReceipt.BlockNumber) + Require(t, err) - l2BaseFee := GetBaseFee(t, l2client, ctx) + l2BaseFee := redeemBlock.BaseFee excessGasPrice := arbmath.BigSub(gasFeeCap, l2BaseFee) excessWei := arbmath.BigMulByUint(l2BaseFee, excessGasLimit) excessWei.Add(excessWei, arbmath.BigMul(excessGasPrice, retryableGas)) From e557bd8bc29c37196b271afb8d0826ab72b86dba Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 17:01:06 -0600 Subject: [PATCH 055/117] Move data poster config from BlockValidatorConfig to L1ValidatorConfig --- arbnode/dataposter/data_poster.go | 12 ++++++ arbnode/node.go | 8 ++-- staker/block_validator.go | 14 ------- staker/staker.go | 62 +++++++++++++++++++++++-------- system_tests/staker_test.go | 4 +- 5 files changed, 66 insertions(+), 34 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 89b36e3e51..1504fcf58b 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -648,6 +648,12 @@ var DefaultDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, } +var DefaultDataPosterConfigForValidator = func() DataPosterConfig { + config := DefaultDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() + var TestDataPosterConfig = DataPosterConfig{ ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", RedisSigner: signature.TestSimpleHmacConfig, @@ -662,3 +668,9 @@ var TestDataPosterConfig = DataPosterConfig{ UseLevelDB: false, UseNoOpStorage: false, } + +var TestDataPosterConfigForValidator = func() DataPosterConfig { + config := TestDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() diff --git a/arbnode/node.go b/arbnode/node.go index abc4367a39..e6960a3f22 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -434,6 +434,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.BatchPoster.Enable = false config.SeqCoordinator.Enable = false config.BlockValidator = staker.TestBlockValidatorConfig + config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" config.Forwarder = execution.DefaultTestForwarderConfig @@ -451,6 +452,7 @@ func ConfigDefaultL2Test() *Config { config.Feed.Output.Signed = false config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true + config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" config.TransactionStreamer = DefaultTransactionStreamerConfig @@ -554,19 +556,19 @@ func ValidatorDataposter( 
mdRetriever := func(ctx context.Context, blockNum *big.Int) ([]byte, error) { return nil, nil } - redisC, err := redisutil.RedisClientFromURL(cfg.BlockValidator.RedisUrl) + redisC, err := redisutil.RedisClientFromURL(cfg.Staker.RedisUrl) if err != nil { return nil, fmt.Errorf("creating redis client from url: %w", err) } lockCfgFetcher := func() *redislock.SimpleCfg { - return &cfg.BlockValidator.RedisLock + return &cfg.Staker.RedisLock } redisLock, err := redislock.NewSimple(redisC, lockCfgFetcher, func() bool { return syncMonitor.Synced() }) if err != nil { return nil, err } dpCfg := func() *dataposter.DataPosterConfig { - return &cfg.BlockValidator.DataPoster + return &cfg.Staker.DataPoster } return dataposter.NewDataPoster(db, l1Reader, transactOpts, redisC, redisLock, dpCfg, mdRetriever) } diff --git a/staker/block_validator.go b/staker/block_validator.go index 5da4365143..f04b852041 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -19,8 +19,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" - "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" @@ -88,9 +86,6 @@ type BlockValidatorConfig struct { PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` - DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` - RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` } func (c *BlockValidatorConfig) Validate() error { @@ -113,9 +108,6 @@ func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".pending-upgrade-module-root", DefaultBlockValidatorConfig.PendingUpgradeModuleRoot, "pending upgrade wasm module root to additionally validate (hash, 'latest' or empty)") f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error") BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) - f.String(prefix+".redis-url", DefaultBlockValidatorConfig.RedisUrl, "redis url for block validator") - redislock.AddConfigOptions(prefix+".redis-lock", f) } func BlockValidatorDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -132,9 +124,6 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, - DataPoster: dataposter.DefaultDataPosterConfig, - RedisUrl: "", - RedisLock: redislock.DefaultCfg, } var TestBlockValidatorConfig = BlockValidatorConfig{ @@ -147,9 +136,6 @@ var TestBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, - DataPoster: dataposter.TestDataPosterConfig, - RedisUrl: "", - RedisLock: redislock.DefaultCfg, } var DefaultBlockValidatorDangerousConfig = BlockValidatorDangerousConfig{ diff --git a/staker/staker.go b/staker/staker.go index 6ee561b867..9b7e6c238e 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -20,6 +20,8 @@ import ( 
"github.com/ethereum/go-ethereum/rpc" flag "github.com/spf13/pflag" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/arbmath" @@ -69,21 +71,24 @@ func L1PostingStrategyAddOptions(prefix string, f *flag.FlagSet) { } type L1ValidatorConfig struct { - Enable bool `koanf:"enable"` - Strategy string `koanf:"strategy"` - StakerInterval time.Duration `koanf:"staker-interval"` - MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` - PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` - DisableChallenge bool `koanf:"disable-challenge"` - ConfirmationBlocks int64 `koanf:"confirmation-blocks"` - UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` - OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` - StartValidationFromStaked bool `koanf:"start-validation-from-staked"` - ContractWalletAddress string `koanf:"contract-wallet-address"` - GasRefunderAddress string `koanf:"gas-refunder-address"` - ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` - Dangerous DangerousConfig `koanf:"dangerous"` - ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + Enable bool `koanf:"enable"` + Strategy string `koanf:"strategy"` + StakerInterval time.Duration `koanf:"staker-interval"` + MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` + PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` + DisableChallenge bool `koanf:"disable-challenge"` + ConfirmationBlocks int64 `koanf:"confirmation-blocks"` + UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` + OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + ContractWalletAddress string `koanf:"contract-wallet-address"` + GasRefunderAddress string `koanf:"gas-refunder-address"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` + Dangerous DangerousConfig `koanf:"dangerous"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` strategy StakerStrategy gasRefunder common.Address @@ -145,6 +150,30 @@ var DefaultL1ValidatorConfig = L1ValidatorConfig{ StartValidationFromStaked: true, ContractWalletAddress: "", GasRefunderAddress: "", + DataPoster: dataposter.DefaultDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: redislock.DefaultCfg, + ExtraGas: 50000, + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, +} + +var TestL1ValidatorConfig = L1ValidatorConfig{ + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Millisecond * 10, + MakeAssertionInterval: 0, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 0, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + DataPoster: dataposter.TestDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: redislock.DefaultCfg, ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, @@ -171,7 +200,10 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".start-validation-from-staked", 
DefaultL1ValidatorConfig.StartValidationFromStaked, "assume staked nodes are valid")
 	f.String(prefix+".contract-wallet-address", DefaultL1ValidatorConfig.ContractWalletAddress, "validator smart contract wallet public address")
 	f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)")
+	f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator")
 	f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions")
+	dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f)
+	redislock.AddConfigOptions(prefix+".redis-lock", f)
 	DangerousConfigAddOptions(prefix+".dangerous", f)
 	genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname)
 }
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index b47e3bb5d8..82eede9f60 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -128,13 +128,13 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 	validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, l1client)
 	Require(t, err)
-	valConfig := staker.L1ValidatorConfig{}
+	valConfig := staker.TestL1ValidatorConfig
 	dpA, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
 	if err != nil {
 		t.Fatalf("Error creating validator dataposter: %v", err)
 	}
-	valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return 10000 })
+	valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas })
 	Require(t, err)
 	if honestStakerInactive {
 		valConfig.Strategy = "Defensive"

From 69a5c3e76115a9bb022f2ef41c8487b8646d697b Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Wed, 23 Aug 2023 18:36:44 -0600
Subject: [PATCH 056/117] Improve batch poster reliability for L3s

---
 arbnode/batch_poster.go            | 24 +++++++++++++++++++++---
 arbnode/dataposter/data_poster.go  | 19 ++++++++++++-------
 arbutil/wait_for_l1.go             |  4 ++--
 util/headerreader/header_reader.go |  4 ++++
 4 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 768f73276b..e9a1663741 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -259,6 +259,8 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe
 
 // checkRevert checks blocks with number in range [from, to] whether they
 // contain reverted batch_poster transaction.
+// It returns true if batch posting needs to halt, i.e. a batch-poster transaction
+// reverted and the data poster is not configured with no-op storage (which tolerates reverts).
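+// For example, on an L3 whose parent chain is itself an Arbitrum chain, the
+// data poster runs with no-op storage, so a reverted batch transaction is only
+// logged as a warning and posting continues rather than halting.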
func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, error) { if from > to { return false, fmt.Errorf("wrong range, from: %d is more to: %d", from, to) @@ -280,8 +282,13 @@ func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, e return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err) } if r.Status == types.ReceiptStatusFailed { - log.Error("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) - return true, nil + shouldHalt := !b.config().DataPoster.UseNoOpStorage + logLevel := log.Warn + if shouldHalt { + logLevel = log.Error + } + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) + return shouldHalt, nil } } } @@ -881,7 +888,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - if _, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)); err != nil { + tx, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)) + if err != nil { return false, err } log.Info( @@ -920,6 +928,16 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) b.backlog = 0 } b.building = nil + + // If we aren't queueing up transactions, wait for the receipt before moving on to the next batch. + if config.DataPoster.UseNoOpStorage { + receipt, err := b.l1Reader.WaitForTxApproval(ctx, tx) + if err != nil { + return false, fmt.Errorf("error waiting for tx receipt: %w", err) + } + log.Info("Got successful receipt from batch poster transaction", "txHash", tx.Hash(), "blockNumber", receipt.BlockNumber, "blockHash", receipt.BlockHash) + } + return true, nil } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 9451fbbafe..3c8da8ef5f 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -92,21 +92,26 @@ func parseReplacementTimes(val string) ([]time.Duration, error) { } func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, auth *bind.TransactOpts, redisClient redis.UniversalClient, redisLock AttemptLocker, config ConfigFetcher, metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error)) (*DataPoster, error) { - replacementTimes, err := parseReplacementTimes(config().ReplacementTimes) + initConfig := config() + replacementTimes, err := parseReplacementTimes(initConfig.ReplacementTimes) if err != nil { return nil, err } + if headerReader.IsParentChainArbitrum() && !initConfig.UseNoOpStorage { + initConfig.UseNoOpStorage = true + log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool") + } var queue QueueStorage switch { - case config().UseLevelDB: - queue = leveldb.New(db) - case config().UseNoOpStorage: + case initConfig.UseNoOpStorage: queue = &noop.Storage{} + case initConfig.UseLevelDB: + queue = leveldb.New(db) case redisClient == nil: queue = slice.NewStorage() default: var err error - queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &config().RedisSigner) + queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner) if err != nil { return nil, err } @@ -587,8 +592,8 @@ type DataPosterConfig struct { MaxTipCapGwei 
float64 `koanf:"max-tip-cap-gwei" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` - UseLevelDB bool `koanf:"use-leveldb" reload:"hot"` - UseNoOpStorage bool `koanf:"use-noop-storage" reload:"hot"` + UseLevelDB bool `koanf:"use-leveldb"` + UseNoOpStorage bool `koanf:"use-noop-storage"` } // ConfigFetcher function type is used instead of directly passing config so diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index ec6bb5a380..12d494a230 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" ) type L1Interface interface { @@ -88,7 +88,7 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio } _, err = SendTxAsCall(ctx, client, tx, from, txRes.BlockNumber, true) if err == nil { - return fmt.Errorf("%w for tx hash %v", core.ErrGasLimitReached, tx.Hash()) + return fmt.Errorf("%w for tx hash %v", vm.ErrOutOfGas, tx.Hash()) } return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 739a96b438..e5807224c0 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -462,6 +462,10 @@ func (s *HeaderReader) UseFinalityData() bool { return s.config().UseFinalityData } +func (s *HeaderReader) IsParentChainArbitrum() bool { + return s.isParentChainArbitrum +} + func (s *HeaderReader) Start(ctxIn context.Context) { s.StopWaiter.Start(ctxIn, s) s.LaunchThread(s.broadcastLoop) From 3c0bb4190c064bf11cf46ae41eddd4c8451bb9e1 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Aug 2023 18:59:21 -0600 Subject: [PATCH 057/117] Use data poster for eoa to timeout challenges --- staker/eoa_validator_wallet.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go index a91510cbbe..09175332bf 100644 --- a/staker/eoa_validator_wallet.go +++ b/staker/eoa_validator_wallet.go @@ -6,7 +6,6 @@ package staker import ( "context" "fmt" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -27,7 +26,6 @@ type EoaValidatorWallet struct { challengeManager *challengegen.ChallengeManager challengeManagerAddress common.Address dataPoster *dataposter.DataPoster - txCount atomic.Uint64 getExtraGas func() uint64 } @@ -39,7 +37,6 @@ func NewEoaValidatorWallet(dataPoster *dataposter.DataPoster, rollupAddress comm client: l1Client, rollupAddress: rollupAddress, dataPoster: dataPoster, - txCount: atomic.Uint64{}, getExtraGas: getExtraGas, }, nil } @@ -91,18 +88,21 @@ func (w *EoaValidatorWallet) ExecuteTransactions(ctx context.Context, builder *V if len(builder.transactions) == 0 { return nil, nil } + tx := builder.transactions[0] // we ignore future txs and only execute the first + return w.postTransaction(ctx, tx) +} + +func (w *EoaValidatorWallet) postTransaction(ctx context.Context, baseTx *types.Transaction) (*types.Transaction, error) { nonce, err := w.L1Client().NonceAt(ctx, w.auth.From, nil) if err != nil { return nil, err } - tx := builder.transactions[0] // we ignore future txs and only execute the first - gas := 
tx.Gas() + w.getExtraGas() - trans, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *tx.To(), tx.Data(), gas, tx.Value()) + gas := baseTx.Gas() + w.getExtraGas() + newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } - w.txCount.Store(nonce) - return trans, nil + return newTx, nil } func (w *EoaValidatorWallet) TimeoutChallenges(ctx context.Context, timeouts []uint64) (*types.Transaction, error) { @@ -111,7 +111,12 @@ func (w *EoaValidatorWallet) TimeoutChallenges(ctx context.Context, timeouts []u } auth := *w.auth auth.Context = ctx - return w.challengeManager.Timeout(&auth, timeouts[0]) + auth.NoSend = true + tx, err := w.challengeManager.Timeout(&auth, timeouts[0]) + if err != nil { + return nil, err + } + return w.postTransaction(ctx, tx) } func (w *EoaValidatorWallet) CanBatchTxs() bool { From 47e5e9f24821d1badcddf68cc60cfefedbb572d9 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 24 Aug 2023 10:33:30 -0500 Subject: [PATCH 058/117] fix minor bug --- cmd/seq-coordinator-manager/seq-coordinator-manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index eb28f6023c..96da53cc94 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -88,7 +88,7 @@ func main() { app.SetFocus(prioritySeqList) }) priorityForm.AddButton("Remove", func() { - url := seqManager.priorityList[0] + url := seqManager.priorityList[index] delete(seqManager.prioritiesMap, url) seqManager.updatePriorityList(ctx, index, 0) seqManager.priorityList = seqManager.priorityList[1:] From 90dd010b9427a9fb7f1f8b1919a14c1ae0ab1f94 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 24 Aug 2023 10:52:45 -0500 Subject: [PATCH 059/117] update recent merge conflict resolution --- go.sum | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go.sum b/go.sum index f7fdad4717..4362d4b01d 100644 --- a/go.sum +++ b/go.sum @@ -1973,6 +1973,9 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= From c5f6ba3ecb01ada623c5fe90e306ead00cb73d93 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 24 Aug 2023 13:48:16 -0500 Subject: [PATCH 060/117] update RedisCoordinator to inherit redisutil's implementation --- .../rediscoordinator/redis_coordinator.go | 25 +------------------ .../seq-coordinator-manager.go | 11 +++++--- 2 files changed, 8 insertions(+), 28 deletions(-) diff --git 
a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index db3724240e..3dcb6f7203 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -10,18 +10,7 @@ import ( ) type RedisCoordinator struct { - Client redis.UniversalClient -} - -func NewRedisCoordinator(redisURL string) (*RedisCoordinator, error) { - redisClient, err := redisutil.RedisClientFromURL(redisURL) - if err != nil { - return nil, err - } - - return &RedisCoordinator{ - Client: redisClient, - }, nil + *redisutil.RedisCoordinator } func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, map[string]int, error) { @@ -63,15 +52,3 @@ func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []s } return err } - -// CurrentChosenSequencer retrieves the current chosen sequencer holding the lock -func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, error) { - current, err := c.Client.Get(ctx, redisutil.CHOSENSEQ_KEY).Result() - if errors.Is(err, redis.Nil) { - return "", nil - } - if err != nil { - return "", err - } - return current, nil -} diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index 96da53cc94..9bbafe18df 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/gdamore/tcell/v2" "github.com/offchainlabs/nitro/cmd/seq-coordinator-manager/rediscoordinator" + "github.com/offchainlabs/nitro/util/redisutil" "github.com/rivo/tview" ) @@ -45,15 +46,17 @@ func main() { os.Exit(1) } redisURL := args[0] - redisCoordinator, err := rediscoordinator.NewRedisCoordinator(redisURL) + redisutilCoordinator, err := redisutil.NewRedisCoordinator(redisURL) if err != nil { panic(err) } seqManager := &manager{ - redisCoordinator: redisCoordinator, - prioritiesMap: make(map[string]int), - livelinessMap: make(map[string]int), + redisCoordinator: &rediscoordinator.RedisCoordinator{ + RedisCoordinator: redisutilCoordinator, + }, + prioritiesMap: make(map[string]int), + livelinessMap: make(map[string]int), } seqManager.refreshAllLists(ctx) From 6d7667715a5bf5311bf9b6289a11148724c075e8 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 25 Aug 2023 11:45:53 -0500 Subject: [PATCH 061/117] add comments --- .../rediscoordinator/redis_coordinator.go | 4 ++++ cmd/seq-coordinator-manager/seq-coordinator-manager.go | 1 + 2 files changed, 5 insertions(+) diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index 3dcb6f7203..a393719a1d 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -9,10 +9,12 @@ import ( "github.com/offchainlabs/nitro/util/redisutil" ) +// RedisCoordinator builds upon RedisCoordinator of redisutil with additional functionality type RedisCoordinator struct { *redisutil.RedisCoordinator } +// GetPriorities returns the priority list of sequencers func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, map[string]int, error) { prioritiesMap := make(map[string]int) prioritiesString, err := rc.Client.Get(ctx, redisutil.PRIORITIES_KEY).Result() @@ -29,6 +31,7 @@ func 
(rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, map[st return priorities, prioritiesMap, nil } +// GetLivelinessMap returns a map whose keys are sequencers that have their liveliness set to OK func (rc *RedisCoordinator) GetLivelinessMap(ctx context.Context) (map[string]int, error) { livelinessMap := make(map[string]int) livelinessList, _, err := rc.Client.Scan(ctx, 0, redisutil.WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() @@ -42,6 +45,7 @@ func (rc *RedisCoordinator) GetLivelinessMap(ctx context.Context) (map[string]in return livelinessMap, nil } +// UpdatePriorities updates the priority list of sequencers func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { prioritiesString := strings.Join(priorities, ",") err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index 9bbafe18df..0a279cff64 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -265,6 +265,7 @@ func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { addSeqForm.AddButton("Add", func() { // check if url is valid, i.e it doesnt already exist in the priority list if _, ok := sm.prioritiesMap[URL]; !ok && URL != "" { + sm.prioritiesMap[URL]++ sm.priorityList = append(sm.priorityList, URL) } sm.populateLists(ctx) From d53b1bc2dc48c4a07c6298547c4ac3f33b88c226 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 28 Aug 2023 16:31:49 +0200 Subject: [PATCH 062/117] Drop unused fields with koanf tags --- cmd/datool/datool.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 1de85037ee..d20a5b52cd 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -84,14 +84,13 @@ func startClient(args []string) error { // datool client rpc store type ClientStoreConfig struct { - URL string `koanf:"url"` - Message string `koanf:"message"` - RandomMessageSize int `koanf:"random-message-size"` - DASRetentionPeriod time.Duration `koanf:"das-retention-period"` - SigningKey string `koanf:"signing-key"` - SigningWallet string `koanf:"signing-wallet"` - SigningWalletPassword string `koanf:"signing-wallet-password"` - Conf genericconf.ConfConfig `koanf:"conf"` + URL string `koanf:"url"` + Message string `koanf:"message"` + RandomMessageSize int `koanf:"random-message-size"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period"` + SigningKey string `koanf:"signing-key"` + SigningWallet string `koanf:"signing-wallet"` + SigningWalletPassword string `koanf:"signing-wallet-password"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -196,9 +195,8 @@ func startClientStore(args []string) error { // datool client rest getbyhash type RESTClientGetByHashConfig struct { - URL string `koanf:"url"` - DataHash string `koanf:"data-hash"` - Conf genericconf.ConfConfig `koanf:"conf"` + URL string `koanf:"url"` + DataHash string `koanf:"data-hash"` } func parseRESTClientGetByHashConfig(args []string) (*RESTClientGetByHashConfig, error) { @@ -257,8 +255,7 @@ func startRESTClientGetByHash(args []string) error { // das keygen type KeyGenConfig struct { - Dir string - Conf genericconf.ConfConfig `koanf:"conf"` + Dir string // ECDSA mode. ECDSA bool `koanf:"ecdsa"` // Wallet mode. 
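
The next patch introduces a custom koanf linter. As context, here is a minimal,
standalone sketch of the idea it builds on: parse Go source, walk struct
declarations, and record every field that carries a `koanf` tag. This is an
illustrative sketch only (the sample input string and the printed key format
are assumptions for the example); the actual collection logic is the
koanfFields helper added in linter/koanf/handlers.go below.

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"reflect"
    	"strings"
    )

    // A tiny input, loosely mirroring the linter's testdata.
    const src = "package a\n\ntype BatchPosterConfig struct {\n" +
    	"\tEnable  bool `koanf:\"enable\"`\n" +
    	"\tMaxSize int  `koanf:\"max-size\"`\n" +
    	"}\n"

    func main() {
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "a.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	ast.Inspect(f, func(n ast.Node) bool {
    		ts, ok := n.(*ast.TypeSpec)
    		if !ok {
    			return true
    		}
    		st, ok := ts.Type.(*ast.StructType)
    		if !ok {
    			return true
    		}
    		for _, field := range st.Fields.List {
    			if field.Tag == nil {
    				continue
    			}
    			// field.Tag.Value includes the surrounding backquotes.
    			tag := reflect.StructTag(strings.Trim(field.Tag.Value, "`"))
    			if v, ok := tag.Lookup("koanf"); ok {
    				// Keyed the same way the linter keys fields: pkg.Struct.Field.
    				fmt.Printf("%s.%s.%s -> koanf:%q\n", f.Name.Name, ts.Name.Name, field.Names[0].Name, v)
    			}
    		}
    		return true
    	})
    }
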
From 2b01cf8fe37f115f548ae481e2deb34d6cbecef1 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 28 Aug 2023 17:34:31 +0200 Subject: [PATCH 063/117] Implement linter detection of koanf fields that aren't used outside flag definitions --- Makefile | 2 +- linter/koanf/handlers.go | 241 +++++++++++++++++++++++++++++++ linter/koanf/koanf.go | 201 ++++++-------------------- linter/koanf/koanf_test.go | 62 ++++++-- linter/testdata/src/a/a.go | 38 ----- linter/testdata/src/koanf/a/a.go | 58 ++++++++ linter/testdata/src/koanf/b/b.go | 52 +++++++ 7 files changed, 447 insertions(+), 207 deletions(-) create mode 100644 linter/koanf/handlers.go delete mode 100644 linter/testdata/src/a/a.go create mode 100644 linter/testdata/src/koanf/a/a.go create mode 100644 linter/testdata/src/koanf/b/b.go diff --git a/Makefile b/Makefile index 0d93958c2d..33487d0609 100644 --- a/Makefile +++ b/Makefile @@ -304,7 +304,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run linter/koanf/koanf.go ./... + go run linter/koanf/koanf.go linter/koanf/handlers.go ./... go run linter/pointercheck/pointer.go ./... golangci-lint run --fix yarn --cwd contracts solhint diff --git a/linter/koanf/handlers.go b/linter/koanf/handlers.go new file mode 100644 index 0000000000..452291e605 --- /dev/null +++ b/linter/koanf/handlers.go @@ -0,0 +1,241 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + "unicode" + + "github.com/fatih/structtag" + "golang.org/x/tools/go/analysis" +) + +// handleComposite tracks use of fields in composite literals. +// E.g. `Config{A: 1, B: 2, C: 3}` will increase counters of fields A,B and C. +func handleComposite(pass *analysis.Pass, cl *ast.CompositeLit, cnt map[string]int) { + id, ok := cl.Type.(*ast.Ident) + if !ok { + return + } + for _, e := range cl.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if ki, ok := kv.Key.(*ast.Ident); ok { + fi := pass.TypesInfo.Types[id].Type.String() + "." + ki.Name + cnt[normalizeID(pass, fi)]++ + } + } + } +} + +// handleSelector handles selector expression recursively, that is an expression: +// a.B.C.D will update counter for fields: a.B.C.D, a.B.C and a.B. +func handleSelector(pass *analysis.Pass, se *ast.SelectorExpr, inc int, cnt map[string]int) string { + if e, ok := se.X.(*ast.SelectorExpr); ok { + // Full field identifier, including package name. + fi := pass.TypesInfo.Types[e].Type.String() + "." + se.Sel.Name + cnt[normalizeID(pass, fi)] += inc + prefix := handleSelector(pass, e, inc, cnt) + fi = prefix + "." + se.Sel.Name + cnt[normalizeID(pass, fi)] += inc + return fi + } + // Handle selectors on function calls, e.g. `config().Enabled`. + if _, ok := se.X.(*ast.CallExpr); ok { + fi := pass.TypesInfo.Types[se.X].Type.String() + "." + se.Sel.Name + cnt[normalizeID(pass, fi)] += inc + return fi + } + if ident, ok := se.X.(*ast.Ident); ok { + if pass.TypesInfo.Types[ident].Type != nil { + fi := pass.TypesInfo.Types[ident].Type.String() + "." + se.Sel.Name + cnt[normalizeID(pass, fi)] += inc + return fi + } + } + return "" +} + +// koanfFields returns a map of fields that have koanf tag. 
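+// For example, a field Enable with a koanf tag in a type BatchPosterConfig of
+// package a is keyed as "a.BatchPosterConfig.Enable".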
+func koanfFields(pass *analysis.Pass) map[string]token.Pos {
+	res := make(map[string]token.Pos)
+	for _, f := range pass.Files {
+		pkgName := f.Name.Name
+		ast.Inspect(f, func(node ast.Node) bool {
+			if ts, ok := node.(*ast.TypeSpec); ok {
+				st, ok := ts.Type.(*ast.StructType)
+				if !ok {
+					return true
+				}
+				for _, f := range st.Fields.List {
+					if tag := tagFromField(f); tag != "" {
+						t := strings.Join([]string{pkgName, ts.Name.Name, f.Names[0].Name}, ".")
+						res[t] = f.Pos()
+					}
+				}
+			}
+			return true
+		})
+	}
+	return res
+}
+
+func containsFlagSet(params []*ast.Field) bool {
+	for _, p := range params {
+		se, ok := p.Type.(*ast.StarExpr)
+		if !ok {
+			continue
+		}
+		sle, ok := se.X.(*ast.SelectorExpr)
+		if !ok {
+			continue
+		}
+		if sle.Sel.Name == "FlagSet" {
+			return true
+		}
+	}
+	return false
+}
+
+// checkFlagDefs checks flag definitions in the function.
+// Result contains a list of errors where the flag name doesn't match the field name.
+func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl, cnt map[string]int) Result {
+	// Ignore functions that do not take a flagset as a parameter.
+	if !containsFlagSet(f.Type.Params.List) {
+		return Result{}
+	}
+	var res Result
+	for _, s := range f.Body.List {
+		es, ok := s.(*ast.ExprStmt)
+		if !ok {
+			continue
+		}
+		callE, ok := es.X.(*ast.CallExpr)
+		if !ok {
+			continue
+		}
+		if len(callE.Args) != 3 {
+			continue
+		}
+		sl, ok := extractStrLit(callE.Args[0])
+		if !ok {
+			continue
+		}
+		s, ok := selectorName(callE.Args[1])
+		if !ok {
+			continue
+		}
+		handleSelector(pass, callE.Args[1].(*ast.SelectorExpr), -1, cnt)
+		if normSL := normalizeTag(sl); !strings.EqualFold(normSL, s) {
+			res.Errors = append(res.Errors, koanfError{
+				Pos:     f.Pos(),
+				Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s),
+				err:     errIncorrectFlag,
+			})
+		}
+	}
+	return res
+}
+
+func selectorName(e ast.Expr) (string, bool) {
+	n, ok := e.(ast.Node)
+	if !ok {
+		return "", false
+	}
+	se, ok := n.(*ast.SelectorExpr)
+	if !ok {
+		return "", false
+	}
+	return se.Sel.Name, true
+}
+
+// extractStrLit extracts a literal from an expression that is either:
+// - a string literal, or
+// - a sum of a variable and a string literal.
+// E.g.
+// - extractStrLit(`"max-size"`) = "max-size"
+// - extractStrLit(`prefix + ".enable"`) = ".enable".
+func extractStrLit(e ast.Expr) (string, bool) {
+	if s, ok := strLit(e); ok {
+		return s, true
+	}
+	if be, ok := e.(*ast.BinaryExpr); ok {
+		if be.Op == token.ADD {
+			if s, ok := strLit(be.Y); ok {
+				// Drop the prefix dot.
+				return s[1:], true
+			}
+		}
+	}
+	return "", false
+}
+
+func strLit(e ast.Expr) (string, bool) {
+	if s, ok := e.(*ast.BasicLit); ok {
+		if s.Kind == token.STRING {
+			return strings.Trim(s.Value, "\""), true
+		}
+	}
+	return "", false
+}
+
+// tagFromField extracts the koanf tag from a struct field.
+func tagFromField(f *ast.Field) string {
+	if f.Tag == nil {
+		return ""
+	}
+	tags, err := structtag.Parse(strings.Trim((f.Tag.Value), "`"))
+	if err != nil {
+		return ""
+	}
+	tag, err := tags.Get("koanf")
+	if err != nil {
+		return ""
+	}
+	return normalizeTag(tag.Name)
+}
+
+// checkStruct returns violations where the koanf tag name doesn't match the field name.
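+// The comparison is case-insensitive on the normalized tag: a tag "max-size"
+// on a field named MaxSize is accepted, while a tag "chain" on a field named
+// L2 is reported.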
+func checkStruct(pass *analysis.Pass, s *ast.StructType) Result {
+	var res Result
+	for _, f := range s.Fields.List {
+		tag := tagFromField(f)
+		if tag == "" {
+			continue
+		}
+		fieldName := f.Names[0].Name
+		if !strings.EqualFold(tag, fieldName) {
+			res.Errors = append(res.Errors, koanfError{
+				Pos:     f.Pos(),
+				Message: fmt.Sprintf("field name: %q doesn't match tag name: %q\n", fieldName, tag),
+				err:     errMismatch,
+			})
+		}
+	}
+	return res
+}
+
+func normalizeTag(s string) string {
+	ans := s[:1]
+	for i := 1; i < len(s); i++ {
+		c := rune(s[i])
+		if !isAlphanumeric(c) {
+			continue
+		}
+		if !isAlphanumeric(rune(s[i-1])) && unicode.IsLower(c) {
+			c = unicode.ToUpper(c)
+		}
+		ans += string(c)
+	}
+	return ans
+}
+
+func isAlphanumeric(c rune) bool {
+	return unicode.IsLetter(c) || unicode.IsDigit(c)
+}
+
+func normalizeID(pass *analysis.Pass, id string) string {
+	id = strings.TrimPrefix(id, "*")
+	return pass.Pkg.Name() + strings.TrimPrefix(id, pass.Pkg.Path())
+}
diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go
index 8dbb392cb4..d6780760e7 100644
--- a/linter/koanf/koanf.go
+++ b/linter/koanf/koanf.go
@@ -1,18 +1,23 @@
 package main
 
 import (
+	"errors"
 	"fmt"
 	"go/ast"
 	"go/token"
 	"reflect"
-	"strings"
-	"unicode"
 
-	"github.com/fatih/structtag"
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/singlechecker"
 )
 
+var (
+	errUnused   = errors.New("unused")
+	errMismatch = errors.New("mismatched field name and tag in a struct")
+	// e.g. f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+	errIncorrectFlag = errors.New("mismatching flag initialization")
+)
+
 func New(conf any) ([]*analysis.Analyzer, error) {
 	return []*analysis.Analyzer{Analyzer}, nil
 }
@@ -33,8 +38,9 @@ var analyzerForTests = &analysis.Analyzer{
 
 // koanfError indicates the position of an error in configuration.
 type koanfError struct {
-	Pos     token.Position
+	Pos     token.Pos
 	Message string
+	err     error
 }
 
 // Result is returned from the checkStruct function, and holds all the
@@ -44,7 +50,14 @@ type Result struct {
 }
 
 func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
-	var ret Result
+	var (
+		ret Result
+		cnt = make(map[string]int)
+		// koanfFields contains all the struct fields that have a koanf tag.
+		// It identifies a field as "{pkgName}.{structName}.{fieldName}".
+		// e.g. "a.BatchPosterConfig.Enable", "a.BatchPosterConfig.MaxSize"
+		koanfFields = koanfFields(pass)
+	)
 	for _, f := range pass.Files {
 		ast.Inspect(f, func(node ast.Node) bool {
 			var res Result
@@ -52,167 +65,41 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
 			case *ast.StructType:
 				res = checkStruct(pass, v)
 			case *ast.FuncDecl:
-				res = checkFlagDefs(pass, v)
-			default:
-			}
-			for _, err := range res.Errors {
-				ret.Errors = append(ret.Errors, err)
-				if !dryRun {
-					pass.Report(analysis.Diagnostic{
-						Pos:      pass.Fset.File(f.Pos()).Pos(err.Pos.Offset),
-						Message:  err.Message,
-						Category: "koanf",
-					})
+				res = checkFlagDefs(pass, v, cnt)
+			case *ast.SelectorExpr:
+				handleSelector(pass, v, 1, cnt)
+			case *ast.IfStmt:
+				if se, ok := v.Cond.(*ast.SelectorExpr); ok {
+					handleSelector(pass, se, 1, cnt)
 				}
+			case *ast.CompositeLit:
+				handleComposite(pass, v, cnt)
+			default:
 			}
+			ret.Errors = append(ret.Errors, res.Errors...)
return true - }, - ) - } - return ret, nil -} - -func containsFlagSet(params []*ast.Field) bool { - for _, p := range params { - se, ok := p.Type.(*ast.StarExpr) - if !ok { - continue - } - sle, ok := se.X.(*ast.SelectorExpr) - if !ok { - continue - } - if sle.Sel.Name == "FlagSet" { - return true - } - } - return false -} - -// checkFlagDefs checks flag definitions in the function. -// Result contains list of errors where flag name doesn't match field name. -func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl) Result { - // Ignore functions that does not get flagset as parameter. - if !containsFlagSet(f.Type.Params.List) { - return Result{} - } - var res Result - for _, s := range f.Body.List { - es, ok := s.(*ast.ExprStmt) - if !ok { - continue - } - callE, ok := es.X.(*ast.CallExpr) - if !ok { - continue - } - if len(callE.Args) != 3 { - continue - } - sl, ok := extractStrLit(callE.Args[0]) - if !ok { - continue - } - s, ok := selector(callE.Args[1]) - if !ok { - continue - } - if normSL := normalize(sl); !strings.EqualFold(normSL, s) { - res.Errors = append(res.Errors, koanfError{ - Pos: pass.Fset.Position(f.Pos()), - Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s), - }) - } - + }) } - return res -} - -func selector(e ast.Expr) (string, bool) { - n, ok := e.(ast.Node) - if !ok { - return "", false - } - se, ok := n.(*ast.SelectorExpr) - if !ok { - return "", false - } - return se.Sel.Name, true -} - -// Extracts literal from expression that is either: -// - string literal or -// - sum of variable and string literal. -// E.g. -// strLitFromSum(`"max-size"`) = "max-size" -// - strLitFromSum(`prefix + ".enable"“) = ".enable". -func extractStrLit(e ast.Expr) (string, bool) { - if s, ok := strLit(e); ok { - return s, true - } - if be, ok := e.(*ast.BinaryExpr); ok { - if be.Op == token.ADD { - if s, ok := strLit(be.Y); ok { - // Drop the prefix dot. 
- return s[1:], true - } - } - } - return "", false -} - -func strLit(e ast.Expr) (string, bool) { - if s, ok := e.(*ast.BasicLit); ok { - if s.Kind == token.STRING { - return strings.Trim(s.Value, "\""), true + for k := range koanfFields { + if cnt[k] == 0 { + ret.Errors = append(ret.Errors, + koanfError{ + Pos: koanfFields[k], + Message: fmt.Sprintf("field %v not used", k), + err: errUnused, + }) } } - return "", false -} - -func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { - var res Result - for _, f := range s.Fields.List { - if f.Tag == nil { - continue - } - tags, err := structtag.Parse(strings.Trim((f.Tag.Value), "`")) - if err != nil { - continue - } - tag, err := tags.Get("koanf") - if err != nil { - continue - } - tagName := normalize(tag.Name) - fieldName := f.Names[0].Name - if !strings.EqualFold(tagName, fieldName) { - res.Errors = append(res.Errors, koanfError{ - Pos: pass.Fset.Position(f.Pos()), - Message: fmt.Sprintf("field name: %q doesn't match tag name: %q\n", fieldName, tagName), + for _, err := range ret.Errors { + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: err.Pos, + Message: err.Message, + Category: "koanf", }) } } - return res -} - -func normalize(s string) string { - ans := s[:1] - for i := 1; i < len(s); i++ { - c := rune(s[i]) - if !isAlphanumeric(c) { - continue - } - if !isAlphanumeric(rune(s[i-1])) && unicode.IsLower(c) { - c = unicode.ToUpper(c) - } - ans += string(c) - } - return ans -} - -func isAlphanumeric(c rune) bool { - return unicode.IsLetter(c) || unicode.IsDigit(c) + return ret, nil } func main() { diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go index e3ad5e6043..064ae533c4 100644 --- a/linter/koanf/koanf_test.go +++ b/linter/koanf/koanf_test.go @@ -1,31 +1,71 @@ package main import ( + "errors" "os" "path/filepath" "testing" + "github.com/google/go-cmp/cmp" "golang.org/x/tools/go/analysis/analysistest" ) -func TestAll(t *testing.T) { +var ( + incorrectFlag = "incorrect_flag" + mismatch = "mismatch" + unused = "unused" +) + +func testData(t *testing.T) string { + t.Helper() wd, err := os.Getwd() if err != nil { t.Fatalf("Failed to get wd: %s", err) } - testdata := filepath.Join(filepath.Dir(wd), "testdata") - res := analysistest.Run(t, testdata, analyzerForTests, "a") - if cnt := countErrors(res); cnt != 3 { - t.Errorf("analysistest.Run() got %v errors, expected 3", cnt) + return filepath.Join(filepath.Dir(wd), "testdata") +} + +// Tests koanf/a package that contains two types of errors where: +// - koanf tag doesn't match field name. +// - flag definition doesn't match field name. +// Errors are marked as comments in the package source file. 
+func TestMismatch(t *testing.T) {
+	testdata := testData(t)
+	got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/a"))
+	want := map[string]int{
+		incorrectFlag: 2,
+		mismatch:      1,
+	}
+	if diff := cmp.Diff(got, want); diff != "" {
+		t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff)
+	}
+}
+
+func TestUnused(t *testing.T) {
+	testdata := testData(t)
+	got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/b"))
+	if diff := cmp.Diff(got, map[string]int{"unused": 2}); diff != "" {
+		t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff)
+	}
+}
 
-func countErrors(errs []*analysistest.Result) int {
-	cnt := 0
-	for _, e := range errs {
-		if r, ok := e.Result.(Result); ok {
-			cnt += len(r.Errors)
+func errCounts(res []*analysistest.Result) map[string]int {
+	m := make(map[string]int)
+	for _, r := range res {
+		if rs, ok := r.Result.(Result); ok {
+			for _, e := range rs.Errors {
+				var s string
+				switch {
+				case errors.Is(e.err, errIncorrectFlag):
+					s = incorrectFlag
+				case errors.Is(e.err, errMismatch):
+					s = mismatch
+				case errors.Is(e.err, errUnused):
+					s = unused
+				}
+				m[s] = m[s] + 1
+			}
 		}
 	}
-	return cnt
+	return m
 }
diff --git a/linter/testdata/src/a/a.go b/linter/testdata/src/a/a.go
deleted file mode 100644
index 86b7739108..0000000000
--- a/linter/testdata/src/a/a.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package a
-
-import (
-	"flag"
-)
-
-type Config struct {
-	// Field name doesn't match koanf tag.
-	L2       int `koanf:"chain"`
-	LogLevel int `koanf:"log-level"`
-	LogType  int `koanf:"log-type"`
-	Metrics  int `koanf:"metrics"`
-	PProf    int `koanf:"pprof"`
-	Node     int `koanf:"node"`
-	Queue    int `koanf:"queue"`
-}
-
-type BatchPosterConfig struct {
-	Enable  bool `koanf:"enable"`
-	MaxSize int  `koanf:"max-size" reload:"hot"`
-}
-
-// Flag names don't match field names from default config.
-// Contains 2 errors.
-func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) {
-	f.Bool(prefix+".enabled", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
-	f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
-}
-
-func ConfigAddOptions(prefix string, f *flag.FlagSet) {
-	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
-	f.Int("max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
-}
-
-var DefaultBatchPosterConfig = BatchPosterConfig{
-	Enable:  false,
-	MaxSize: 100000,
-}
diff --git a/linter/testdata/src/koanf/a/a.go b/linter/testdata/src/koanf/a/a.go
new file mode 100644
index 0000000000..a0513fb09b
--- /dev/null
+++ b/linter/testdata/src/koanf/a/a.go
@@ -0,0 +1,58 @@
+package a
+
+import (
+	"flag"
+)
+
+type Config struct {
+	L2       int `koanf:"chain"` // Err: mismatch.
+	LogLevel int `koanf:"log-level"`
+	LogType  int `koanf:"log-type"`
+	Metrics  int `koanf:"metrics"`
+	PProf    int `koanf:"pprof"`
+	Node     int `koanf:"node"`
+	Queue    int `koanf:"queue"`
+}
+
+// Cover usage of all the fields in various ways:
+
+// Instantiating a type.
+var defaultConfig = Config{
+	L2:       1,
+	LogLevel: 2,
+}
+
+// Instantiating a type and taking a reference.
+var defaultConfigPtr = &Config{
+	LogType: 3,
+	Metrics: 4,
+}
+
+func init() {
+	defaultConfig.PProf = 5
+	defaultConfig.Node, _ = 6, 0
+	defaultConfigPtr.Queue = 7
+}
+
+type BatchPosterConfig struct {
+	Enable  bool `koanf:"enable"`
+	MaxSize int  `koanf:"max-size" reload:"hot"`
+}
+
+var DefaultBatchPosterConfig BatchPosterConfig
+
+func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enabled", DefaultBatchPosterConfig.Enable, "") // Err: incorrect flag.
+	f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "")          // Err: incorrect flag.
+}
+
+func ConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
+	f.Int("max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+}
+
+func init() {
+	// Fields must be used outside flag definitions at least once.
+	DefaultBatchPosterConfig.Enable = true
+	DefaultBatchPosterConfig.MaxSize = 3
+}
diff --git a/linter/testdata/src/koanf/b/b.go b/linter/testdata/src/koanf/b/b.go
new file mode 100644
index 0000000000..fe958f17b3
--- /dev/null
+++ b/linter/testdata/src/koanf/b/b.go
@@ -0,0 +1,52 @@
+package b
+
+import (
+	"flag"
+	"fmt"
+)
+
+type ParCfg struct {
+	child      ChildCfg      `koanf:"child"`
+	grandChild GrandChildCfg `koanf:"grandchild"`
+}
+
+var defaultCfg = ParCfg{}
+
+type ChildCfg struct {
+	A bool `koanf:"A"`
+	B bool `koanf:"B"`
+	C bool `koanf:"C"`
+	D bool `koanf:"D"` // Error: not used outside flag definition.
+}
+
+var defaultChildCfg = ChildCfg{}
+
+func childConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".a", defaultChildCfg.A, "")
+	f.Bool("b", defaultChildCfg.B, "")
+	f.Bool("c", defaultChildCfg.C, "")
+	f.Bool("d", defaultChildCfg.D, "")
+}
+
+type GrandChildCfg struct {
+	A int `koanf:"A"` // Error: unused.
+}
+
+func (c *GrandChildCfg) Do() {
+}
+
+func configPtr() *ChildCfg {
+	return nil
+}
+func config() ChildCfg {
+	return ChildCfg{}
+}
+
+func init() {
+	fmt.Printf("%v %v", config().A, configPtr().B)
+	// This covers usage of both `ParCfg.child` and `ChildCfg.C`.
+	_ = defaultCfg.child.C
+	// Covers usage of grandChild.
+	defaultCfg.grandChild.Do()
+
+}

From 80dc09da065f87fb5bc740dd820d687cc0942a38 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Mon, 28 Aug 2023 17:54:25 +0200
Subject: [PATCH 064/117] Enable custom linters (koanf/pointercheck) in CI

---
 .github/workflows/ci.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d2f5765d72..5b0b33848c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -129,6 +129,10 @@ jobs:
           version: latest
           skip-go-installation: true
           skip-pkg-cache: true
+      - name: Custom Lint
+        run: |
+          go run linter/koanf/koanf.go linter/koanf/handlers.go ./...
+          go run linter/pointercheck/pointer.go ./...
 
       - name: Set environment variables
         run: |

From 10af61eb84e7d4e9eaa263794c53c0f14da09fc2 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Mon, 28 Aug 2023 17:55:35 +0200
Subject: [PATCH 065/117] Fix whitespace in ci.yml

---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5b0b33848c..916969f324 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -132,7 +132,7 @@ jobs:
       - name: Custom Lint
         run: |
           go run linter/koanf/koanf.go linter/koanf/handlers.go ./...
-          go run linter/pointercheck/pointer.go ./... 
+          go run linter/pointercheck/pointer.go ./...
 
       - name: Set environment variables
         run: |

From 2754cf3322f7a359ff9c20186be706e7c8033040 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Tue, 29 Aug 2023 11:50:24 -0600
Subject: [PATCH 066/117] Fix batch bounds calculation on L3s

---
 arbnode/batch_poster.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index e9a1663741..429701be7e 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -787,7 +787,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		if err != nil {
 			return false, err
 		}
-		blockNumberWithPadding := arbmath.SaturatingUAdd(arbmath.BigToUintSaturating(latestHeader.Number), uint64(config.L1BlockBoundBypass/ethPosBlockTime))
+		latestBlockNumber := arbutil.ParentHeaderToL1BlockNumber(latestHeader)
+		blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime))
 		timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second))
 
 		l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks))

From 64b6adb383227dfc47b4d10f5cfb34d76c1dc6a1 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Tue, 29 Aug 2023 20:03:52 +0200
Subject: [PATCH 067/117] Implement structinit linter that checks that all
 instantiations of marked structs initialize all the fields

---
 Makefile                              |   1 +
 linter/structinit/structinit.go       | 122 ++++++++++++++++++++++++++
 linter/structinit/structinit_test.go  |  36 ++++++++
 linter/testdata/src/structinit/a/a.go |  28 ++++++
 4 files changed, 187 insertions(+)
 create mode 100644 linter/structinit/structinit.go
 create mode 100644 linter/structinit/structinit_test.go
 create mode 100644 linter/testdata/src/structinit/a/a.go

diff --git a/Makefile b/Makefile
index 33487d0609..9f1c2b37e7 100644
--- a/Makefile
+++ b/Makefile
@@ -306,6 +306,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro
 .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make
 	go run linter/koanf/koanf.go linter/koanf/handlers.go ./...
 	go run linter/pointercheck/pointer.go ./...
+	go run linter/structinit/structinit.go ./...
 	golangci-lint run --fix
 	yarn --cwd contracts solhint
 	@touch $@
diff --git a/linter/structinit/structinit.go b/linter/structinit/structinit.go
new file mode 100644
index 0000000000..e4e65bc3fc
--- /dev/null
+++ b/linter/structinit/structinit.go
@@ -0,0 +1,122 @@
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"reflect"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/singlechecker"
+)
+
+// Tip for the linter: structs annotated with this comment are included in the
+// analysis.
+// Note: the comment must be on the line directly above the struct definition.
+const linterTip = "// lint:require-exhaustive-initialization"
+
+func New(conf any) ([]*analysis.Analyzer, error) {
+	return []*analysis.Analyzer{Analyzer}, nil
+}
+
+// Analyzer implements a struct analyzer for structs annotated with
+// `linterTip`; it checks that every instantiation initializes all the fields.
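+// For example, if an annotated struct has two fields, a composite literal that
+// sets only one of them is reported, while a literal that sets both is not.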
+var Analyzer = &analysis.Analyzer{ + Name: "structinit", + Doc: "check for struct field initializations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "teststructinit", + Doc: "check for struct field initializations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +type structError struct { + Pos token.Pos + Message string +} + +type Result struct { + Errors []structError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ( + ret Result + structs = markedStructs(pass) + ) + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + // For every composite literal check that number of elements in + // the literal match the number of struct fields. + if cl, ok := node.(*ast.CompositeLit); ok { + stName := pass.TypesInfo.Types[cl].Type.String() + if cnt, found := structs[stName]; found && cnt != len(cl.Elts) { + ret.Errors = append(ret.Errors, structError{ + Pos: cl.Pos(), + Message: fmt.Sprintf("struct: %q initialized with: %v of total: %v fields", stName, len(cl.Elts), cnt), + }) + + } + + } + return true + }) + } + for _, err := range ret.Errors { + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: err.Pos, + Message: err.Message, + Category: "structinit", + }) + } + } + return ret, nil +} + +// markedStructs returns a map of structs that are annotated for linter to check +// that all fields are initialized when the struct is instantiated. +// It maps struct full name (including package path) to number of fields it contains. +func markedStructs(pass *analysis.Pass) map[string]int { + res := make(map[string]int) + for _, f := range pass.Files { + tips := make(map[position]bool) + ast.Inspect(f, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.Comment: + p := pass.Fset.Position(node.Pos()) + if strings.Contains(n.Text, linterTip) { + tips[position{p.Filename, p.Line + 1}] = true + } + case *ast.TypeSpec: + if st, ok := n.Type.(*ast.StructType); ok { + p := pass.Fset.Position(st.Struct) + if tips[position{p.Filename, p.Line}] { + fieldsCnt := 0 + for _, field := range st.Fields.List { + fieldsCnt += len(field.Names) + } + res[pass.Pkg.Path()+"."+n.Name.Name] = fieldsCnt + } + } + } + return true + }) + } + return res +} + +type position struct { + fileName string + line int +} + +func main() { + singlechecker.Main(Analyzer) +} diff --git a/linter/structinit/structinit_test.go b/linter/structinit/structinit_test.go new file mode 100644 index 0000000000..db3676e185 --- /dev/null +++ b/linter/structinit/structinit_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +func testData(t *testing.T) string { + t.Helper() + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get wd: %s", err) + } + return filepath.Join(filepath.Dir(wd), "testdata") +} + +func TestLinter(t *testing.T) { + testdata := testData(t) + got := errCount(analysistest.Run(t, testdata, analyzerForTests, "structinit/a")) + if got != 2 { + t.Errorf("analysistest.Run() got %d errors, expected 2", got) + } +} + +func errCount(res []*analysistest.Result) int { + cnt := 0 + for _, r := range res { + if rs, ok := r.Result.(Result); ok { + cnt += len(rs.Errors) + } + } + return cnt +} diff --git a/linter/testdata/src/structinit/a/a.go 
b/linter/testdata/src/structinit/a/a.go new file mode 100644 index 0000000000..40be4dea21 --- /dev/null +++ b/linter/testdata/src/structinit/a/a.go @@ -0,0 +1,28 @@ +package a + +import "fmt" + +// lint:require-exhaustive-initialization +type interestingStruct struct { + x int + b *boringStruct +} + +type boringStruct struct { + x, y int +} + +func init() { + a := &interestingStruct{ + x: 1, // Error: only single field is initialized. + } + fmt.Println(a) + b := interestingStruct{ + b: nil, // Error: only single field is initialized. + } + fmt.Println(b) + c := &boringStruct{ + x: 1, // Not an error since it's not annotated for the linter. + } + fmt.Println(c) +} From 89d96b6ffaf1b2a2b97c27a08885b86a83e2e993 Mon Sep 17 00:00:00 2001 From: Nodar Date: Tue, 29 Aug 2023 20:06:18 +0200 Subject: [PATCH 068/117] Add testcase to structinit linter --- linter/testdata/src/structinit/a/a.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/linter/testdata/src/structinit/a/a.go b/linter/testdata/src/structinit/a/a.go index 40be4dea21..45f6059726 100644 --- a/linter/testdata/src/structinit/a/a.go +++ b/linter/testdata/src/structinit/a/a.go @@ -13,16 +13,21 @@ type boringStruct struct { } func init() { - a := &interestingStruct{ - x: 1, // Error: only single field is initialized. + a := &interestingStruct{ // Error: only single field is initialized. + x: 1, } fmt.Println(a) - b := interestingStruct{ - b: nil, // Error: only single field is initialized. + b := interestingStruct{ // Error: only single field is initialized. + b: nil, } fmt.Println(b) - c := &boringStruct{ - x: 1, // Not an error since it's not annotated for the linter. + c := interestingStruct{ // Not an error, all fields are initialized. + x: 1, + b: nil, } fmt.Println(c) + d := &boringStruct{ // Not an error since it's not annotated for the linter. 
+ x: 1, + } + fmt.Println(d) } From de3b6a9305b4cacca9a915e48ea0b8c4b874ef93 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 29 Aug 2023 15:06:24 -0600 Subject: [PATCH 069/117] Fix clippy warning about an Arc not being Send+Sync --- arbitrator/prover/src/utils.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arbitrator/prover/src/utils.rs b/arbitrator/prover/src/utils.rs index 6c11e9af05..efd94dcd7c 100644 --- a/arbitrator/prover/src/utils.rs +++ b/arbitrator/prover/src/utils.rs @@ -158,6 +158,10 @@ impl From<&[u8]> for CBytes { } } +// There's no thread safety concerns for CBytes +unsafe impl Send for CBytes {} +unsafe impl Sync for CBytes {} + #[derive(Serialize, Deserialize)] #[serde(remote = "Type")] enum RemoteType { From 9227df788929352184e0bd792898f15beabac0b9 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 29 Aug 2023 15:32:04 -0600 Subject: [PATCH 070/117] Fix formatting --- arbitrator/jit/src/syscall.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbitrator/jit/src/syscall.rs b/arbitrator/jit/src/syscall.rs index 4cd0363b49..c81641a7f8 100644 --- a/arbitrator/jit/src/syscall.rs +++ b/arbitrator/jit/src/syscall.rs @@ -306,10 +306,10 @@ pub fn js_value_index(mut env: WasmEnvMut, sp: u32) { pub fn js_value_call(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let Some(resume) = env.data().exports.resume.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "resume".red())) + return Escape::failure(format!("wasmer failed to bind {}", "resume".red())); }; let Some(get_stack_pointer) = env.data().exports.get_stack_pointer.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())) + return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())); }; let sp = GoStack::simple(sp, &env); let data = env.data_mut(); From daac2b8f21e2c7837a65c12d8f8294403dc54b25 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 30 Aug 2023 15:37:33 +0200 Subject: [PATCH 071/117] Add comment about increaseBy parameter --- linter/koanf/handlers.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/linter/koanf/handlers.go b/linter/koanf/handlers.go index 452291e605..00cd10c07e 100644 --- a/linter/koanf/handlers.go +++ b/linter/koanf/handlers.go @@ -30,26 +30,28 @@ func handleComposite(pass *analysis.Pass, cl *ast.CompositeLit, cnt map[string]i // handleSelector handles selector expression recursively, that is an expression: // a.B.C.D will update counter for fields: a.B.C.D, a.B.C and a.B. -func handleSelector(pass *analysis.Pass, se *ast.SelectorExpr, inc int, cnt map[string]int) string { +// It updates counters map in place, increasing corresponding identifiers by +// increaseBy amount. +func handleSelector(pass *analysis.Pass, se *ast.SelectorExpr, increaseBy int, cnt map[string]int) string { if e, ok := se.X.(*ast.SelectorExpr); ok { // Full field identifier, including package name. fi := pass.TypesInfo.Types[e].Type.String() + "." + se.Sel.Name - cnt[normalizeID(pass, fi)] += inc - prefix := handleSelector(pass, e, inc, cnt) + cnt[normalizeID(pass, fi)] += increaseBy + prefix := handleSelector(pass, e, increaseBy, cnt) fi = prefix + "." + se.Sel.Name - cnt[normalizeID(pass, fi)] += inc + cnt[normalizeID(pass, fi)] += increaseBy return fi } // Handle selectors on function calls, e.g. `config().Enabled`. if _, ok := se.X.(*ast.CallExpr); ok { fi := pass.TypesInfo.Types[se.X].Type.String() + "." 
-		cnt[normalizeID(pass, fi)] += inc
+		cnt[normalizeID(pass, fi)] += increaseBy
 		return fi
 	}
 	if ident, ok := se.X.(*ast.Ident); ok {
 		if pass.TypesInfo.Types[ident].Type != nil {
 			fi := pass.TypesInfo.Types[ident].Type.String() + "." + se.Sel.Name
-			cnt[normalizeID(pass, fi)] += inc
+			cnt[normalizeID(pass, fi)] += increaseBy
 			return fi
 		}
 	}

From ad64d967f68b84b1e8ed29a3d6907bd639b8cb16 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Wed, 30 Aug 2023 09:37:29 -0600
Subject: [PATCH 072/117] Make PreimageResolver Send+Sync

---
 arbitrator/prover/src/machine.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs
index fff9c0f3d8..d5a9c52d92 100644
--- a/arbitrator/prover/src/machine.rs
+++ b/arbitrator/prover/src/machine.rs
@@ -651,7 +651,7 @@ pub struct MachineState<'a> {
     initial_hash: Bytes32,
 }
 
-pub type PreimageResolver = Arc<dyn Fn(u64, Bytes32) -> Option<CBytes>>;
+pub type PreimageResolver = Arc<dyn Fn(u64, Bytes32) -> Option<CBytes> + Send + Sync>;
 
 /// Wraps a preimage resolver to provide an easier API
 /// and cache the last preimage retrieved.

From 283b5d71a1d8eff096e878d8f028cf33356585e9 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Wed, 30 Aug 2023 09:53:11 -0600
Subject: [PATCH 073/117] Extend comment explaining why CBytes is Send+Sync

---
 arbitrator/prover/src/utils.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arbitrator/prover/src/utils.rs b/arbitrator/prover/src/utils.rs
index efd94dcd7c..e86ea96768 100644
--- a/arbitrator/prover/src/utils.rs
+++ b/arbitrator/prover/src/utils.rs
@@ -158,7 +158,10 @@ impl From<&[u8]> for CBytes {
     }
 }
 
-// There are no thread safety concerns for CBytes
+// There are no thread safety concerns for CBytes.
+// This type is basically a Box<[u8]> (which is Send + Sync) with libc as an allocator.
+// Any data races between threads are prevented by Rust borrowing rules,
+// and the data isn't thread-local so there's no concern moving it between threads.
unsafe impl Send for CBytes {} unsafe impl Sync for CBytes {} From f6f659e7b2513ab48f0a276340676636350d98f0 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 30 Aug 2023 18:04:20 +0200 Subject: [PATCH 074/117] Implement legacy storage encoding with hot-reload flag to change encoding strategy on the fly --- arbnode/dataposter/data_poster.go | 24 +++-- arbnode/dataposter/leveldb/leveldb.go | 26 ++--- arbnode/dataposter/redis/redisstorage.go | 28 +++--- arbnode/dataposter/slice/slicestorage.go | 22 ++--- arbnode/dataposter/storage/storage.go | 117 +++++++++++++++++++++++ arbnode/dataposter/storage_test.go | 64 ++++++++----- 6 files changed, 202 insertions(+), 79 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index b1db655d71..adfde88f74 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -101,17 +101,23 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a initConfig.UseNoOpStorage = true log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool") } + encF := func() storage.EncoderDecoderInterface { + if config().LegacyStorageEncoding { + return &storage.LegacyEncoderDecoder{} + } + return &storage.EncoderDecoder{} + } var queue QueueStorage switch { case initConfig.UseNoOpStorage: queue = &noop.Storage{} case initConfig.UseLevelDB: - queue = leveldb.New(db) + queue = leveldb.New(db, encF) case redisClient == nil: - queue = slice.NewStorage() + queue = slice.NewStorage(encF) default: var err error - queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner) + queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF) if err != nil { return nil, err } @@ -174,7 +180,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by } lastQueueItem, err := p.queue.FetchLast(ctx) if err != nil { - return 0, nil, false, err + return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { nextNonce := lastQueueItem.Data.Nonce + 1 @@ -364,7 +370,10 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr if prevTx != nil && prevTx.Data.Nonce != newTx.Data.Nonce { return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) } - return p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx) + if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil { + return fmt.Errorf("putting new tx in the queue: %w", err) + } + return nil } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { @@ -546,7 +555,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { // replacing them by fee. 
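An editorial aside on the `encF` closure added in this patch: because it calls `config()` on every invocation, the `legacy-storage-encoding` flag (registered with `reload:"hot"` below) takes effect without a restart, since each storage operation picks its codec at call time rather than at construction time. A minimal, self-contained sketch of the pattern; the `Codec` and `Config` names here are invented for illustration, not the real nitro types:

```go
package main

import "fmt"

// Codec stands in for storage.EncoderDecoderInterface.
type Codec interface{ Name() string }

type legacyCodec struct{}

func (legacyCodec) Name() string { return "legacy" }

type currentCodec struct{}

func (currentCodec) Name() string { return "current" }

// Config stands in for the hot-reloadable data poster config.
type Config struct{ LegacyStorageEncoding bool }

func main() {
	cfg := &Config{LegacyStorageEncoding: true}
	config := func() *Config { return cfg } // config fetcher, consulted on every call

	// Because the closure re-reads config() each time it runs, flipping the
	// flag changes the codec without reconstructing the storage.
	encF := func() Codec {
		if config().LegacyStorageEncoding {
			return legacyCodec{}
		}
		return currentCodec{}
	}

	fmt.Println(encF().Name()) // legacy
	cfg.LegacyStorageEncoding = false
	fmt.Println(encF().Name()) // current
}
```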
queueContents, err := p.queue.FetchContents(ctx, unconfirmedNonce, maxTxsToRbf) if err != nil { - log.Error("Failed to get tx queue contents", "err", err) + log.Error("Failed to fetch tx queue contents", "err", err) return minWait } for index, tx := range queueContents { @@ -616,6 +625,7 @@ type DataPosterConfig struct { AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseLevelDB bool `koanf:"use-leveldb"` UseNoOpStorage bool `koanf:"use-noop-storage"` + LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"` } // ConfigFetcher function type is used instead of directly passing config so @@ -636,6 +646,7 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled") f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".legacy-storage-encoding", DefaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) } @@ -651,6 +662,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ AllocateMempoolBalance: true, UseLevelDB: false, UseNoOpStorage: false, + LegacyStorageEncoding: true, } var DefaultDataPosterConfigForValidator = func() DataPosterConfig { diff --git a/arbnode/dataposter/leveldb/leveldb.go b/arbnode/dataposter/leveldb/leveldb.go index e41a8665a6..cfb34b04f7 100644 --- a/arbnode/dataposter/leveldb/leveldb.go +++ b/arbnode/dataposter/leveldb/leveldb.go @@ -12,14 +12,14 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/syndtr/goleveldb/leveldb" ) // Storage implements leveldb based storage for batch poster. 
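This patch also upgrades several bare `return err` sites to `fmt.Errorf("...: %w", err)`. The `%w` verb wraps the original error rather than flattening it to a string, so sentinel checks such as `errors.Is(err, storage.ErrStorageRace)` elsewhere in the code keep working while the message gains context. A small standalone illustration (the sentinel below is a stand-in, not the real one):

```go
package main

import (
	"errors"
	"fmt"
)

// errStorageRace is a stand-in sentinel, similar in spirit to storage.ErrStorageRace.
var errStorageRace = errors.New("storage race")

func fetchLast() error {
	// %w wraps the sentinel instead of discarding it.
	return fmt.Errorf("fetching last element from queue: %w", errStorageRace)
}

func main() {
	err := fetchLast()
	fmt.Println(err)                            // fetching last element from queue: storage race
	fmt.Println(errors.Is(err, errStorageRace)) // true: identity survives the wrap
}
```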
type Storage struct { - db ethdb.Database + db ethdb.Database + encDec storage.EncoderDecoderF } var ( @@ -31,16 +31,8 @@ var ( countKey = []byte(".count_key") ) -func New(db ethdb.Database) *Storage { - return &Storage{db: db} -} - -func (s *Storage) decodeItem(data []byte) (*storage.QueuedTransaction, error) { - var item storage.QueuedTransaction - if err := rlp.DecodeBytes(data, &item); err != nil { - return nil, fmt.Errorf("decoding item: %w", err) - } - return &item, nil +func New(db ethdb.Database, enc storage.EncoderDecoderF) *Storage { + return &Storage{db: db, encDec: enc} } func idxToKey(idx uint64) []byte { @@ -55,7 +47,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu if !it.Next() { break } - item, err := s.decodeItem(it.Value()) + item, err := s.encDec().Decode(it.Value()) if err != nil { return nil, err } @@ -84,7 +76,7 @@ func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, er if err != nil { return nil, err } - return s.decodeItem(val) + return s.encDec().Decode(val) } func (s *Storage) Prune(ctx context.Context, until uint64) error { @@ -117,7 +109,7 @@ func (s *Storage) valueAt(_ context.Context, key []byte) ([]byte, error) { val, err := s.db.Get(key) if err != nil { if isErrNotFound(err) { - return rlp.EncodeToBytes((*storage.QueuedTransaction)(nil)) + return s.encDec().Encode((*storage.QueuedTransaction)(nil)) } return nil, err } @@ -130,14 +122,14 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return err } - prevEnc, err := rlp.EncodeToBytes(prev) + prevEnc, err := s.encDec().Encode(prev) if err != nil { return fmt.Errorf("encoding previous item: %w", err) } if !bytes.Equal(stored, prevEnc) { return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, stored, prevEnc) } - newEnc, err := rlp.EncodeToBytes(new) + newEnc, err := s.encDec().Encode(new) if err != nil { return fmt.Errorf("encoding new item: %w", err) } diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index e6fe666c69..5123cea154 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/util/signature" @@ -23,14 +22,15 @@ type Storage struct { client redis.UniversalClient signer *signature.SimpleHmac key string + encDec storage.EncoderDecoderF } -func NewStorage(client redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig) (*Storage, error) { +func NewStorage(client redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig, enc storage.EncoderDecoderF) (*Storage, error) { signer, err := signature.NewSimpleHmac(signerConf) if err != nil { return nil, err } - return &Storage{client, signer, key}, nil + return &Storage{client, signer, key, enc}, nil } func joinHmacMsg(msg []byte, sig []byte) ([]byte, error) { @@ -65,16 +65,15 @@ func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxRe } var items []*storage.QueuedTransaction for _, itemString := range itemStrings { - var item storage.QueuedTransaction data, err := s.peelVerifySignature([]byte(itemString)) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - items = 
append(items, &item) + items = append(items, item) } return items, nil } @@ -95,16 +94,15 @@ func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, er } var ret *storage.QueuedTransaction if len(itemStrings) > 0 { - var item storage.QueuedTransaction data, err := s.peelVerifySignature([]byte(itemStrings[0])) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - ret = &item + ret = item } return ret, nil } @@ -144,21 +142,20 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return fmt.Errorf("failed to validate item already in redis at index%v: %w", index, err) } - prevItemEncoded, err := rlp.EncodeToBytes(prev) + prevItemEncoded, err := s.encDec().Encode(prev) if err != nil { return err } if !bytes.Equal(verifiedItem, prevItemEncoded) { return fmt.Errorf("%w: replacing different item than expected at index %v", storage.ErrStorageRace, index) } - err = pipe.ZRem(ctx, s.key, haveItems[0]).Err() - if err != nil { + if err := pipe.ZRem(ctx, s.key, haveItems[0]).Err(); err != nil { return err } } else { return fmt.Errorf("expected only one return value for Put but got %v", len(haveItems)) } - newItemEncoded, err := rlp.EncodeToBytes(*new) + newItemEncoded, err := s.encDec().Encode(new) if err != nil { return err } @@ -170,11 +167,10 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return err } - err = pipe.ZAdd(ctx, s.key, &redis.Z{ + if err := pipe.ZAdd(ctx, s.key, &redis.Z{ Score: float64(index), Member: string(signedItem), - }).Err() - if err != nil { + }).Err(); err != nil { return err } _, err = pipe.Exec(ctx) diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index 6eda5ca9a3..04286df411 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -9,25 +9,17 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" ) type Storage struct { firstNonce uint64 queue [][]byte + encDec func() storage.EncoderDecoderInterface } -func NewStorage() *Storage { - return &Storage{} -} - -func (s *Storage) decodeItem(data []byte) (*storage.QueuedTransaction, error) { - var item storage.QueuedTransaction - if err := rlp.DecodeBytes(data, &item); err != nil { - return nil, fmt.Errorf("decoding item: %w", err) - } - return &item, nil +func NewStorage(encDec func() storage.EncoderDecoderInterface) *Storage { + return &Storage{encDec: encDec} } func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) { @@ -43,7 +35,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu } var res []*storage.QueuedTransaction for _, r := range txs { - item, err := s.decodeItem(r) + item, err := s.encDec().Decode(r) if err != nil { return nil, err } @@ -56,7 +48,7 @@ func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) if len(s.queue) == 0 { return nil, nil } - return s.decodeItem(s.queue[len(s.queue)-1]) + return s.encDec().Decode(s.queue[len(s.queue)-1]) } func (s *Storage) Prune(_ context.Context, until uint64) error { @@ -73,7 +65,7 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued if new == nil { return fmt.Errorf("tried to insert nil item at index %v", index) } - newEnc, err := 
rlp.EncodeToBytes(new)
+	newEnc, err := s.encDec().Encode(new)
 	if err != nil {
 		return fmt.Errorf("encoding new item: %w", err)
 	}
@@ -93,7 +85,7 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued
 	if queueIdx > len(s.queue) {
 		return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue))
 	}
-	prevEnc, err := rlp.EncodeToBytes(prev)
+	prevEnc, err := s.encDec().Encode(prev)
 	if err != nil {
 		return fmt.Errorf("encoding previous item: %w", err)
 	}
diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index 174ab131ac..57085c4fb1 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -2,9 +2,12 @@ package storage
 
 import (
 	"errors"
+	"fmt"
 	"time"
 
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/offchainlabs/nitro/arbutil"
 )
 
 var (
@@ -24,3 +27,117 @@ type QueuedTransaction struct {
 	Created         time.Time // may be earlier than the tx was given to the tx poster
 	NextReplacement time.Time
 }
+
+// LegacyQueuedTransaction is used for backwards compatibility.
+// Before https://github.com/OffchainLabs/nitro/pull/1773, the queuedTransaction
+// looked like this and was rlp encoded directly. After that PR, we store the
+// rlp encoding of Meta inside queuedTransaction and rlp encode the whole item
+// once more before storing it.
+type LegacyQueuedTransaction struct {
+	FullTx          *types.Transaction `rlp:"nil"`
+	Data            types.DynamicFeeTx
+	Meta            BatchPosterPosition
+	Sent            bool
+	Created         time.Time // may be earlier than the tx was given to the tx poster
+	NextReplacement time.Time
+}
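To make the two layouts concrete: the legacy format rlp-encodes the whole item in one pass with `Meta` as a struct, while the new format first rlp-encodes `Meta` to bytes and then rlp-encodes the item containing those bytes. A toy round trip under simplified stand-in types (not the real nitro structs):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

type position struct{ MessageCount, DelayedMessageCount, NextSeqNum uint64 }

type legacyItem struct { // legacy: Meta is a struct, encoded in one pass
	Meta position
	Sent bool
}

type newItem struct { // new: Meta is pre-encoded bytes, encoded once more
	Meta []byte
	Sent bool
}

func main() {
	leg := legacyItem{Meta: position{MessageCount: 7}, Sent: true}

	// Legacy -> new: rlp encode Meta on its own and store the raw bytes.
	metaBytes, err := rlp.EncodeToBytes(leg.Meta)
	if err != nil {
		panic(err)
	}
	item := newItem{Meta: metaBytes, Sent: leg.Sent}

	// New -> legacy: decode the inner bytes back into the struct.
	var pos position
	if err := rlp.DecodeBytes(item.Meta, &pos); err != nil {
		panic(err)
	}
	fmt.Println(pos.MessageCount) // 7
}
```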
+
+// This is also here for legacy reasons. Since BatchPoster is in the arbnode
+// package, we can't refer to the BatchPosterPosition type there even if we
+// export it (that would create a cyclic dependency).
+// Ideally we'll factor out the batch poster from arbnode into a separate
+// package, and BatchPosterPosition into another separate package as well.
+// For the sake of minimal refactoring, that struct is duplicated here.
+type BatchPosterPosition struct {
+	MessageCount        arbutil.MessageIndex
+	DelayedMessageCount uint64
+	NextSeqNum          uint64
+}
+
+func DecodeLegacyQueuedTransaction(data []byte) (*LegacyQueuedTransaction, error) {
+	var val LegacyQueuedTransaction
+	if err := rlp.DecodeBytes(data, &val); err != nil {
+		return nil, fmt.Errorf("decoding legacy queued transaction: %w", err)
+	}
+	return &val, nil
+}
+
+func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransaction, error) {
+	meta, err := rlp.EncodeToBytes(legacyQT.Meta)
+	if err != nil {
+		return nil, fmt.Errorf("converting legacy to queued transaction: %w", err)
+	}
+	return &QueuedTransaction{
+		FullTx:          legacyQT.FullTx,
+		Data:            legacyQT.Data,
+		Meta:            meta,
+		Sent:            legacyQT.Sent,
+		Created:         legacyQT.Created,
+		NextReplacement: legacyQT.NextReplacement,
+	}, nil
+}
+
+func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, error) {
+	if qt == nil {
+		return nil, nil
+	}
+	var meta BatchPosterPosition
+	if qt.Meta != nil {
+		if err := rlp.DecodeBytes(qt.Meta, &meta); err != nil {
+			return nil, fmt.Errorf("converting queued transaction to legacy: %w", err)
+		}
+	}
+	return &LegacyQueuedTransaction{
+		FullTx:          qt.FullTx,
+		Data:            qt.Data,
+		Meta:            meta,
+		Sent:            qt.Sent,
+		Created:         qt.Created,
+		NextReplacement: qt.NextReplacement,
+	}, nil
+}
+
+type EncoderDecoder struct{}
+
+func (e *EncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) {
+	return rlp.EncodeToBytes(qt)
+}
+
+func (e *EncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) {
+	var item QueuedTransaction
+	if err := rlp.DecodeBytes(data, &item); err != nil {
+		return nil, fmt.Errorf("decoding item: %w", err)
+	}
+	return &item, nil
+}
+
+type LegacyEncoderDecoder struct{}
+
+func (e *LegacyEncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) {
+	legacyQt, err := QueuedTransactionToLegacy(qt)
+	if err != nil {
+		return nil, fmt.Errorf("encoding legacy item: %w", err)
+	}
+	return rlp.EncodeToBytes(legacyQt)
+}
+
+func (le *LegacyEncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) {
+	val, err := DecodeLegacyQueuedTransaction(data)
+	if err != nil {
+		return nil, fmt.Errorf("decoding legacy item: %w", err)
+	}
+	return LegacyToQueuedTransaction(val)
+}
+
+// Typically interfaces belong where they are used, not at the implementing
+// site, but this one is used in all storages (besides no-op) and all of them
+// require all the functions of this interface.
+type EncoderDecoderInterface interface {
+	Encode(*QueuedTransaction) ([]byte, error)
+	Decode([]byte) (*QueuedTransaction, error)
+}
+
+// EncoderDecoderF is a function type that returns an encoder/decoder interface.
+// This is needed to implement the hot-reload flag that switches the
+// encoding/decoding strategy on the fly.
+type EncoderDecoderF func() EncoderDecoderInterface diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index 2424ac0845..eac05502be 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/offchainlabs/nitro/arbnode/dataposter/leveldb" @@ -27,36 +28,41 @@ var ignoreData = cmp.Options{ cmpopts.IgnoreFields(types.Transaction{}, "hash", "size", "from"), } -func newLevelDBStorage(t *testing.T) *leveldb.Storage { +func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *leveldb.Storage { t.Helper() db, err := rawdb.NewLevelDBDatabase(path.Join(t.TempDir(), "level.db"), 0, 0, "default", false) if err != nil { t.Fatalf("NewLevelDBDatabase() unexpected error: %v", err) } - return leveldb.New(db) + return leveldb.New(db, encF) } -func newSliceStorage() *slice.Storage { - return slice.NewStorage() +func newSliceStorage(encF storage.EncoderDecoderF) *slice.Storage { + return slice.NewStorage(encF) } -func newRedisStorage(ctx context.Context, t *testing.T) *redis.Storage { +func newRedisStorage(ctx context.Context, t *testing.T, encF storage.EncoderDecoderF) *redis.Storage { t.Helper() redisUrl := redisutil.CreateTestRedis(ctx, t) client, err := redisutil.RedisClientFromURL(redisUrl) if err != nil { t.Fatalf("RedisClientFromURL(%q) unexpected error: %v", redisUrl, err) } - s, err := redis.NewStorage(client, "", &signature.TestSimpleHmacConfig) + s, err := redis.NewStorage(client, "", &signature.TestSimpleHmacConfig, encF) if err != nil { t.Fatalf("redis.NewStorage() unexpected error: %v", err) } return s } -func valueOf(i int) *storage.QueuedTransaction { +func valueOf(t *testing.T, i int) *storage.QueuedTransaction { + t.Helper() + meta, err := rlp.EncodeToBytes(storage.BatchPosterPosition{DelayedMessageCount: uint64(i)}) + if err != nil { + t.Fatalf("Encoding batch poster position, error: %v", err) + } return &storage.QueuedTransaction{ - Meta: []byte{byte(i)}, + Meta: meta, Data: types.DynamicFeeTx{ ChainID: big.NewInt(int64(i)), Nonce: uint64(i), @@ -73,10 +79,10 @@ func valueOf(i int) *storage.QueuedTransaction { } } -func values(from, to int) []*storage.QueuedTransaction { +func values(t *testing.T, from, to int) []*storage.QueuedTransaction { var res []*storage.QueuedTransaction for i := from; i <= to; i++ { - res = append(res, valueOf(i)) + res = append(res, valueOf(t, i)) } return res } @@ -85,7 +91,7 @@ func values(from, to int) []*storage.QueuedTransaction { func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage { t.Helper() for i := 0; i < 20; i++ { - if err := s.Put(ctx, uint64(i), nil, valueOf(i)); err != nil { + if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil { t.Fatalf("Error putting a key/value: %v", err) } } @@ -95,10 +101,18 @@ func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage // Returns a map of all empty storages. 
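An aside on the `ignoreData` options used by these tests: types like `big.Int` and `types.Transaction` have unexported fields, which `go-cmp` refuses to compare unless told how to handle them, and `cmpopts.IgnoreUnexported` is the escape hatch. A tiny example of the same technique:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type record struct {
	Nonce int
	cache *big.Int // unexported: cmp panics on it without an option
}

func main() {
	opts := cmp.Options{cmpopts.IgnoreUnexported(record{})}
	a := record{Nonce: 1, cache: big.NewInt(5)}
	b := record{Nonce: 1, cache: big.NewInt(9)}
	fmt.Println(cmp.Equal(a, b, opts)) // true: only exported fields compared
}
```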
func storages(t *testing.T) map[string]QueueStorage { t.Helper() + f := func(enc storage.EncoderDecoderInterface) storage.EncoderDecoderF { + return func() storage.EncoderDecoderInterface { + return enc + } + } return map[string]QueueStorage{ - "levelDB": newLevelDBStorage(t), - "slice": newSliceStorage(), - "redis": newRedisStorage(context.Background(), t), + "levelDBLegacy": newLevelDBStorage(t, f(&storage.LegacyEncoderDecoder{})), + "sliceLegacy": newSliceStorage(f(&storage.LegacyEncoderDecoder{})), + "redisLegacy": newRedisStorage(context.Background(), t, f(&storage.LegacyEncoderDecoder{})), + "levelDB": newLevelDBStorage(t, f(&storage.EncoderDecoder{})), + "slice": newSliceStorage(f(&storage.EncoderDecoder{})), + "redis": newRedisStorage(context.Background(), t, f(&storage.EncoderDecoder{})), } } @@ -125,13 +139,13 @@ func TestFetchContents(t *testing.T) { desc: "sequence with single digits", startIdx: 5, maxResults: 3, - want: values(5, 7), + want: values(t, 5, 7), }, { desc: "corner case of single element", startIdx: 0, maxResults: 1, - want: values(0, 0), + want: values(t, 0, 0), }, { desc: "no elements", @@ -143,13 +157,13 @@ func TestFetchContents(t *testing.T) { desc: "sequence with variable number of digits", startIdx: 9, maxResults: 3, - want: values(9, 11), + want: values(t, 9, 11), }, { desc: "max results goes over the last element", startIdx: 13, maxResults: 10, - want: values(13, 19), + want: values(t, 13, 19), }, } { t.Run(name+"_"+tc.desc, func(t *testing.T) { @@ -171,7 +185,7 @@ func TestLast(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := context.Background() for i := 0; i < cnt; i++ { - val := valueOf(i) + val := valueOf(t, i) if err := s.Put(ctx, uint64(i), nil, val); err != nil { t.Fatalf("Error putting a key/value: %v", err) } @@ -185,12 +199,12 @@ func TestLast(t *testing.T) { } }) - last := valueOf(cnt - 1) + last := valueOf(t, cnt-1) t.Run(name+"_update_entries", func(t *testing.T) { ctx := context.Background() for i := 0; i < cnt-1; i++ { - prev := valueOf(i) - newVal := valueOf(cnt + i) + prev := valueOf(t, i) + newVal := valueOf(t, cnt+i) if err := s.Put(ctx, uint64(i), prev, newVal); err != nil { t.Fatalf("Error putting a key/value: %v, prev: %v, new: %v", err, prev, newVal) } @@ -227,17 +241,17 @@ func TestPrune(t *testing.T) { { desc: "prune all but one", pruneFrom: 19, - want: values(19, 19), + want: values(t, 19, 19), }, { desc: "pruning first element", pruneFrom: 1, - want: values(1, 19), + want: values(t, 1, 19), }, { desc: "pruning first 11 elements", pruneFrom: 11, - want: values(11, 19), + want: values(t, 11, 19), }, { desc: "pruning from higher than biggest index", From bb08e9d3fb5a5424f7e5885e411733978a2df736 Mon Sep 17 00:00:00 2001 From: Nodar Date: Wed, 30 Aug 2023 18:51:23 +0200 Subject: [PATCH 075/117] Use new encoding for slicestorage, attempt both decodings no matter what encoding is enabled --- arbnode/dataposter/data_poster.go | 2 +- arbnode/dataposter/storage/storage.go | 29 ++++++++++++++++++--------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index adfde88f74..a289a17626 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -114,7 +114,7 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a case initConfig.UseLevelDB: queue = leveldb.New(db, encF) case redisClient == nil: - queue = slice.NewStorage(encF) + queue = slice.NewStorage(func() storage.EncoderDecoderInterface { 
return &storage.EncoderDecoder{} })
 	default:
 		var err error
 		queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF)
diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index 57085c4fb1..a132f375e5 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/offchainlabs/nitro/arbutil"
 )
@@ -97,6 +98,22 @@ func QueuedTransactionToLegacy(qt *QueuedTransaction,
 	}, nil
 }
 
+// decode tries to decode a QueuedTransaction; if that fails, it tries to
+// decode a legacy queued transaction and convert it to a QueuedTransaction.
+func decode(data []byte) (*QueuedTransaction, error) {
+	var item QueuedTransaction
+	if err := rlp.DecodeBytes(data, &item); err != nil {
+		log.Warn("Failed to decode QueuedTransaction, attempting to decode legacy queued transaction", "error", err)
+		val, err := DecodeLegacyQueuedTransaction(data)
+		if err != nil {
+			return nil, fmt.Errorf("decoding legacy item: %w", err)
+		}
+		return LegacyToQueuedTransaction(val)
+	}
+	return &item, nil
+
+}
+
 type EncoderDecoder struct{}
 
 func (e *EncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) {
@@ -104,11 +121,7 @@ func (e *EncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) {
 }
 
 func (e *EncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) {
-	var item QueuedTransaction
-	if err := rlp.DecodeBytes(data, &item); err != nil {
-		return nil, fmt.Errorf("decoding item: %w", err)
-	}
-	return &item, nil
+	return decode(data)
 }
 
 type LegacyEncoderDecoder struct{}
@@ -122,11 +135,7 @@ func (e *LegacyEncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) {
 }
 
 func (le *LegacyEncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) {
-	val, err := DecodeLegacyQueuedTransaction(data)
-	if err != nil {
-		return nil, fmt.Errorf("decoding legacy item: %w", err)
-	}
-	return LegacyToQueuedTransaction(val)
+	return decode(data)
 }
 
 // Typically interfaces belong where they are used, not at the implementing

From 2e6d59ac0c3bfa6679011c974d37fdd16fad2992 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Wed, 30 Aug 2023 18:52:20 +0200
Subject: [PATCH 076/117] drop empty line

---
 arbnode/dataposter/storage/storage.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index a132f375e5..ed848b9b7d 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -111,7 +111,6 @@ func decode(data []byte) (*QueuedTransaction, error) {
 		return LegacyToQueuedTransaction(val)
 	}
 	return &item, nil
-
 }

From 06930b67bf24630f002abbe523c2ae9e3fd71362 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Wed, 30 Aug 2023 12:56:15 -0600
Subject: [PATCH 077/117] Fix pollForReverts channel reading

---
 arbnode/batch_poster.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 429701be7e..7efa2b3f73 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -309,8 +309,8 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {
 		// - polling is through context, or
 		// - we see a transaction in the block from dataposter that was reverted.
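The fix just below hinges on the semantics of Go's two-value channel receive: the boolean is true when a value was received and false only once the channel is closed and drained, so naming it `closed` and branching on it directly had the logic inverted. A standalone demonstration:

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	ch <- 42
	close(ch)

	v, ok := <-ch
	fmt.Println(v, ok) // 42 true: a buffered value is still delivered after close

	v, ok = <-ch
	fmt.Println(v, ok) // 0 false: ok is false only once closed AND drained
}
```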
select { - case h, closed := <-headerCh: - if closed { + case h, ok := <-headerCh: + if !ok { log.Info("L1 headers channel has been closed") return } From 5ce0f8164c3b05c2056cc22083cc1e2ebdc6d33d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 30 Aug 2023 14:26:51 -0600 Subject: [PATCH 078/117] Disable wait-for-l1-finality on L3 data posters --- arbnode/dataposter/data_poster.go | 39 +++++++++++++++++-------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index b1db655d71..f20d7dd597 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -162,11 +162,14 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return nil } +func (p *DataPoster) waitForL1Finality() bool { + return p.config().WaitForL1Finality && !p.headerReader.IsParentChainArbitrum() +} + // Requires the caller hold the mutex. // Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. // Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { - config := p.config() // Ensure latest finalized block state is available. blockNum, err := p.client.BlockNumber(ctx) if err != nil { @@ -185,7 +188,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by } if err := p.updateNonce(ctx); err != nil { - if !p.queue.IsPersistent() && config.WaitForL1Finality { + if !p.queue.IsPersistent() && p.waitForL1Finality() { return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) } // Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue. @@ -433,7 +436,7 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa // The mutex must be held by the caller. 
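For context on `updateNonce` below: with go-ethereum's client, the special block tags are selected by passing the rpc package's negative sentinel numbers to `HeaderByNumber`. A hedged sketch of querying the finalized block (the endpoint URL is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; any execution client supporting the finalized tag works.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	// nil selects "latest"; the negative sentinel selects "finalized".
	finalized := big.NewInt(int64(rpc.FinalizedBlockNumber))
	header, err := client.HeaderByNumber(context.Background(), finalized)
	if err != nil {
		panic(err)
	}
	fmt.Println("finalized block:", header.Number)
}
```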
func (p *DataPoster) updateNonce(ctx context.Context) error { var blockNumQuery *big.Int - if p.config().WaitForL1Finality { + if p.waitForL1Finality() { blockNumQuery = big.NewInt(int64(rpc.FinalizedBlockNumber)) } header, err := p.client.HeaderByNumber(ctx, blockNumQuery) @@ -602,20 +605,22 @@ type QueueStorage interface { } type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` - WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` - MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` - MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` - TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` - UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` - MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` - MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` - NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` - AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` - UseLevelDB bool `koanf:"use-leveldb"` - UseNoOpStorage bool `koanf:"use-noop-storage"` + RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` + ReplacementTimes string `koanf:"replacement-times"` + // This is forcibly disabled if the parent chain is an Arbitrum chain, + // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly. + WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` + MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` + TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` + UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` + MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` + MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` + AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` + UseLevelDB bool `koanf:"use-leveldb"` + UseNoOpStorage bool `koanf:"use-noop-storage"` } // ConfigFetcher function type is used instead of directly passing config so From 370ac231089a56649372e7bbe21ce3c26cefda80 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 30 Aug 2023 20:43:12 -0600 Subject: [PATCH 079/117] Fix off by 1 in validator logging --- staker/block_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/block_validator.go b/staker/block_validator.go index f04b852041..94bc2a0806 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -597,7 +597,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura var batchMsgs arbutil.MessageIndex var printedCount int64 if validated.GlobalState.Batch > 0 { - batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch) + batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch - 1) } if err != nil { printedCount = -1 From a7e26b2c038471bcd11831fbcc517238114efbb9 Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 31 Aug 2023 15:30:04 +0200 Subject: [PATCH 080/117] Drop normalize method in koanf.go, ignore case when comparing tag and a field --- linter/koanf/koanf.go | 22 +--------------------- 1 file changed, 1 
insertion(+), 21 deletions(-) diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go index bc94a9c20e..2127fb23b0 100644 --- a/linter/koanf/koanf.go +++ b/linter/koanf/koanf.go @@ -6,7 +6,6 @@ import ( "go/token" "reflect" "strings" - "unicode" "github.com/fatih/structtag" "golang.org/x/tools/go/analysis" @@ -84,7 +83,7 @@ func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { if err != nil { continue } - tagName := normalize(tag.Name) + tagName := strings.ReplaceAll(tag.Name, "-", "") fieldName := f.Names[0].Name if !strings.EqualFold(tagName, fieldName) { res.Errors = append(res.Errors, koanfError{ @@ -96,25 +95,6 @@ func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { return res } -func normalize(s string) string { - ans := s[:1] - for i := 1; i < len(s); i++ { - c := rune(s[i]) - if !isAlphanumeric(c) { - continue - } - if !isAlphanumeric(rune(s[i-1])) && unicode.IsLower(c) { - c = unicode.ToUpper(c) - } - ans += string(c) - } - return ans -} - -func isAlphanumeric(c rune) bool { - return unicode.IsLetter(c) || unicode.IsDigit(c) -} - func main() { singlechecker.Main(Analyzer) } From 4d2118660a75fb1dcf04491e07049a2041d3fd83 Mon Sep 17 00:00:00 2001 From: Nodar Date: Thu, 31 Aug 2023 15:38:54 +0200 Subject: [PATCH 081/117] drop tag normalization in koanf.go --- linter/koanf/handlers.go | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/linter/koanf/handlers.go b/linter/koanf/handlers.go index 2381a230ee..5826004014 100644 --- a/linter/koanf/handlers.go +++ b/linter/koanf/handlers.go @@ -5,7 +5,6 @@ import ( "go/ast" "go/token" "strings" - "unicode" "github.com/fatih/structtag" "golang.org/x/tools/go/analysis" @@ -195,7 +194,7 @@ func tagFromField(f *ast.Field) string { if err != nil { return "" } - return strings.ReplaceAll(tag.Name, "-", "") + return normalizeTag(tag.Name) } // checkStruct returns violations where koanf tag name doesn't match field names. 
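With the camel-casing helper gone, the linter's matching rule reduces to: strip dashes from the koanf tag, then compare case-insensitively against the field name. The rule in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// matches reports whether a koanf tag corresponds to a struct field name,
// e.g. "max-mempool-transactions" matches "MaxMempoolTransactions".
func matches(tag, field string) bool {
	return strings.EqualFold(strings.ReplaceAll(tag, "-", ""), field)
}

func main() {
	fmt.Println(matches("max-mempool-transactions", "MaxMempoolTransactions")) // true
	fmt.Println(matches("use-leveldb", "UseNoOpStorage"))                      // false
}
```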
@@ -219,22 +218,7 @@ func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { } func normalizeTag(s string) string { - ans := s[:1] - for i := 1; i < len(s); i++ { - c := rune(s[i]) - if !isAlphanumeric(c) { - continue - } - if !isAlphanumeric(rune(s[i-1])) && unicode.IsLower(c) { - c = unicode.ToUpper(c) - } - ans += string(c) - } - return ans -} - -func isAlphanumeric(c rune) bool { - return unicode.IsLetter(c) || unicode.IsDigit(c) + return strings.ReplaceAll(s, "-", "") } func normalizeID(pass *analysis.Pass, id string) string { From 8cb2606040a993d7fd1f6cf08e5af842ff1b2d1d Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 31 Aug 2023 10:49:42 -0500 Subject: [PATCH 082/117] add new type Uint64OrHex --- go-ethereum | 2 +- system_tests/conditionaltx_test.go | 29 ++++++++++++++--------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/go-ethereum b/go-ethereum index c905292f8a..f8363dc42d 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c905292f8af601f7fca261e65a7d4bc144261e29 +Subproject commit f8363dc42d6cf8dbd76aed2550207a90919e88ee diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index c65103694a..b758ba1807 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/arbitrum_types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rpc" @@ -103,23 +102,23 @@ func getOptions(address common.Address, rootHash common.Hash, slotValueMap map[c } func getFulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions { - future := hexutil.Uint64(timestamp + 30) - past := hexutil.Uint64(timestamp - 1) - futureBlockNumber := hexutil.Uint64(blockNumber + 1000) - currentBlockNumber := hexutil.Uint64(blockNumber) + future := common.Uint64OrHex(timestamp + 30) + past := common.Uint64OrHex(timestamp - 1) + futureBlockNumber := common.Uint64OrHex(blockNumber + 1000) + currentBlockNumber := common.Uint64OrHex(blockNumber) return getBlockTimeLimits(t, currentBlockNumber, futureBlockNumber, past, future) } func getUnfulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions { - future := hexutil.Uint64(timestamp + 30) - past := hexutil.Uint64(timestamp - 1) - futureBlockNumber := hexutil.Uint64(blockNumber + 1000) - previousBlockNumber := hexutil.Uint64(blockNumber - 1) + future := common.Uint64OrHex(timestamp + 30) + past := common.Uint64OrHex(timestamp - 1) + futureBlockNumber := common.Uint64OrHex(blockNumber + 1000) + previousBlockNumber := common.Uint64OrHex(blockNumber - 1) // skip first empty options return getBlockTimeLimits(t, futureBlockNumber, previousBlockNumber, future, past)[1:] } -func getBlockTimeLimits(t *testing.T, blockMin, blockMax hexutil.Uint64, timeMin, timeMax hexutil.Uint64) []*arbitrum_types.ConditionalOptions { +func getBlockTimeLimits(t *testing.T, blockMin, blockMax common.Uint64OrHex, timeMin, timeMax common.Uint64OrHex) []*arbitrum_types.ConditionalOptions { basic := []*arbitrum_types.ConditionalOptions{ {}, {TimestampMin: &timeMin}, @@ -157,9 +156,9 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []* c.KnownAccounts[k] = v } limitTriples := 
[]struct {
-		a *hexutil.Uint64
-		b *hexutil.Uint64
-		c **hexutil.Uint64
+		a *common.Uint64OrHex
+		b *common.Uint64OrHex
+		c **common.Uint64OrHex
 	}{
 		{a.BlockNumberMin, b.BlockNumberMin, &c.BlockNumberMin},
 		{a.BlockNumberMax, b.BlockNumberMax, &c.BlockNumberMax},
@@ -168,10 +167,10 @@
 		}
 		for _, tripple := range limitTriples {
 			if tripple.b != nil {
-				value := hexutil.Uint64(*tripple.b)
+				value := common.Uint64OrHex(*tripple.b)
 				*tripple.c = &value
 			} else if tripple.a != nil {
-				value := hexutil.Uint64(*tripple.a)
+				value := common.Uint64OrHex(*tripple.a)
 				*tripple.c = &value
 			} else {
 				*tripple.c = nil

From 1f6ddc4f16497f80a203fdfb8f7c4cca04bfba93 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Thu, 31 Aug 2023 17:55:40 +0200
Subject: [PATCH 083/117] Change warning to debug when decoding queued transaction fails, add another debug line when it succeeds

---
 arbnode/dataposter/storage/storage.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index ed848b9b7d..734e2770ee 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -103,11 +103,12 @@ func QueuedTransactionToLegacy(qt *QueuedTransaction,
 func decode(data []byte) (*QueuedTransaction, error) {
 	var item QueuedTransaction
 	if err := rlp.DecodeBytes(data, &item); err != nil {
-		log.Warn("Failed to decode QueuedTransaction, attempting to decode legacy queued transaction", "error", err)
+		log.Debug("Failed to decode QueuedTransaction, attempting to decode legacy queued transaction", "error", err)
 		val, err := DecodeLegacyQueuedTransaction(data)
 		if err != nil {
 			return nil, fmt.Errorf("decoding legacy item: %w", err)
 		}
+		log.Debug("Succeeded decoding QueuedTransaction with legacy encoder")
 		return LegacyToQueuedTransaction(val)
 	}
 	return &item, nil

From 8f3f8162b88f22e3ef56e3ebbc6ba627b10b8ecb Mon Sep 17 00:00:00 2001
From: ganeshvanahalli
Date: Thu, 31 Aug 2023 12:59:35 -0500
Subject: [PATCH 084/117] reuse type math.HexOrDecimal64

---
 go-ethereum                        |  2 +-
 system_tests/conditionaltx_test.go | 29 +++++++++++++++--------------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/go-ethereum b/go-ethereum
index f8363dc42d..b4bd0da114 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit f8363dc42d6cf8dbd76aed2550207a90919e88ee
+Subproject commit b4bd0da1142fe6bb81cac7e0794ebb4746b9885a
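`math.HexOrDecimal64` from go-ethereum is a `uint64` whose text form accepts either decimal or 0x-prefixed hex (and always marshals as hex), which is why it can stand in for the hex-only `hexutil.Uint64` in the test below. A quick round trip:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	var v math.HexOrDecimal64

	// Both spellings decode to the same uint64.
	if err := v.UnmarshalText([]byte("0x1e")); err != nil {
		panic(err)
	}
	fmt.Println(uint64(v)) // 30

	if err := v.UnmarshalText([]byte("30")); err != nil {
		panic(err)
	}
	fmt.Println(uint64(v)) // 30

	out, _ := v.MarshalText()
	fmt.Println(string(out)) // 0x1e: marshaling always emits hex
}
```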
diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go
index b758ba1807..14aa000313 100644
--- a/system_tests/conditionaltx_test.go
+++ b/system_tests/conditionaltx_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/ethereum/go-ethereum/arbitrum"
 	"github.com/ethereum/go-ethereum/arbitrum_types"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -102,23 +103,23 @@ func getOptions(address common.Address, rootHash common.Hash, slotValueMap map[c
 }
 
 func getFulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions {
-	future := common.Uint64OrHex(timestamp + 30)
-	past := common.Uint64OrHex(timestamp - 1)
-	futureBlockNumber := common.Uint64OrHex(blockNumber + 1000)
-	currentBlockNumber := common.Uint64OrHex(blockNumber)
+	future := math.HexOrDecimal64(timestamp + 30)
+	past := math.HexOrDecimal64(timestamp - 1)
+	futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000)
+	currentBlockNumber := math.HexOrDecimal64(blockNumber)
 	return getBlockTimeLimits(t, currentBlockNumber, futureBlockNumber, past, future)
 }
 
 func getUnfulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions {
-	future := common.Uint64OrHex(timestamp + 30)
-	past := common.Uint64OrHex(timestamp - 1)
-	futureBlockNumber := common.Uint64OrHex(blockNumber + 1000)
-	previousBlockNumber := common.Uint64OrHex(blockNumber - 1)
+	future := math.HexOrDecimal64(timestamp + 30)
+	past := math.HexOrDecimal64(timestamp - 1)
+	futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000)
+	previousBlockNumber := math.HexOrDecimal64(blockNumber - 1)
 	// skip first empty options
 	return getBlockTimeLimits(t, futureBlockNumber, previousBlockNumber, future, past)[1:]
 }
 
-func getBlockTimeLimits(t *testing.T, blockMin, blockMax common.Uint64OrHex, timeMin, timeMax common.Uint64OrHex) []*arbitrum_types.ConditionalOptions {
+func getBlockTimeLimits(t *testing.T, blockMin, blockMax math.HexOrDecimal64, timeMin, timeMax math.HexOrDecimal64) []*arbitrum_types.ConditionalOptions {
 	basic := []*arbitrum_types.ConditionalOptions{
 		{},
 		{TimestampMin: &timeMin},
@@ -156,9 +157,9 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []*
 			c.KnownAccounts[k] = v
 		}
 		limitTriples := []struct {
-			a *common.Uint64OrHex
-			b *common.Uint64OrHex
-			c **common.Uint64OrHex
+			a *math.HexOrDecimal64
+			b *math.HexOrDecimal64
+			c **math.HexOrDecimal64
 		}{
 			{a.BlockNumberMin, b.BlockNumberMin, &c.BlockNumberMin},
 			{a.BlockNumberMax, b.BlockNumberMax, &c.BlockNumberMax},
@@ -168,10 +167,10 @@
 		}
 		for _, tripple := range limitTriples {
 			if tripple.b != nil {
-				value := common.Uint64OrHex(*tripple.b)
+				value := math.HexOrDecimal64(*tripple.b)
 				*tripple.c = &value
 			} else if tripple.a != nil {
-				value := common.Uint64OrHex(*tripple.a)
+				value := math.HexOrDecimal64(*tripple.a)
 				*tripple.c = &value
 			} else {
 				*tripple.c = nil

From d23602ffc8215367d7705c0e4867cb357949ba84 Mon Sep 17 00:00:00 2001
From: Lee Bousfield
Date: Thu, 31 Aug 2023 19:42:15 -0600
Subject: [PATCH 085/117] Fix batch poster firstAccErr not resetting when the error stops

---
 arbnode/batch_poster.go | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 429701be7e..53cd5d5594 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -970,10 +970,14 @@ func (b *BatchPoster) Start(ctxIn context.Context) {
 				return b.config().PollInterval
 			}
 			posted, err := b.maybePostSequencerBatch(ctx)
+			ephemeralError := errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace)
+			if !ephemeralError {
+				b.firstAccErr = time.Time{}
+			}
 			if err != nil {
 				b.building = nil
 				logLevel := log.Error
-				if errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace) {
+				if ephemeralError {
 					// Likely the inbox tracker just isn't caught up.
 					// Let's see if this error disappears naturally.
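The logic being fixed here follows a damping pattern: remember when a presumed-transient error first appeared, log quietly inside a grace window, escalate after that, and reset the clock on the first healthy iteration. Extracted as a standalone sketch (names invented for illustration, not the batch poster's actual API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// damper tracks how long a presumed-transient error has persisted.
type damper struct {
	first time.Time
	grace time.Duration
}

// level returns the suggested log level for err; a nil err resets the window.
func (d *damper) level(err error) string {
	if err == nil {
		d.first = time.Time{} // healthy iteration: forget the streak
		return "none"
	}
	if d.first.IsZero() {
		d.first = time.Now() // first sighting starts the grace window
	}
	if time.Since(d.first) < d.grace {
		return "debug" // young error: stay quiet, it may clear up
	}
	return "error" // persisted past the window: escalate
}

func main() {
	d := &damper{grace: time.Minute}
	err := errors.New("accumulator not found")
	fmt.Println(d.level(err)) // debug
	fmt.Println(d.level(nil)) // none: the next error starts a fresh window
}
```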
if b.firstAccErr == (time.Time{}) { @@ -982,8 +986,6 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } else if time.Since(b.firstAccErr) < time.Minute { logLevel = log.Debug } - } else { - b.firstAccErr = time.Time{} } logLevel("error posting batch", "err", err) return b.config().ErrorDelay From 053e13a4d81da7e417be72c70f023bf86aa11189 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 31 Aug 2023 19:47:50 -0600 Subject: [PATCH 086/117] Rename firstAccErr to firstEphemeralError --- arbnode/batch_poster.go | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 53cd5d5594..43cf97f2ae 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -56,20 +56,20 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - firstAccErr time.Time // first time a continuous missing accumulator occurred + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + firstEphemeralError time.Time // first time a continuous error suspected to be ephemeral occurred // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. backlog uint64 @@ -972,7 +972,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { posted, err := b.maybePostSequencerBatch(ctx) ephemeralError := errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace) if !ephemeralError { - b.firstAccErr = time.Time{} + b.firstEphemeralError = time.Time{} } if err != nil { b.building = nil @@ -980,10 +980,10 @@ func (b *BatchPoster) Start(ctxIn context.Context) { if ephemeralError { // Likely the inbox tracker just isn't caught up. // Let's see if this error disappears naturally. 
-				if b.firstAccErr == (time.Time{}) {
-					b.firstAccErr = time.Now()
+				if b.firstEphemeralError == (time.Time{}) {
+					b.firstEphemeralError = time.Now()
 					logLevel = log.Debug
-				} else if time.Since(b.firstAccErr) < time.Minute {
+				} else if time.Since(b.firstEphemeralError) < time.Minute {
 					logLevel = log.Debug
 				}
 			}

From 2f9db18cb552cec9647c9c6dfd9cbe4d7ef079b9 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Fri, 1 Sep 2023 16:34:51 +0200
Subject: [PATCH 087/117] Compare correct encodings in redis storage, drop nil rlp tag from queuedTransaction struct

---
 arbnode/dataposter/data_poster.go        |  2 +-
 arbnode/dataposter/redis/redisstorage.go | 15 +++++++++++++++
 arbnode/dataposter/storage/storage.go    |  8 +++-----
 arbnode/dataposter/storage_test.go       |  9 +++++++++
 4 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go
index 5231a1c332..dff2602cac 100644
--- a/arbnode/dataposter/data_poster.go
+++ b/arbnode/dataposter/data_poster.go
@@ -112,7 +112,7 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a
 	case initConfig.UseNoOpStorage:
 		queue = &noop.Storage{}
 	case initConfig.UseLevelDB:
-		queue = leveldb.New(db, encF)
+		queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
 	case redisClient == nil:
 		queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
 	default:
diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go
index 5123cea154..f2393611b2 100644
--- a/arbnode/dataposter/redis/redisstorage.go
+++ b/arbnode/dataposter/redis/redisstorage.go
@@ -114,6 +114,17 @@ func (s *Storage) Prune(ctx context.Context, until uint64) error {
 	return nil
 }
 
+// normalizeDecoding decodes the data (regardless of which encoding it was
+// written with) and re-encodes it according to the currently configured
+// encoding, so that stored bytes are compared in one canonical form.
+func (s *Storage) normalizeDecoding(data []byte) ([]byte, error) {
+	item, err := s.encDec().Decode(data)
+	if err != nil {
+		return nil, err
+	}
+	return s.encDec().Encode(item)
+}
+
 func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.QueuedTransaction) error {
 	if new == nil {
 		return fmt.Errorf("tried to insert nil item at index %v", index)
 	}
@@ -142,6 +153,10 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu
 		if err != nil {
 			return fmt.Errorf("failed to validate item already in redis at index%v: %w", index, err)
 		}
+		verifiedItem, err = s.normalizeDecoding(verifiedItem)
+		if err != nil {
+			return fmt.Errorf("error normalizing encoding for verified item: %w", err)
+		}
 		prevItemEncoded, err := s.encDec().Encode(prev)
 		if err != nil {
 			return err
diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index 734e2770ee..b59bf7bf62 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -21,7 +21,7 @@ var (
 )
 
 type QueuedTransaction struct {
-	FullTx          *types.Transaction `rlp:"nil"`
+	FullTx          *types.Transaction
 	Data            types.DynamicFeeTx
 	Meta            []byte
 	Sent            bool
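Why `normalizeDecoding` above is needed: `Put` compares raw stored bytes against a freshly encoded item, and the same logical item has different bytes under the legacy and current encodings. Decoding and then re-encoding canonicalizes before comparing. The same idea, with JSON standing in for the two rlp layouts:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type item struct{ Nonce uint64 }

// normalize decodes data, whatever layout produced it, and re-encodes it
// with the current encoder, mirroring normalizeDecoding above.
func normalize(data []byte) ([]byte, error) {
	var it item
	if err := json.Unmarshal(data, &it); err != nil {
		return nil, err
	}
	return json.Marshal(it)
}

func main() {
	current, _ := json.Marshal(item{Nonce: 5})
	older, _ := json.MarshalIndent(item{Nonce: 5}, "", "  ") // same value, different bytes

	fmt.Println(bytes.Equal(older, current)) // false: raw bytes differ
	norm, _ := normalize(older)
	fmt.Println(bytes.Equal(norm, current)) // true: canonical forms match
}
```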
@@ -35,7 +35,7 @@ type QueuedTransaction struct {
 // rlp encoding of Meta inside queuedTransaction and rlp encode the whole item
 // once more before storing it.
 type LegacyQueuedTransaction struct {
-	FullTx          *types.Transaction `rlp:"nil"`
+	FullTx          *types.Transaction
 	Data            types.DynamicFeeTx
 	Meta            BatchPosterPosition
 	Sent            bool
@@ -46,9 +46,7 @@ type LegacyQueuedTransaction struct {
 // This is also here for legacy reasons. Since BatchPoster is in the arbnode
 // package, we can't refer to the BatchPosterPosition type there even if we
 // export it (that would create a cyclic dependency).
-// Ideally we'll factor out the batch poster from arbnode into a separate
-// package, and BatchPosterPosition into another separate package as well.
-// For the sake of minimal refactoring, that struct is duplicated here.
+// We'll drop this struct in a few releases when we drop legacy encoding.
 type BatchPosterPosition struct {
 	MessageCount        arbutil.MessageIndex
 	DelayedMessageCount uint64
diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go
index eac05502be..d536e5da05 100644
--- a/arbnode/dataposter/storage_test.go
+++ b/arbnode/dataposter/storage_test.go
@@ -6,6 +6,7 @@ import (
 	"path"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -22,6 +23,7 @@ import (
 
 var ignoreData = cmp.Options{
 	cmpopts.IgnoreUnexported(
+		types.Transaction{},
 		types.DynamicFeeTx{},
 		big.Int{},
 	),
@@ -62,6 +64,13 @@ func valueOf(t *testing.T, i int) *storage.QueuedTransaction {
 		t.Fatalf("Encoding batch poster position, error: %v", err)
 	}
 	return &storage.QueuedTransaction{
+		FullTx: types.NewTransaction(
+			uint64(i),
+			common.Address{},
+			big.NewInt(int64(i)),
+			uint64(i),
+			big.NewInt(int64(i)),
+			[]byte{byte(i)}),
 		Meta: meta,
 		Data: types.DynamicFeeTx{
 			ChainID: big.NewInt(int64(i)),

From 0bf724b80497f821ff31fc36519ffe53b37d9676 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Fri, 1 Sep 2023 17:09:28 +0200
Subject: [PATCH 088/117] Fix incorrect merge with base PR

---
 linter/koanf/koanf.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go
index d483cf7751..c7c38e2571 100644
--- a/linter/koanf/koanf.go
+++ b/linter/koanf/koanf.go
@@ -116,7 +116,7 @@ func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl) Result {
 			if !ok {
 				continue
 			}
-			if normSL := normalize(sl); !strings.EqualFold(normSL, s) {
+			if normSL := strings.ReplaceAll(sl, "-", ""); !strings.EqualFold(normSL, s) {
 				res.Errors = append(res.Errors, koanfError{
 					Pos:     pass.Fset.Position(f.Pos()),
 					Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s),

From 293ed4c504e3370acb0984cac36db02dd1e82275 Mon Sep 17 00:00:00 2001
From: Nodar
Date: Fri, 1 Sep 2023 17:14:06 +0200
Subject: [PATCH 089/117] Specify folders of linters instead of separate go files in CI yml

---
 .github/workflows/ci.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 916969f324..a0f5251f9f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -131,8 +131,8 @@ jobs:
           skip-pkg-cache: true
       - name: Custom Lint
         run: |
-          go run linter/koanf/koanf.go linter/koanf/handlers.go ./...
-          go run linter/pointercheck/pointer.go ./...
+          go run ./linter/koanf ./...
+          go run ./linter/pointercheck ./...
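`go run ./linter/koanf ./...` works because each linter directory is a `package main` that hands a single analyzer to `singlechecker`, as `koanf.go`'s `main` did above. The minimal shape of such a linter (a do-nothing analyzer, shown only to illustrate the wiring):

```go
package main

import (
	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/singlechecker"
)

// Analyzer is a deliberately empty skeleton; real linters inspect
// pass.Files and report findings via pass.Reportf.
var Analyzer = &analysis.Analyzer{
	Name: "demo",
	Doc:  "toy analyzer showing the singlechecker wiring",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		return nil, nil
	},
}

// singlechecker.Main turns the analyzer into a standalone CLI that accepts
// package patterns such as ./..., which is exactly how the CI step invokes it.
func main() { singlechecker.Main(Analyzer) }
```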
- name: Set environment variables run: | From 1182da1bc3fef5f9117edb64323f0480b6e1f63b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 1 Sep 2023 09:54:04 -0600 Subject: [PATCH 090/117] Fix changing the basefee in non-mutating calls --- arbos/tx_processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 09a4692eae..0d44ac548e 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -677,7 +677,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { if version != 9 { gasPrice = p.evm.Context.BaseFee if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.Sign() == 0 { - gasPrice.SetInt64(0) // gasprice zero behavior + gasPrice = common.Big0 } } return gasPrice From 4be9bbb019fde7e6103304f266a85e0622f3b7b8 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 1 Sep 2023 13:24:48 -0500 Subject: [PATCH 091/117] remove precompilesgen dependency from headerreader --- arbnode/node.go | 7 +++++-- cmd/daserver/daserver.go | 5 ++++- cmd/nitro/nitro.go | 5 ++++- system_tests/das_test.go | 4 +++- util/headerreader/header_reader.go | 17 +++++++++-------- 5 files changed, 25 insertions(+), 13 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index e6960a3f22..e3e9223b1d 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/contracts" @@ -235,7 +236,8 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { - l1Reader, err := headerreader.New(ctx, l1client, readerConfig) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, readerConfig, arbSys) if err != nil { return nil, err } @@ -611,7 +613,8 @@ func createNodeImpl( var l1Reader *headerreader.HeaderReader if config.ParentChainReader.Enable { - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys) if err != nil { return nil, err } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 7cdfc39915..335aba6a1b 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -17,6 +17,7 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" @@ -24,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -196,7 +198,8 @@ func 
startup() error { if err != nil { return err } - l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }) // TODO: config + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) // TODO: config if err != nil { return err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 407ed0afe7..dd26fea46f 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" _ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/native" @@ -48,6 +49,7 @@ import ( "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" _ "github.com/offchainlabs/nitro/nodeInterface" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/headerreader" @@ -354,7 +356,8 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 7952120933..8889d2d53d 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -28,6 +28,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/solgen/go/bridgegen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) @@ -233,7 +234,8 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { chainConfig := params.ArbitrumDevTestDASChainConfig() l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index e5807224c0..8487ccd54b 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -18,17 +18,20 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) +type ArbSysInterface interface { + ArbBlockNumber(*bind.CallOpts) (*big.Int, error) +} + type HeaderReader 
struct { stopwaiter.StopWaiter config ConfigFetcher client arbutil.L1Interface isParentChainArbitrum bool - arbSys *precompilesgen.ArbSys + arbSys ArbSysInterface chanMutex sync.RWMutex // All fields below require the chanMutex @@ -91,25 +94,23 @@ var TestConfig = Config{ UseFinalityData: false, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher) (*HeaderReader, error) { +func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { isParentChainArbitrum := false - var arbSys *precompilesgen.ArbSys codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) if err != nil { return nil, err } if len(codeAt) != 0 { isParentChainArbitrum = true - arbSys, err = precompilesgen.NewArbSys(types.ArbSysAddress, client) - if err != nil { - return nil, err + if arbSysPrecompile == nil { + return nil, errors.New("unable to create ArbSys from precompilesgen") } } return &HeaderReader{ client: client, config: config, isParentChainArbitrum: isParentChainArbitrum, - arbSys: arbSys, + arbSys: arbSysPrecompile, outChannels: make(map[chan<- *types.Header]struct{}), outChannelsBehind: make(map[chan<- *types.Header]struct{}), safe: cachedHeader{rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, From 57f25acccc14d0633502136bacc4e637dc89ea02 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 1 Sep 2023 14:23:19 -0500 Subject: [PATCH 092/117] modify implementation to handle CodeAt errors in lb --- arbnode/node.go | 4 ++-- cmd/daserver/daserver.go | 2 +- cmd/nitro/nitro.go | 2 +- system_tests/das_test.go | 2 +- util/headerreader/header_reader.go | 20 +++++++++++--------- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index e3e9223b1d..7fde929771 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -237,7 +237,7 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, readerConfig, arbSys) + l1Reader, err := headerreader.New(ctx, l1client, readerConfig, arbSys, true) if err != nil { return nil, err } @@ -614,7 +614,7 @@ func createNodeImpl( var l1Reader *headerreader.HeaderReader if config.ParentChainReader.Enable { arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys) + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys, true) if err != nil { return nil, err } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 335aba6a1b..6b874f4639 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -199,7 +199,7 @@ func startup() error { return err } arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) // TODO: config + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys, true) // TODO: 
config if err != nil { return err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index dd26fea46f..e404733b1e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -357,7 +357,7 @@ func mainImpl() int { log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys, true) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8889d2d53d..e79b993d03 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -235,7 +235,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys, true) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 8487ccd54b..c7fa937385 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -94,16 +94,18 @@ var TestConfig = Config{ UseFinalityData: false, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { +func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface, usePrecompilesgen bool) (*HeaderReader, error) { isParentChainArbitrum := false - codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) - if err != nil { - return nil, err - } - if len(codeAt) != 0 { - isParentChainArbitrum = true - if arbSysPrecompile == nil { - return nil, errors.New("unable to create ArbSys from precompilesgen") + if usePrecompilesgen { + codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) + if err != nil { + return nil, err + } + if len(codeAt) != 0 { + isParentChainArbitrum = true + if arbSysPrecompile == nil { + return nil, errors.New("unable to create ArbSys from precompilesgen") + } } } return &HeaderReader{ From 800c0a4d34fb49c139ef4d78d5cc2c8eec856bb0 Mon Sep 17 00:00:00 2001 From: Nodar Date: Mon, 4 Sep 2023 15:51:32 +0200 Subject: [PATCH 093/117] Run linters by specifying linter folder rather than go files --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 33487d0609..d6082261cd 100644 --- a/Makefile +++ b/Makefile @@ -304,8 +304,8 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run linter/koanf/koanf.go linter/koanf/handlers.go ./... - go run linter/pointercheck/pointer.go ./... + go run ./linter/koanf ./... + go run ./linter/pointercheck ./... 
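	# Same package-directory invocation as the CI "Custom Lint" step above,
	# which keeps a local `make lint` in sync with what CI runs.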
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ From c2a6a54c923e22e4d6f027e9acd80b93507b0bbb Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 5 Sep 2023 11:07:52 -0500 Subject: [PATCH 094/117] address PR comments --- .../rediscoordinator/redis_coordinator.go | 34 +------------ .../seq-coordinator-manager.go | 48 +++++++++++-------- util/redisutil/redis_coordinator.go | 26 ++++++++++ 3 files changed, 57 insertions(+), 51 deletions(-) diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index a393719a1d..782ab3801b 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -14,37 +14,6 @@ type RedisCoordinator struct { *redisutil.RedisCoordinator } -// GetPriorities returns the priority list of sequencers -func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, map[string]int, error) { - prioritiesMap := make(map[string]int) - prioritiesString, err := rc.Client.Get(ctx, redisutil.PRIORITIES_KEY).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - err = errors.New("sequencer priorities unset") - } - return []string{}, prioritiesMap, err - } - priorities := strings.Split(prioritiesString, ",") - for _, url := range priorities { - prioritiesMap[url]++ - } - return priorities, prioritiesMap, nil -} - -// GetLivelinessMap returns a map whose keys are sequencers that have their liveliness set to OK -func (rc *RedisCoordinator) GetLivelinessMap(ctx context.Context) (map[string]int, error) { - livelinessMap := make(map[string]int) - livelinessList, _, err := rc.Client.Scan(ctx, 0, redisutil.WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() - if err != nil { - return livelinessMap, err - } - for _, elem := range livelinessList { - url := strings.TrimPrefix(elem, redisutil.WANTS_LOCKOUT_KEY_PREFIX) - livelinessMap[url]++ - } - return livelinessMap, nil -} - // UpdatePriorities updates the priority list of sequencers func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { prioritiesString := strings.Join(priorities, ",") @@ -53,6 +22,7 @@ func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []s if errors.Is(err, redis.Nil) { err = errors.New("sequencer priorities unset") } + return err } - return err + return nil } diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index 0a279cff64..a0123a9123 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -30,8 +30,8 @@ var nonPriorityForm = tview.NewForm() // Sequencer coordinator managment UI data store type manager struct { redisCoordinator *rediscoordinator.RedisCoordinator - prioritiesMap map[string]int - livelinessMap map[string]int + prioritiesSet map[string]bool + livelinessSet map[string]bool priorityList []string nonPriorityList []string } @@ -55,8 +55,8 @@ func main() { redisCoordinator: &rediscoordinator.RedisCoordinator{ RedisCoordinator: redisutilCoordinator, }, - prioritiesMap: make(map[string]int), - livelinessMap: make(map[string]int), + prioritiesSet: make(map[string]bool), + livelinessSet: make(map[string]bool), } seqManager.refreshAllLists(ctx) @@ -92,7 +92,7 @@ func main() { }) priorityForm.AddButton("Remove", func() { url := seqManager.priorityList[index] - delete(seqManager.prioritiesMap, url) + 
delete(seqManager.prioritiesSet, url) seqManager.updatePriorityList(ctx, index, 0) seqManager.priorityList = seqManager.priorityList[1:] @@ -122,7 +122,7 @@ func main() { nonPriorityForm.AddButton("Update", func() { key := seqManager.nonPriorityList[index] seqManager.priorityList = append(seqManager.priorityList, key) - seqManager.prioritiesMap[key]++ + seqManager.prioritiesSet[key] = true index = len(seqManager.priorityList) - 1 seqManager.updatePriorityList(ctx, index, target) @@ -188,9 +188,11 @@ func main() { seqManager.addSeqPriorityForm(ctx) pages.SwitchToPage("Add Sequencer") } else if event.Rune() == 99 { - if prioritySeqList.HasFocus() { + if prioritySeqList.HasFocus() || priorityForm.HasFocus() { + priorityForm.Clear(true) app.SetFocus(nonPrioritySeqList) } else { + nonPriorityForm.Clear(true) app.SetFocus(prioritySeqList) } } else if event.Rune() == 113 { @@ -217,8 +219,8 @@ func (sm *manager) updatePriorityList(ctx context.Context, index int, target int } urlList := []string{} - for url := range sm.livelinessMap { - if _, ok := sm.prioritiesMap[url]; !ok { + for url := range sm.livelinessSet { + if _, ok := sm.prioritiesSet[url]; !ok { urlList = append(urlList, url) } } @@ -238,7 +240,7 @@ func (sm *manager) populateLists(ctx context.Context) { sec = fmt.Sprintf(" %vchosen", emoji.LeftArrow) } status := fmt.Sprintf("(%d) %v ", index, emoji.RedCircle) - if _, ok := sm.livelinessMap[seqURL]; ok { + if _, ok := sm.livelinessSet[seqURL]; ok { status = fmt.Sprintf("(%d) %v ", index, emoji.GreenCircle) } prioritySeqList.AddItem(status+seqURL+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple) @@ -264,8 +266,8 @@ func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { }) addSeqForm.AddButton("Add", func() { // check if url is valid, i.e it doesnt already exist in the priority list - if _, ok := sm.prioritiesMap[URL]; !ok && URL != "" { - sm.prioritiesMap[URL]++ + if _, ok := sm.prioritiesSet[URL]; !ok && URL != "" { + sm.prioritiesSet[URL] = true sm.priorityList = append(sm.priorityList, URL) } sm.populateLists(ctx) @@ -285,24 +287,32 @@ func (sm *manager) pushUpdates(ctx context.Context) { // refreshAllLists gets the current status of all the lists displayed in the UI func (sm *manager) refreshAllLists(ctx context.Context) { - sequencerURLList, mapping, err := sm.redisCoordinator.GetPriorities(ctx) + priorityList, err := sm.redisCoordinator.GetPriorities(ctx) if err != nil { panic(err) } - sm.priorityList = sequencerURLList - sm.prioritiesMap = mapping + sm.priorityList = priorityList + sm.prioritiesSet = getMapfromlist(priorityList) - mapping, err = sm.redisCoordinator.GetLivelinessMap(ctx) + livelinessList, err := sm.redisCoordinator.GetLiveliness(ctx) if err != nil { panic(err) } - sm.livelinessMap = mapping + sm.livelinessSet = getMapfromlist(livelinessList) urlList := []string{} - for url := range sm.livelinessMap { - if _, ok := sm.prioritiesMap[url]; !ok { + for url := range sm.livelinessSet { + if _, ok := sm.prioritiesSet[url]; !ok { urlList = append(urlList, url) } } sm.nonPriorityList = urlList } + +func getMapfromlist(list []string) map[string]bool { + mapping := make(map[string]bool) + for _, url := range list { + mapping[url] = true + } + return mapping +} diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 0ee92fef17..357dfb2e93 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -76,6 +76,32 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx 
context.Context) (string, return current, nil } +// GetPriorities returns the priority list of sequencers +func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { + prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return []string{}, err + } + prioritiesList := strings.Split(prioritiesString, ",") + return prioritiesList, nil +} + +// GetLiveliness returns a map whose keys are sequencers that have their liveliness set to OK +func (rc *RedisCoordinator) GetLiveliness(ctx context.Context) ([]string, error) { + livelinessList, _, err := rc.Client.Scan(ctx, 0, WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() + if err != nil { + return []string{}, err + } + for i, elem := range livelinessList { + url := strings.TrimPrefix(elem, WANTS_LOCKOUT_KEY_PREFIX) + livelinessList[i] = url + } + return livelinessList, nil +} + func MessageKeyFor(pos arbutil.MessageIndex) string { return fmt.Sprintf("%s%d", MESSAGE_KEY_PREFIX, pos) } From 5af5d4f7d80c8324083e1bd46cfb4dfc664d6ff4 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 6 Sep 2023 14:13:56 -0500 Subject: [PATCH 095/117] check for valid inputs for setting pendingWasmModuleRoot --- staker/stateless_block_validator.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 7add3e258d..7131fe6074 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "regexp" "sync" "testing" @@ -455,8 +456,9 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { } v.pendingWasmModuleRoot = latest } else { + valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", v.config.PendingUpgradeModuleRoot) v.pendingWasmModuleRoot = common.HexToHash(v.config.PendingUpgradeModuleRoot) - if (v.pendingWasmModuleRoot == common.Hash{}) { + if (!valid || v.pendingWasmModuleRoot == common.Hash{}) { return errors.New("pending-upgrade-module-root config value illegal") } } From eba6785faf31ba3c9bc0e041d65da119d491acbc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 7 Sep 2023 10:50:31 -0600 Subject: [PATCH 096/117] Fix Start/Stop of the staker wallet's data poster --- staker/eoa_validator_wallet.go | 4 ---- staker/staker.go | 8 ++++++-- staker/validator_wallet.go | 6 ++---- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go index 09175332bf..5285e96ea9 100644 --- a/staker/eoa_validator_wallet.go +++ b/staker/eoa_validator_wallet.go @@ -15,11 +15,9 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/util/stopwaiter" ) type EoaValidatorWallet struct { - stopwaiter.StopWaiter auth *bind.TransactOpts client arbutil.L1Interface rollupAddress common.Address @@ -129,11 +127,9 @@ func (w *EoaValidatorWallet) AuthIfEoa() *bind.TransactOpts { func (w *EoaValidatorWallet) Start(ctx context.Context) { w.dataPoster.Start(ctx) - w.StopWaiter.Start(ctx, w) } func (b *EoaValidatorWallet) StopAndWait() { - b.StopWaiter.StopAndWait() b.dataPoster.StopAndWait() } diff --git a/staker/staker.go b/staker/staker.go index 9b7e6c238e..8fdbbd648f 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -372,11 +372,15 @@ func (s *Staker) 
getLatestStakedState(ctx context.Context, staker common.Address func (s *Staker) StopAndWait() { s.StopWaiter.StopAndWait() - s.wallet.StopAndWait() + if s.Strategy() != WatchtowerStrategy { + s.wallet.StopAndWait() + } } func (s *Staker) Start(ctxIn context.Context) { - s.wallet.Start(ctxIn) + if s.Strategy() != WatchtowerStrategy { + s.wallet.Start(ctxIn) + } s.StopWaiter.Start(ctxIn, s) backoff := time.Second s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) { diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go index 133a808eac..fb0f5ed956 100644 --- a/staker/validator_wallet.go +++ b/staker/validator_wallet.go @@ -24,7 +24,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" - "github.com/offchainlabs/nitro/util/stopwaiter" ) var validatorABI abi.ABI @@ -66,7 +65,6 @@ type ValidatorWalletInterface interface { } type ContractValidatorWallet struct { - stopwaiter.StopWaiter con *rollupgen.ValidatorWallet address atomic.Pointer[common.Address] onWalletCreated func(common.Address) @@ -413,11 +411,11 @@ func (v *ContractValidatorWallet) AuthIfEoa() *bind.TransactOpts { } func (w *ContractValidatorWallet) Start(ctx context.Context) { - w.StopWaiter.Start(ctx, w) + w.dataPoster.Start(ctx) } func (b *ContractValidatorWallet) StopAndWait() { - b.StopWaiter.StopAndWait() + b.dataPoster.StopAndWait() } func (b *ContractValidatorWallet) DataPoster() *dataposter.DataPoster { From f238cda3d48f17d4a4403a64bb4ce9b06c3865e4 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 8 Sep 2023 10:27:18 -0600 Subject: [PATCH 097/117] Improve batch size and tx size limit defaults for L3s --- arbnode/batch_poster.go | 27 +++++++++++++------------- arbnode/execution/sequencer.go | 1 + arbnode/node.go | 13 +++---------- cmd/chaininfo/arbitrum_chain_info.json | 6 +++++- cmd/chaininfo/chain_info.go | 5 +++-- cmd/deploy/deploy.go | 20 +++++++++++++------ cmd/nitro/nitro.go | 16 ++++++++++++++- system_tests/common_test.go | 9 +++++++-- 8 files changed, 62 insertions(+), 35 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 8a69bf13ef..42b983f0fb 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -167,19 +167,20 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, - MaxSize: 100000, - PollInterval: time.Second * 10, - ErrorDelay: time.Second * 10, - MaxDelay: time.Hour, - WaitForMaxDelay: false, - CompressionLevel: brotli.BestCompression, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 50_000, - DataPoster: dataposter.DefaultDataPosterConfig, - ParentChainWallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, + // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go + MaxSize: 100000, + PollInterval: time.Second * 10, + ErrorDelay: time.Second * 10, + MaxDelay: time.Hour, + WaitForMaxDelay: false, + CompressionLevel: brotli.BestCompression, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 50_000, + DataPoster: dataposter.DefaultDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ diff --git 
a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go index 402958399d..927ce7ac08 100644 --- a/arbnode/execution/sequencer.go +++ b/arbnode/execution/sequencer.go @@ -110,6 +110,7 @@ var DefaultSequencerConfig = SequencerConfig{ NonceCacheSize: 1024, Dangerous: DefaultDangerousSequencerConfig, // 95% of the default batch poster limit, leaving 5KB for headers and such + // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, diff --git a/arbnode/node.go b/arbnode/node.go index e6960a3f22..2882881dd4 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -234,19 +234,12 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } } -func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { - l1Reader, err := headerreader.New(ctx, l1client, readerConfig) - if err != nil { - return nil, err - } - l1Reader.Start(ctx) - defer l1Reader.StopAndWait() - +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -265,7 +258,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b if err != nil { return nil, fmt.Errorf("error submitting create rollup tx: %w", err) } - receipt, err := l1Reader.WaitForTxApproval(ctx, tx) + receipt, err := parentChainReader.WaitForTxApproval(ctx, tx) if err != nil { return nil, fmt.Errorf("error executing create rollup tx: %w", err) } diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index f5fa56102c..e66774d426 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -2,6 +2,7 @@ { "chain-name": "arb1", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc", "feed-url": "wss://arb1.arbitrum.io/feed", "has-genesis-state": true, @@ -51,6 +52,7 @@ { "chain-name": "nova", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://nova.arbitrum.io/rpc", "feed-url": "wss://nova.arbitrum.io/feed", "das-index-url": "https://nova.arbitrum.io/das-servers", @@ -100,6 +102,7 @@ { "chain-name": "goerli-rollup", "parent-chain-id": 5, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc", "feed-url": "wss://goerli-rollup.arbitrum.io/feed", "chain-config": @@ -215,9 +218,10 @@ } } }, - { + { "chain-id": 421614, "parent-chain-id": 11155111, + "parent-chain-is-arbitrum": false, "chain-name": "sepolia-rollup", "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc", "feed-url": "wss://sepolia-rollup.arbitrum.io/feed", diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 
c9ffca9830..f75779b4aa 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -18,8 +18,9 @@ import ( var DefaultChainInfo []byte type ChainInfo struct { - ChainName string `json:"chain-name"` - ParentChainId uint64 `json:"parent-chain-id"` + ChainName string `json:"chain-name"` + ParentChainId uint64 `json:"parent-chain-id"` + ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"` // This is the forwarding target to submit transactions to, called the sequencer URL for clarity SequencerUrl string `json:"sequencer-url"` FeedUrl string `json:"feed-url"` diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 357fda14e6..17725a7a4c 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -127,13 +127,19 @@ func main() { panic(fmt.Errorf("failed to deserialize chain config: %w", err)) } + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }) + if err != nil { + panic(fmt.Errorf("failed to create header reader: %w", err)) + } + l1Reader.Start(ctx) + defer l1Reader.StopAndWait() + deployedAddresses, err := arbnode.DeployOnL1( ctx, - l1client, + l1Reader, l1TransactionOpts, sequencerAddress, *authorizevalidators, - func() *headerreader.Config { return &headerReaderConfig }, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), ) if err != nil { @@ -148,12 +154,14 @@ func main() { if err := os.WriteFile(*outfile, deployData, 0600); err != nil { panic(err) } + parentChainIsArbitrum := l1Reader.IsParentChainArbitrum() chainsInfo := []chaininfo.ChainInfo{ { - ChainName: *l2ChainName, - ParentChainId: l1ChainId.Uint64(), - ChainConfig: &chainConfig, - RollupAddresses: deployedAddresses, + ChainName: *l2ChainName, + ParentChainId: l1ChainId.Uint64(), + ParentChainIsArbitrum: &parentChainIsArbitrum, + ChainConfig: &chainConfig, + RollupAddresses: deployedAddresses, }, } chainsInfoJson, err := json.Marshal(chainsInfo) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 407ed0afe7..350a8f7ca4 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -357,7 +357,6 @@ func mainImpl() int { l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) - } // Just create validator smart wallet if needed then exit @@ -768,6 +767,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if err != nil { return false, err } + var parentChainIsArbitrum bool + if chainInfo.ParentChainIsArbitrum != nil { + parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum + } else { + log.Warn("Chain information parentChainIsArbitrum field missing, in the future this will be required", "chainId", chainId, "parentChainId", chainInfo.ParentChainId) + _, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "") + if err == nil { + parentChainIsArbitrum = true + } + } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), @@ -787,6 +796,11 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if !chainInfo.HasGenesisState { chainDefaults["init.empty"] = true } + if parentChainIsArbitrum { + safeBatchSize := execution.DefaultSequencerConfig.MaxTxDataSize - 5000 + chainDefaults["node.batch-poster.max-size"] = safeBatchSize + 
chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - 5000 + } err = k.Load(confmap.Provider(chainDefaults, "."), nil) if err != nil { return false, err diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 81cb18ab30..b92fbf7578 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -475,13 +475,18 @@ func DeployOnTestL1( Require(t, err) serializedChainConfig, err := json.Marshal(chainConfig) Require(t, err) + + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }) + Require(t, err) + l1Reader.Start(ctx) + defer l1Reader.StopAndWait() + addresses, err := arbnode.DeployOnL1( ctx, - l1client, + l1Reader, &l1TransactionOpts, l1info.GetAddress("Sequencer"), 0, - func() *headerreader.Config { return &headerreader.TestConfig }, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), ) Require(t, err) From 2f24e29d7e7127c43e2a16d0d195552fd6443819 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 8 Sep 2023 11:22:57 -0600 Subject: [PATCH 098/117] Ensure the parent chain has enough space for our batches --- cmd/nitro/nitro.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 350a8f7ca4..4ebe598ee9 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -797,9 +797,14 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c chainDefaults["init.empty"] = true } if parentChainIsArbitrum { - safeBatchSize := execution.DefaultSequencerConfig.MaxTxDataSize - 5000 + l2MaxTxSize := execution.DefaultSequencerConfig.MaxTxDataSize + bufferSpace := 5000 + if l2MaxTxSize < bufferSpace*2 { + return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace) + } + safeBatchSize := l2MaxTxSize - bufferSpace chainDefaults["node.batch-poster.max-size"] = safeBatchSize - chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - 5000 + chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - bufferSpace } err = k.Load(confmap.Provider(chainDefaults, "."), nil) if err != nil { From db2eff36890ae967f84ec6047849644fb4a5e163 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 8 Sep 2023 16:26:06 -0600 Subject: [PATCH 099/117] Make the default data poster storage backend LevelDB --- arbnode/dataposter/data_poster.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index dff2602cac..b1e6555b26 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -111,16 +111,16 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a switch { case initConfig.UseNoOpStorage: queue = &noop.Storage{} - case initConfig.UseLevelDB: - queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) - case redisClient == nil: - queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) - default: + case redisClient != nil: var err error queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF) if err != nil { return nil, err } + case initConfig.UseLevelDB: + queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) + 
default: + queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) } return &DataPoster{ headerReader: headerReader, @@ -665,7 +665,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ MaxTipCapGwei: 5, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, - UseLevelDB: false, + UseLevelDB: true, UseNoOpStorage: false, LegacyStorageEncoding: true, } From 6d473afc52784e7573821d216d872ddc0277a03d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 8 Sep 2023 16:39:07 -0600 Subject: [PATCH 100/117] Use a separate prefix for the staker data poster --- arbnode/dataposter/storage/storage.go | 1 + arbnode/node.go | 6 +++--- system_tests/staker_test.go | 6 +++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index b59bf7bf62..70637c48e0 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -15,6 +15,7 @@ var ( ErrStorageRace = errors.New("storage race error") BlockValidatorPrefix string = "v" // the prefix for all block validator keys + StakerPrefix string = "S" // the prefix for all staker keys BatchPosterPrefix string = "b" // the prefix for all batch poster keys // TODO(anodar): move everything else from schema.go file to here once // execution split is complete. diff --git a/arbnode/node.go b/arbnode/node.go index 2882881dd4..356c46681f 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -538,7 +538,7 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error { return nil } -func ValidatorDataposter( +func StakerDataposter( db ethdb.Database, l1Reader *headerreader.HeaderReader, transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor, ) (*dataposter.DataPoster, error) { @@ -802,8 +802,8 @@ func createNodeImpl( var messagePruner *MessagePruner if config.Staker.Enable { - dp, err := ValidatorDataposter( - rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), + dp, err := StakerDataposter( + rawdb.NewTable(arbDb, storage.StakerPrefix), l1Reader, txOptsValidator, configFetcher, diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 82eede9f60..96ea1ee2e7 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -130,7 +130,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) valConfig := staker.TestL1ValidatorConfig - dpA, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpA, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } @@ -178,7 +178,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err) - dpB, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpB, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } @@ -217,7 +217,7 @@ func stakerTestImpl(t *testing.T, faultyStaker 
bool, honestStakerInactive bool) err = valWalletB.Initialize(ctx) Require(t, err) } - dpC, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpC, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } From 8e4c931eeb1b5793cc44a221aa54bb3029db0bf0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sat, 9 Sep 2023 22:14:20 -0600 Subject: [PATCH 101/117] Handle block "not found" case in batch revert polling --- arbnode/batch_poster.go | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 42b983f0fb..8144c9b7be 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -11,6 +11,7 @@ import ( "fmt" "math" "math/big" + "strings" "sync/atomic" "time" @@ -262,12 +263,13 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe // contain reverted batch_poster transaction. // It returns true if it finds batch posting needs to halt, which is true if a batch reverts // unless the data poster is configured with noop storage which can tolerate reverts. -func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, error) { - if from > to { - return false, fmt.Errorf("wrong range, from: %d is more to: %d", from, to) +// From must be a pointer to the starting block, which is updated after each block is checked for reverts +func (b *BatchPoster) checkReverts(ctx context.Context, from *int64, to int64) (bool, error) { + if *from > to { + return false, fmt.Errorf("wrong range, from: %d > to: %d", from, to) } - for idx := from; idx <= to; idx++ { - number := big.NewInt(idx) + for ; *from <= to; *from++ { + number := big.NewInt(*from) block, err := b.l1Reader.Client().BlockByNumber(ctx, number) if err != nil { return false, fmt.Errorf("getting block: %v by number: %w", number, err) @@ -277,7 +279,7 @@ func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, e if err != nil { return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err) } - if bytes.Equal(from.Bytes(), b.dataPoster.Sender().Bytes()) { + if from == b.dataPoster.Sender() { r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err) @@ -303,7 +305,7 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) { headerCh, unsubscribe := b.l1Reader.Subscribe(false) defer unsubscribe() - last := int64(0) // number of last seen block + nextToCheck := int64(0) // the first unchecked block for { // Poll until: // - L1 headers reader channel is closed, or @@ -312,31 +314,37 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) { select { case h, ok := <-headerCh: if !ok { - log.Info("L1 headers channel has been closed") + log.Info("L1 headers channel checking for batch poster reverts has been closed") return } // If this is the first block header, set last seen as number-1. // We may see same block number again if there is L1 reorg, in that // case we check the block again. 
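			// (nextToCheck itself is still unscanned: checkReverts walks the
			// inclusive range [nextToCheck, head] and, through the pointer,
			// moves nextToCheck forward as each block is cleared.)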
- if last == 0 || last == h.Number.Int64() { - last = h.Number.Int64() - 1 + if nextToCheck == 0 || nextToCheck == h.Number.Int64() { + nextToCheck = h.Number.Int64() } - if h.Number.Int64()-last > 100 { - log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", last, "current", h.Number) - last = h.Number.Int64() + if h.Number.Int64()-nextToCheck > 100 { + log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", nextToCheck, "current", h.Number) + nextToCheck = h.Number.Int64() continue } - reverted, err := b.checkReverts(ctx, last+1, h.Number.Int64()) + reverted, err := b.checkReverts(ctx, &nextToCheck, h.Number.Int64()) if err != nil { - log.Error("Checking batch reverts", "error", err) + logLevel := log.Error + if strings.Contains(err.Error(), "not found") { + // Just parent chain node inconsistency + // One node sent us a block, but another didn't have it + // We'll try to check this block again next loop + logLevel = log.Debug + } + logLevel("Error checking batch reverts", "err", err) continue } if reverted { b.batchReverted.Store(true) return } - last = h.Number.Int64() case <-ctx.Done(): return } From e9dd36a900545be953f38449bf57aa8317808e58 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 12 Sep 2023 12:18:13 -0600 Subject: [PATCH 102/117] Don't confirmDataPosterIsReady if watchtower --- staker/staker.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/staker/staker.go b/staker/staker.go index 8fdbbd648f..1b6538b161 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -552,11 +552,11 @@ func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { } func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { - err := s.confirmDataPosterIsReady(ctx) - if err != nil { - return nil, err - } if s.config.strategy != WatchtowerStrategy { + err := s.confirmDataPosterIsReady(ctx) + if err != nil { + return nil, err + } whitelisted, err := s.IsWhitelisted(ctx) if err != nil { return nil, fmt.Errorf("error checking if whitelisted: %w", err) From efc4fc134d97b55f9b5f2fd91a7fb1e8c1021f5a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 13 Sep 2023 11:08:10 -0500 Subject: [PATCH 103/117] add new RPC GetL2BlockRangeForL1 to fetch L2 block range for L1 block number --- contracts | 2 +- go-ethereum | 2 +- nodeInterface/NodeInterface.go | 80 ++++++++++++++++++++++++++++++ system_tests/nodeinterface_test.go | 72 +++++++++++++++++++++++++++ 4 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 system_tests/nodeinterface_test.go diff --git a/contracts b/contracts index 97cfbe00ff..accdcee457 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 97cfbe00ff0eea4d7f5f5f3afb01598c19ddabc4 +Subproject commit accdcee45798af5025836a04ee5bdcb0669cb476 diff --git a/go-ethereum b/go-ethereum index b4bd0da114..3f2e789b38 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b4bd0da1142fe6bb81cac7e0794ebb4746b9885a +Subproject commit 3f2e789b3857ccdd647c319e16f1a00805d1d6bd diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index a363458663..3b743dbb2d 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -590,3 +590,83 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h calldataForL1 = data return } + +func (n NodeInterface) getL1BlockNum(l2BlockNum uint64) (uint64, error) { + blockHeader, err := 
n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) + if err != nil { + return 0, err + } + l1BlockNum := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber + return l1BlockNum, nil +} + +func (n NodeInterface) GetL2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) ([]uint64, error) { + currentBlockNum := n.backend.CurrentBlock().Number.Uint64() + genesis := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum + + checkCorrectness := func(blockNum uint64, target uint64) error { + blockL1Num, err := n.getL1BlockNum(blockNum) + if err != nil { + return err + } + if blockL1Num != target { + return errors.New("no L2 block was found with the given L1 block number") + } + return nil + } + + lowFirstBlock := genesis + highFirstBlock := currentBlockNum + lowLastBlock := genesis + highLastBlock := currentBlockNum + var storedMid uint64 + var storedMidBlockL1Num uint64 + for lowFirstBlock < highFirstBlock || lowLastBlock < highLastBlock { + if lowFirstBlock < highFirstBlock { + mid := (lowFirstBlock + highFirstBlock) / 2 + midBlockL1Num, err := n.getL1BlockNum(mid) + if err != nil { + return nil, err + } + storedMid = mid + storedMidBlockL1Num = midBlockL1Num + if midBlockL1Num < l1BlockNum { + lowFirstBlock = mid + 1 + } else { + highFirstBlock = mid + } + } + if lowLastBlock < highLastBlock { + // dont fetch midBlockL1Num if its already fetched above + mid := (lowLastBlock + highLastBlock) / 2 + var midBlockL1Num uint64 + var err error + if mid == storedMid { + midBlockL1Num = storedMidBlockL1Num + } else { + midBlockL1Num, err = n.getL1BlockNum(mid) + if err != nil { + return nil, err + } + } + if midBlockL1Num < l1BlockNum+1 { + lowLastBlock = mid + 1 + } else { + highLastBlock = mid + } + } + } + err := checkCorrectness(highFirstBlock, l1BlockNum) + if err != nil { + return nil, err + } + err = checkCorrectness(highLastBlock, l1BlockNum) + if err != nil { + highLastBlock -= 1 + err = checkCorrectness(highLastBlock, l1BlockNum) + if err != nil { + return nil, err + } + } + return []uint64{highFirstBlock, highLastBlock}, nil +} diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go new file mode 100644 index 0000000000..266b50d6c8 --- /dev/null +++ b/system_tests/nodeinterface_test.go @@ -0,0 +1,72 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" +) + +func getL1BlockNum(t *testing.T, ctx context.Context, client *ethclient.Client, l2BlockNum uint64) uint64 { + header, err := client.HeaderByNumber(ctx, big.NewInt(int64(l2BlockNum))) + Require(t, err) + l1BlockNum := types.DeserializeHeaderExtraInformation(header).L1BlockNumber + return l1BlockNum +} + +func TestGetL2BlockRangeForL1(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l2info, node, l2client, l1info, _, _, l1stack := createTestNodeOnL1(t, ctx, true) + defer requireClose(t, l1stack) + defer node.StopAndWait() + user := l1info.GetDefaultTransactOpts("User", ctx) + + numTransactions := 30 + for i := 0; i < numTransactions; i++ { + TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx) + } + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) + Require(t, err) + + l1BlockNums := map[uint64][]uint64{} + latestL2, err := l2client.BlockNumber(ctx) + Require(t, err) + for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { + l1BlockNum := getL1BlockNum(t, ctx, l2client, l2BlockNum) + l1BlockNums[l1BlockNum] = append(l1BlockNums[l1BlockNum], l2BlockNum) + } + + // Test success + for l1BlockNum := range l1BlockNums { + rng, err := nodeInterface.GetL2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) + Require(t, err) + n := len(l1BlockNums[l1BlockNum]) + expected := []uint64{l1BlockNums[l1BlockNum][0], l1BlockNums[l1BlockNum][n-1]} + if expected[0] != rng[0] || expected[1] != rng[1] { + unexpectedL1BlockNum := getL1BlockNum(t, ctx, l2client, rng[1]) + // handle the edge case when new l2 blocks are produced between latestL2 was last calculated and now + if unexpectedL1BlockNum != l1BlockNum { + t.Fatalf("GetL2BlockRangeForL1 failed to get a valid range for L1 block number: %v. Given range: %v. 
Expected range: %v", l1BlockNum, rng, expected) + } + } + } + // Test invalid case + finalValidL1BlockNumber := getL1BlockNum(t, ctx, l2client, latestL2) + _, err = nodeInterface.GetL2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1) + if err == nil { + t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") + } + +} From 176d7be4df2e227799355451b56c969aaff67b60 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 13 Sep 2023 11:01:32 -0600 Subject: [PATCH 104/117] Make batch revert nextToCheck a field --- arbnode/batch_poster.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 8144c9b7be..89a36eba91 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -76,7 +76,8 @@ type BatchPoster struct { backlog uint64 lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds - batchReverted atomic.Bool // indicates whether data poster batch was reverted + batchReverted atomic.Bool // indicates whether data poster batch was reverted + nextRevertCheckBlock int64 // the last parent block scanned for reverting batches } type l1BlockBound int @@ -263,13 +264,12 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe // contain reverted batch_poster transaction. // It returns true if it finds batch posting needs to halt, which is true if a batch reverts // unless the data poster is configured with noop storage which can tolerate reverts. -// From must be a pointer to the starting block, which is updated after each block is checked for reverts -func (b *BatchPoster) checkReverts(ctx context.Context, from *int64, to int64) (bool, error) { - if *from > to { - return false, fmt.Errorf("wrong range, from: %d > to: %d", from, to) +func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) { + if b.nextRevertCheckBlock > to { + return false, fmt.Errorf("wrong range, from: %d > to: %d", b.nextRevertCheckBlock, to) } - for ; *from <= to; *from++ { - number := big.NewInt(*from) + for ; b.nextRevertCheckBlock <= to; b.nextRevertCheckBlock++ { + number := big.NewInt(b.nextRevertCheckBlock) block, err := b.l1Reader.Client().BlockByNumber(ctx, number) if err != nil { return false, fmt.Errorf("getting block: %v by number: %w", number, err) @@ -305,7 +305,6 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) { headerCh, unsubscribe := b.l1Reader.Subscribe(false) defer unsubscribe() - nextToCheck := int64(0) // the first unchecked block for { // Poll until: // - L1 headers reader channel is closed, or @@ -317,19 +316,20 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) { log.Info("L1 headers channel checking for batch poster reverts has been closed") return } + blockNum := h.Number.Int64() // If this is the first block header, set last seen as number-1. // We may see same block number again if there is L1 reorg, in that // case we check the block again. 
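			// Keeping the cursor as the b.nextRevertCheckBlock field means a
			// scan that aborts early (for example on a transient "not found"
			// from one parent chain node) keeps its progress: with the cursor
			// at 100 and head 103, a failure fetching block 102 leaves the
			// cursor at 102, and the next header resumes there instead of
			// rescanning 100 and 101.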
- if nextToCheck == 0 || nextToCheck == h.Number.Int64() { - nextToCheck = h.Number.Int64() + if b.nextRevertCheckBlock == 0 || b.nextRevertCheckBlock > blockNum { + b.nextRevertCheckBlock = blockNum } - if h.Number.Int64()-nextToCheck > 100 { - log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", nextToCheck, "current", h.Number) - nextToCheck = h.Number.Int64() + if blockNum-b.nextRevertCheckBlock > 100 { + log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", b.nextRevertCheckBlock, "current", blockNum) + b.nextRevertCheckBlock = blockNum continue } - reverted, err := b.checkReverts(ctx, &nextToCheck, h.Number.Int64()) + reverted, err := b.checkReverts(ctx, blockNum) if err != nil { logLevel := log.Error if strings.Contains(err.Error(), "not found") { From 39480a0425ff76b4b743273004eb029f5d86aa51 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 13 Sep 2023 11:14:11 -0600 Subject: [PATCH 105/117] Reorder prechecker balance and conditional options checks --- arbnode/execution/tx_pre_checker.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go index dc069a6d18..968a1f266b 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/arbnode/execution/tx_pre_checker.go @@ -145,11 +145,6 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if config.Strictness < TxPreCheckerStrictnessLikelyCompatible { return nil } - balance := statedb.GetBalance(sender) - cost := tx.Cost() - if arbmath.BigLessThan(balance, cost) { - return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) - } if options != nil { if err := options.Check(extraInfo.L1BlockNumber, header.Time, statedb); err != nil { conditionalTxRejectedByTxPreCheckerCurrentStateCounter.Inc(1) @@ -185,6 +180,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty conditionalTxAcceptedByTxPreCheckerOldStateCounter.Inc(1) } } + balance := statedb.GetBalance(sender) + cost := tx.Cost() + if arbmath.BigLessThan(balance, cost) { + return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) + } if config.Strictness >= TxPreCheckerStrictnessFullValidation && tx.Nonce() > stateNonce { return MakeNonceError(sender, tx.Nonce(), stateNonce) } From bf0c01a1e98a846801288d1893f3a171ff96cb35 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 13 Sep 2023 15:27:04 -0500 Subject: [PATCH 106/117] fix failing test --- cmd/deploy/deploy.go | 5 ++++- system_tests/common_test.go | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 17725a7a4c..3e698749d5 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -14,10 +14,12 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator/server_common" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -127,7 +129,8 @@ func main() { panic(fmt.Errorf("failed to deserialize chain config: %w", err)) } - l1Reader, err := headerreader.New(ctx, l1client, 
func() *headerreader.Config { return &headerReaderConfig }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }, arbSys, true) if err != nil { panic(fmt.Errorf("failed to create header reader: %w", err)) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index b92fbf7578..a643b1b719 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -476,7 +476,8 @@ func DeployOnTestL1( serializedChainConfig, err := json.Marshal(chainConfig) Require(t, err) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys, true) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() From 6a9fd710cd6611ec6adcdadc56f8307afe13ecce Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 13 Sep 2023 19:42:33 -0500 Subject: [PATCH 107/117] revert impl --- arbnode/node.go | 2 +- cmd/daserver/daserver.go | 2 +- cmd/deploy/deploy.go | 2 +- cmd/nitro/nitro.go | 2 +- system_tests/common_test.go | 2 +- system_tests/das_test.go | 2 +- util/headerreader/header_reader.go | 11 +++++------ 7 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index a40c84fa94..5bdc716264 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -606,7 +606,7 @@ func createNodeImpl( var l1Reader *headerreader.HeaderReader if config.ParentChainReader.Enable { arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys, true) + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys) if err != nil { return nil, err } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 6b874f4639..335aba6a1b 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -199,7 +199,7 @@ func startup() error { return err } arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys, true) // TODO: config + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) // TODO: config if err != nil { return err } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 3e698749d5..d687821e8b 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -130,7 +130,7 @@ func main() { } arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }, arbSys, true) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }, arbSys) if err != nil { panic(fmt.Errorf("failed to create header reader: %w", err)) } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index f2cb3a37d2..a7dc7f26f9 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -357,7 +357,7 @@ func mainImpl() int { log.Crit("--node.validator.only-create-wallet-contract requires 
--node.validator.use-smart-contract-wallet") } arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys, true) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index a643b1b719..9fd002bd94 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -477,7 +477,7 @@ func DeployOnTestL1( Require(t, err) arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys, true) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() diff --git a/system_tests/das_test.go b/system_tests/das_test.go index e79b993d03..8889d2d53d 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -235,7 +235,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys, true) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index c7fa937385..befd54ace3 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -94,25 +94,24 @@ var TestConfig = Config{ UseFinalityData: false, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface, usePrecompilesgen bool) (*HeaderReader, error) { +func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { isParentChainArbitrum := false - if usePrecompilesgen { + var arbSys ArbSysInterface + if arbSysPrecompile != nil { codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) if err != nil { return nil, err } if len(codeAt) != 0 { isParentChainArbitrum = true - if arbSysPrecompile == nil { - return nil, errors.New("unable to create ArbSys from precompilesgen") - } + arbSys = arbSysPrecompile } } return &HeaderReader{ client: client, config: config, isParentChainArbitrum: isParentChainArbitrum, - arbSys: arbSysPrecompile, + arbSys: arbSys, outChannels: make(map[chan<- *types.Header]struct{}), outChannelsBehind: make(map[chan<- *types.Header]struct{}), safe: cachedHeader{rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, From 3941ebc9e07bca3f5be9a9c3abe2816b8665312b Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 15 Sep 2023 14:02:42 -0500 Subject: [PATCH 108/117] fix overflow possibility --- nodeInterface/NodeInterface.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 3b743dbb2d..92ed2064c3 100644 --- 
a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -623,7 +623,7 @@ func (n NodeInterface) GetL2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) var storedMidBlockL1Num uint64 for lowFirstBlock < highFirstBlock || lowLastBlock < highLastBlock { if lowFirstBlock < highFirstBlock { - mid := (lowFirstBlock + highFirstBlock) / 2 + mid := arbmath.SaturatingUAdd(lowFirstBlock, highFirstBlock) / 2 midBlockL1Num, err := n.getL1BlockNum(mid) if err != nil { return nil, err @@ -638,7 +638,7 @@ func (n NodeInterface) GetL2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) } if lowLastBlock < highLastBlock { // dont fetch midBlockL1Num if its already fetched above - mid := (lowLastBlock + highLastBlock) / 2 + mid := arbmath.SaturatingUAdd(lowLastBlock, highLastBlock) / 2 var midBlockL1Num uint64 var err error if mid == storedMid { From b5379b9eba7b847bdb64cdb6c0bb2640bbe86211 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 18 Sep 2023 16:06:02 -0500 Subject: [PATCH 109/117] address PR comments --- contracts | 2 +- nodeInterface/NodeInterface.go | 106 ++++++++++++++--------------- system_tests/nodeinterface_test.go | 43 ++++++------ 3 files changed, 77 insertions(+), 74 deletions(-) diff --git a/contracts b/contracts index accdcee457..436e1cf82c 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit accdcee45798af5025836a04ee5bdcb0669cb476 +Subproject commit 436e1cf82c5696eb918d842256328ba86fbe5019 diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 92ed2064c3..98394f9343 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -591,82 +591,82 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h return } -func (n NodeInterface) getL1BlockNum(l2BlockNum uint64) (uint64, error) { +func (n NodeInterface) blockL1Num(l2BlockNum uint64) (uint64, error) { blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) if err != nil { return 0, err } - l1BlockNum := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber - return l1BlockNum, nil + blockL1Num := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber + return blockL1Num, nil } -func (n NodeInterface) GetL2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) ([]uint64, error) { +func (n NodeInterface) matchL2BlockNumWithL1(l2BlockNum uint64, l1BlockNum uint64) error { + blockL1Num, err := n.blockL1Num(l2BlockNum) + if err != nil { + return fmt.Errorf("failed to get the L1 block number of the L2 block: %v. Error: %w", l2BlockNum, err) + } + if blockL1Num != l1BlockNum { + return fmt.Errorf("no L2 block was found with the given L1 block number. 
Found L2 block: %v with L1 block number: %v, given L1 block number: %v", l2BlockNum, blockL1Num, l1BlockNum) + } + return nil +} + +// L2BlockRangeForL1 finds the first and last L2 block numbers that have the given L1 block number +func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (uint64, uint64, error) { currentBlockNum := n.backend.CurrentBlock().Number.Uint64() genesis := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum - checkCorrectness := func(blockNum uint64, target uint64) error { - blockL1Num, err := n.getL1BlockNum(blockNum) - if err != nil { - return err - } - if blockL1Num != target { - return errors.New("no L2 block was found with the given L1 block number") - } - return nil + type helperStruct struct { + low uint64 + high uint64 } - lowFirstBlock := genesis - highFirstBlock := currentBlockNum - lowLastBlock := genesis - highLastBlock := currentBlockNum - var storedMid uint64 - var storedMidBlockL1Num uint64 - for lowFirstBlock < highFirstBlock || lowLastBlock < highLastBlock { - if lowFirstBlock < highFirstBlock { - mid := arbmath.SaturatingUAdd(lowFirstBlock, highFirstBlock) / 2 - midBlockL1Num, err := n.getL1BlockNum(mid) - if err != nil { - return nil, err - } - storedMid = mid - storedMidBlockL1Num = midBlockL1Num - if midBlockL1Num < l1BlockNum { - lowFirstBlock = mid + 1 - } else { - highFirstBlock = mid - } - } - if lowLastBlock < highLastBlock { + searchHelper := func(currentBlock *helperStruct, fetchedMid *helperStruct, target uint64) error { + if currentBlock.low < currentBlock.high { // dont fetch midBlockL1Num if its already fetched above - mid := arbmath.SaturatingUAdd(lowLastBlock, highLastBlock) / 2 + mid := arbmath.SaturatingUAdd(currentBlock.low, currentBlock.high) / 2 var midBlockL1Num uint64 var err error - if mid == storedMid { - midBlockL1Num = storedMidBlockL1Num + if mid == fetchedMid.low { + midBlockL1Num = fetchedMid.high } else { - midBlockL1Num, err = n.getL1BlockNum(mid) + midBlockL1Num, err = n.blockL1Num(mid) if err != nil { - return nil, err + return err } + fetchedMid.low = mid + fetchedMid.high = midBlockL1Num } - if midBlockL1Num < l1BlockNum+1 { - lowLastBlock = mid + 1 + if midBlockL1Num < target { + currentBlock.low = mid + 1 } else { - highLastBlock = mid + currentBlock.high = mid } + return nil } + return nil } - err := checkCorrectness(highFirstBlock, l1BlockNum) - if err != nil { - return nil, err + firstBlock := &helperStruct{low: genesis, high: currentBlockNum} + lastBlock := &helperStruct{low: genesis, high: currentBlockNum} + // in storedMid low corresponds to value mid and high corresponds to midBlockL1Num inside searchHelper + storedMid := &helperStruct{low: currentBlockNum + 1} + var err error + for firstBlock.low < firstBlock.high || lastBlock.low < lastBlock.high { + if err = searchHelper(firstBlock, storedMid, l1BlockNum); err != nil { + return 0, 0, err + } + if err = searchHelper(lastBlock, storedMid, l1BlockNum+1); err != nil { + return 0, 0, err + } } - err = checkCorrectness(highLastBlock, l1BlockNum) - if err != nil { - highLastBlock -= 1 - err = checkCorrectness(highLastBlock, l1BlockNum) - if err != nil { - return nil, err + if err := n.matchL2BlockNumWithL1(firstBlock.high, l1BlockNum); err != nil { + return 0, 0, err + } + if err := n.matchL2BlockNumWithL1(lastBlock.high, l1BlockNum); err != nil { + lastBlock.high -= 1 + if err = n.matchL2BlockNumWithL1(lastBlock.high, l1BlockNum); err != nil { + return 0, 0, err } } - return []uint64{highFirstBlock, highLastBlock}, nil + 
return firstBlock.high, lastBlock.high, nil } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 266b50d6c8..bfdff3d02d 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -10,19 +10,11 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" ) -func getL1BlockNum(t *testing.T, ctx context.Context, client *ethclient.Client, l2BlockNum uint64) uint64 { - header, err := client.HeaderByNumber(ctx, big.NewInt(int64(l2BlockNum))) - Require(t, err) - l1BlockNum := types.DeserializeHeaderExtraInformation(header).L1BlockNumber - return l1BlockNum -} - -func TestGetL2BlockRangeForL1(t *testing.T) { +func TestL2BlockRangeForL1(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -32,7 +24,7 @@ func TestGetL2BlockRangeForL1(t *testing.T) { defer node.StopAndWait() user := l1info.GetDefaultTransactOpts("User", ctx) - numTransactions := 30 + numTransactions := 200 for i := 0; i < numTransactions; i++ { TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx) } @@ -40,31 +32,42 @@ func TestGetL2BlockRangeForL1(t *testing.T) { nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) Require(t, err) + getBlockL1Num := func(l2BlockNum uint64) uint64 { + header, err := l2client.HeaderByNumber(ctx, big.NewInt(int64(l2BlockNum))) + Require(t, err) + l1BlockNum := types.DeserializeHeaderExtraInformation(header).L1BlockNumber + return l1BlockNum + } + l1BlockNums := map[uint64][]uint64{} latestL2, err := l2client.BlockNumber(ctx) Require(t, err) for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { - l1BlockNum := getL1BlockNum(t, ctx, l2client, l2BlockNum) - l1BlockNums[l1BlockNum] = append(l1BlockNums[l1BlockNum], l2BlockNum) + l1BlockNum := getBlockL1Num(l2BlockNum) + if len(l1BlockNums[l1BlockNum]) <= 1 { + l1BlockNums[l1BlockNum] = append(l1BlockNums[l1BlockNum], l2BlockNum) + } else { + l1BlockNums[l1BlockNum][1] = l2BlockNum + } } // Test success for l1BlockNum := range l1BlockNums { - rng, err := nodeInterface.GetL2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) + rng, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) Require(t, err) n := len(l1BlockNums[l1BlockNum]) expected := []uint64{l1BlockNums[l1BlockNum][0], l1BlockNums[l1BlockNum][n-1]} - if expected[0] != rng[0] || expected[1] != rng[1] { - unexpectedL1BlockNum := getL1BlockNum(t, ctx, l2client, rng[1]) - // handle the edge case when new l2 blocks are produced between latestL2 was last calculated and now - if unexpectedL1BlockNum != l1BlockNum { - t.Fatalf("GetL2BlockRangeForL1 failed to get a valid range for L1 block number: %v. Given range: %v. Expected range: %v", l1BlockNum, rng, expected) + if expected[0] != rng.FirstBlock || expected[1] != rng.LastBlock { + unexpectedL1BlockNum := getBlockL1Num(rng.LastBlock) + // Handle the edge case when new l2 blocks are produced between latestL2 was last calculated and now. 
+ if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] { + t.Errorf("L2BlockRangeForL1(%d) = (%d %d) want (%d %d)", l1BlockNum, rng.FirstBlock, rng.LastBlock, expected[0], expected[1]) } } } // Test invalid case - finalValidL1BlockNumber := getL1BlockNum(t, ctx, l2client, latestL2) - _, err = nodeInterface.GetL2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1) + finalValidL1BlockNumber := getBlockL1Num(latestL2) + _, err = nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1) if err == nil { t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") } From 2aa15ad45eaf2f8390e12c75611cf3e29cd16bf0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 19 Sep 2023 15:44:00 -0600 Subject: [PATCH 110/117] Add an optional limit to the maximum size of the relay catchup buffer --- broadcaster/broadcaster.go | 2 +- broadcaster/sequencenumbercatchupbuffer.go | 29 +++++++++--- .../sequencenumbercatchupbuffer_test.go | 47 +++++++++++++++++++ wsbroadcastserver/wsbroadcastserver.go | 4 ++ 4 files changed, 75 insertions(+), 7 deletions(-) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index bde80c93d1..c3f4c62ce0 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -61,7 +61,7 @@ type ConfirmedSequenceNumberMessage struct { } func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId uint64, feedErrChan chan error, dataSigner signature.DataSignerFunc) *Broadcaster { - catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }) + catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }, func() int { return config().MaxCatchup }) return &Broadcaster{ server: wsbroadcastserver.NewWSBroadcastServer(config, catchupBuffer, chainId, feedErrChan), catchupBuffer: catchupBuffer, diff --git a/broadcaster/sequencenumbercatchupbuffer.go b/broadcaster/sequencenumbercatchupbuffer.go index 7664f1b8da..bdd3e60c5b 100644 --- a/broadcaster/sequencenumbercatchupbuffer.go +++ b/broadcaster/sequencenumbercatchupbuffer.go @@ -29,11 +29,13 @@ type SequenceNumberCatchupBuffer struct { messages []*BroadcastFeedMessage messageCount int32 limitCatchup func() bool + maxCatchup func() int } -func NewSequenceNumberCatchupBuffer(limitCatchup func() bool) *SequenceNumberCatchupBuffer { +func NewSequenceNumberCatchupBuffer(limitCatchup func() bool, maxCatchup func() int) *SequenceNumberCatchupBuffer { return &SequenceNumberCatchupBuffer{ limitCatchup: limitCatchup, + maxCatchup: maxCatchup, } } @@ -98,6 +100,15 @@ func (b *SequenceNumberCatchupBuffer) OnRegisterClient(clientConnection *wsbroad return nil, bmCount, time.Since(start) } +// Takes as input an index into the messages array, not a message index +func (b *SequenceNumberCatchupBuffer) pruneBufferToIndex(idx int) { + b.messages = b.messages[idx:] + if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { + // Too much spare capacity, copy to fresh slice to reset memory usage + b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) 
+ } +} + func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber arbutil.MessageIndex) { if len(b.messages) == 0 { return @@ -126,11 +137,7 @@ func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber ar return } - b.messages = b.messages[confirmedIndex+1:] - if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { - // Too much spare capacity, copy to fresh slice to reset memory usage - b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) - } + b.pruneBufferToIndex(int(confirmedIndex) + 1) } func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { @@ -147,6 +154,12 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { confirmedSequenceNumberGauge.Update(int64(confirmMsg.SequenceNumber)) } + maxCatchup := b.maxCatchup() + if maxCatchup == 0 { + b.messages = nil + return nil + } + for _, newMsg := range broadcastMessage.Messages { if len(b.messages) == 0 { // Add to empty list @@ -167,6 +180,10 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { } } + if maxCatchup >= 0 && len(b.messages) > maxCatchup { + b.pruneBufferToIndex(len(b.messages) - maxCatchup) + } + return nil } diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go index 40fae9875f..17d850d9eb 100644 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ b/broadcaster/sequencenumbercatchupbuffer_test.go @@ -22,6 +22,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestGetEmptyCacheMessages(t *testing.T) { @@ -29,6 +30,7 @@ func TestGetEmptyCacheMessages(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -60,6 +62,7 @@ func TestGetCacheMessages(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -110,6 +113,7 @@ func TestDeleteConfirmedNil(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } buffer.deleteConfirmed(0) @@ -124,6 +128,7 @@ func TestDeleteConfirmInvalidOrder(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm before cache @@ -139,6 +144,7 @@ func TestDeleteConfirmed(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than cache @@ -154,6 +160,7 @@ func TestDeleteFreeMem(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than cache @@ -169,6 +176,7 @@ func TestBroadcastBadMessage(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } var foo int @@ -187,6 +195,7 @@ func TestBroadcastPastSeqNum(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: 
func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -208,6 +217,8 @@ func TestBroadcastFutureSeqNum(t *testing.T) { buffer := SequenceNumberCatchupBuffer{ messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -223,3 +234,39 @@ func TestBroadcastFutureSeqNum(t *testing.T) { } } + +func TestMaxCatchupBufferSize(t *testing.T) { + limit := 5 + buffer := SequenceNumberCatchupBuffer{ + messages: nil, + messageCount: 0, + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return limit }, + } + + for i := 10; i <= 20; i += 2 { + bm := BroadcastMessage{ + Messages: []*BroadcastFeedMessage{ + { + SequenceNumber: arbutil.MessageIndex(i), + }, + { + SequenceNumber: arbutil.MessageIndex(i + 1), + }, + }, + } + err := buffer.OnDoBroadcast(bm) + if err != nil { + t.Error("expected error") + } + haveMessages := buffer.getCacheMessages(0) + expectedCount := arbmath.MinInt(i+2-10, limit) + if len(haveMessages.Messages) != expectedCount { + t.Errorf("after broadcasting messages %v and %v, expected to have %v messages but got %v", i, i+1, expectedCount, len(haveMessages.Messages)) + } + expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(10, i+2-limit)) + if haveMessages.Messages[0].SequenceNumber != expectedFirstMessage { + t.Errorf("after broadcasting messages %v and %v, expected the first message to be %v but got %v", i, i+1, expectedFirstMessage, haveMessages.Messages[0].SequenceNumber) + } + } +} diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 014995cee0..cd277387a0 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -60,6 +60,7 @@ type BroadcasterConfig struct { EnableCompression bool `koanf:"enable-compression" reload:"hot"` // if reloaded to false will cause disconnection of clients with enabled compression on next broadcast RequireCompression bool `koanf:"require-compression" reload:"hot"` // if reloaded to true will cause disconnection of clients with disabled compression on next broadcast LimitCatchup bool `koanf:"limit-catchup" reload:"hot"` + MaxCatchup int `koanf:"max-catchup" reload:"hot"` ConnectionLimits ConnectionLimiterConfig `koanf:"connection-limits" reload:"hot"` ClientDelay time.Duration `koanf:"client-delay" reload:"hot"` } @@ -93,6 +94,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable-compression", DefaultBroadcasterConfig.EnableCompression, "enable per message deflate compression support") f.Bool(prefix+".require-compression", DefaultBroadcasterConfig.RequireCompression, "require clients to use compression") f.Bool(prefix+".limit-catchup", DefaultBroadcasterConfig.LimitCatchup, "only supply catchup buffer if requested sequence number is reasonable") + f.Int(prefix+".max-catchup", DefaultBroadcasterConfig.MaxCatchup, "the maximum size of the catchup buffer (-1 means unlimited)") ConnectionLimiterConfigAddOptions(prefix+".connection-limits", f) f.Duration(prefix+".client-delay", DefaultBroadcasterConfig.ClientDelay, "delay the first messages sent to each client by this amount") } @@ -117,6 +119,7 @@ var DefaultBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, } 
@@ -141,6 +144,7 @@ var DefaultTestBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, } From 724e82aa8d771f7f4f7f4de94175c029d3e95ca1 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 19 Sep 2023 15:45:05 -0600 Subject: [PATCH 111/117] Make constants in test clearer --- broadcaster/sequencenumbercatchupbuffer_test.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go index 17d850d9eb..fc6655057e 100644 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ b/broadcaster/sequencenumbercatchupbuffer_test.go @@ -244,7 +244,8 @@ func TestMaxCatchupBufferSize(t *testing.T) { maxCatchup: func() int { return limit }, } - for i := 10; i <= 20; i += 2 { + firstMessage := 10 + for i := firstMessage; i <= 20; i += 2 { bm := BroadcastMessage{ Messages: []*BroadcastFeedMessage{ { @@ -256,15 +257,13 @@ func TestMaxCatchupBufferSize(t *testing.T) { }, } err := buffer.OnDoBroadcast(bm) - if err != nil { - t.Error("expected error") - } + Require(t, err) haveMessages := buffer.getCacheMessages(0) - expectedCount := arbmath.MinInt(i+2-10, limit) + expectedCount := arbmath.MinInt(i+len(bm.Messages)-firstMessage, limit) if len(haveMessages.Messages) != expectedCount { t.Errorf("after broadcasting messages %v and %v, expected to have %v messages but got %v", i, i+1, expectedCount, len(haveMessages.Messages)) } - expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(10, i+2-limit)) + expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(firstMessage, i+len(bm.Messages)-limit)) if haveMessages.Messages[0].SequenceNumber != expectedFirstMessage { t.Errorf("after broadcasting messages %v and %v, expected the first message to be %v but got %v", i, i+1, expectedFirstMessage, haveMessages.Messages[0].SequenceNumber) } From 9b2788f8dfd6f915f27b2f10323f809d8d34b477 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 20 Sep 2023 11:05:34 -0500 Subject: [PATCH 112/117] address PR comments --- contracts | 2 +- nodeInterface/NodeInterface.go | 75 ++++++++++++------------------ system_tests/nodeinterface_test.go | 34 ++++++-------- 3 files changed, 46 insertions(+), 65 deletions(-) diff --git a/contracts b/contracts index 436e1cf82c..9edc1b943e 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 436e1cf82c5696eb918d842256328ba86fbe5019 +Subproject commit 9edc1b943ed0255f050f91f265d96bc1ad9de1a2 diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 98394f9343..e990383a3b 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -591,7 +591,7 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h return } -func (n NodeInterface) blockL1Num(l2BlockNum uint64) (uint64, error) { +func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) { blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) if err != nil { return 0, err @@ -600,8 +600,8 @@ func (n NodeInterface) blockL1Num(l2BlockNum uint64) (uint64, error) { return blockL1Num, nil } -func (n NodeInterface) matchL2BlockNumWithL1(l2BlockNum uint64, l1BlockNum uint64) error { - blockL1Num, err := n.blockL1Num(l2BlockNum) +func (n NodeInterface) matchL2BlockNumWithL1(c ctx, evm mech, l2BlockNum uint64, l1BlockNum uint64) error { + 
blockL1Num, err := n.BlockL1Num(c, evm, l2BlockNum) if err != nil { return fmt.Errorf("failed to get the L1 block number of the L2 block: %v. Error: %w", l2BlockNum, err) } @@ -616,57 +616,44 @@ func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (ui currentBlockNum := n.backend.CurrentBlock().Number.Uint64() genesis := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum - type helperStruct struct { - low uint64 - high uint64 - } - - searchHelper := func(currentBlock *helperStruct, fetchedMid *helperStruct, target uint64) error { - if currentBlock.low < currentBlock.high { - // dont fetch midBlockL1Num if its already fetched above - mid := arbmath.SaturatingUAdd(currentBlock.low, currentBlock.high) / 2 - var midBlockL1Num uint64 - var err error - if mid == fetchedMid.low { - midBlockL1Num = fetchedMid.high - } else { - midBlockL1Num, err = n.blockL1Num(mid) + storedMids := map[uint64]uint64{} + firstL2BlockForL1 := func(target uint64) (uint64, error) { + low, high := genesis, currentBlockNum + for low < high { + mid := arbmath.SaturatingUAdd(low, high) / 2 + if _, ok := storedMids[mid]; !ok { + midBlockL1Num, err := n.BlockL1Num(c, evm, mid) if err != nil { - return err + return 0, err } - fetchedMid.low = mid - fetchedMid.high = midBlockL1Num + storedMids[mid] = midBlockL1Num } - if midBlockL1Num < target { - currentBlock.low = mid + 1 + if storedMids[mid] < target { + low = mid + 1 } else { - currentBlock.high = mid + high = mid } - return nil - } - return nil - } - firstBlock := &helperStruct{low: genesis, high: currentBlockNum} - lastBlock := &helperStruct{low: genesis, high: currentBlockNum} - // in storedMid low corresponds to value mid and high corresponds to midBlockL1Num inside searchHelper - storedMid := &helperStruct{low: currentBlockNum + 1} - var err error - for firstBlock.low < firstBlock.high || lastBlock.low < lastBlock.high { - if err = searchHelper(firstBlock, storedMid, l1BlockNum); err != nil { - return 0, 0, err - } - if err = searchHelper(lastBlock, storedMid, l1BlockNum+1); err != nil { - return 0, 0, err } + return high, nil } - if err := n.matchL2BlockNumWithL1(firstBlock.high, l1BlockNum); err != nil { + + firstBlock, err := firstL2BlockForL1(l1BlockNum) + if err != nil { + return 0, 0, err + } + lastBlock, err := firstL2BlockForL1(l1BlockNum + 1) + if err != nil { + return 0, 0, err + } + + if err := n.matchL2BlockNumWithL1(c, evm, firstBlock, l1BlockNum); err != nil { return 0, 0, err } - if err := n.matchL2BlockNumWithL1(lastBlock.high, l1BlockNum); err != nil { - lastBlock.high -= 1 - if err = n.matchL2BlockNumWithL1(lastBlock.high, l1BlockNum); err != nil { + if err := n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { + lastBlock -= 1 + if err = n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { return 0, 0, err } } - return firstBlock.high, lastBlock.high, nil + return firstBlock, lastBlock, nil } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index bfdff3d02d..3389dda7c9 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -32,22 +32,16 @@ func TestL2BlockRangeForL1(t *testing.T) { nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) Require(t, err) - getBlockL1Num := func(l2BlockNum uint64) uint64 { - header, err := l2client.HeaderByNumber(ctx, big.NewInt(int64(l2BlockNum))) - Require(t, err) - l1BlockNum := types.DeserializeHeaderExtraInformation(header).L1BlockNumber - 
return l1BlockNum - } - - l1BlockNums := map[uint64][]uint64{} + l1BlockNums := map[uint64][2]uint64{} latestL2, err := l2client.BlockNumber(ctx) Require(t, err) for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { - l1BlockNum := getBlockL1Num(l2BlockNum) - if len(l1BlockNums[l1BlockNum]) <= 1 { - l1BlockNums[l1BlockNum] = append(l1BlockNums[l1BlockNum], l2BlockNum) + l1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, l2BlockNum) + Require(t, err) + if _, ok := l1BlockNums[l1BlockNum]; !ok { + l1BlockNums[l1BlockNum] = [2]uint64{l2BlockNum, l2BlockNum} } else { - l1BlockNums[l1BlockNum][1] = l2BlockNum + l1BlockNums[l1BlockNum] = [2]uint64{l1BlockNums[l1BlockNum][0], l2BlockNum} } } @@ -55,20 +49,20 @@ func TestL2BlockRangeForL1(t *testing.T) { for l1BlockNum := range l1BlockNums { rng, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) Require(t, err) - n := len(l1BlockNums[l1BlockNum]) - expected := []uint64{l1BlockNums[l1BlockNum][0], l1BlockNums[l1BlockNum][n-1]} - if expected[0] != rng.FirstBlock || expected[1] != rng.LastBlock { - unexpectedL1BlockNum := getBlockL1Num(rng.LastBlock) + expected := l1BlockNums[l1BlockNum] + if rng.FirstBlock != expected[0] || rng.LastBlock != expected[1] { + unexpectedL1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, rng.LastBlock) + Require(t, err) // Handle the edge case when new l2 blocks are produced between latestL2 was last calculated and now. - if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] { + if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] || rng.FirstBlock != expected[0] { t.Errorf("L2BlockRangeForL1(%d) = (%d %d) want (%d %d)", l1BlockNum, rng.FirstBlock, rng.LastBlock, expected[0], expected[1]) } } } // Test invalid case - finalValidL1BlockNumber := getBlockL1Num(latestL2) - _, err = nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1) - if err == nil { + finalValidL1BlockNumber, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, latestL2) + Require(t, err) + if _, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1); err == nil { t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") } From 0979ae8bcc6497537bec0bcd3f793ae41f2035f2 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 20 Sep 2023 17:07:14 -0700 Subject: [PATCH 113/117] Add DAS error when 1 error away from failure --- das/aggregator.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/das/aggregator.go b/das/aggregator.go index 3b34f12767..e8cc0a3c25 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -290,6 +290,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, cd.aggSignersMask = aggSignersMask certDetailsChan <- cd returned = true + if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet + storeFailures+1 > a.maxAllowedServiceStoreFailures { + log.Error("das.Aggregator: storing the batch data succeeded to enough DAS commitee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"") + } } else if storeFailures > a.maxAllowedServiceStoreFailures { cd := certDetails{} cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). 
%w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed) From 994d0b5b9a640e8cc745c39a24373a5127f867d7 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 21 Sep 2023 09:17:47 -0500 Subject: [PATCH 114/117] code refactor --- nodeInterface/NodeInterface.go | 4 ++-- system_tests/nodeinterface_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index e990383a3b..2a45ac033d 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -639,11 +639,11 @@ func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (ui firstBlock, err := firstL2BlockForL1(l1BlockNum) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("failed to get the first L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) } lastBlock, err := firstL2BlockForL1(l1BlockNum + 1) if err != nil { - return 0, 0, err + return 0, 0, fmt.Errorf("failed to get the last L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) } if err := n.matchL2BlockNumWithL1(c, evm, firstBlock, l1BlockNum); err != nil { diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 3389dda7c9..63b3d7bb7b 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -32,16 +32,16 @@ func TestL2BlockRangeForL1(t *testing.T) { nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) Require(t, err) - l1BlockNums := map[uint64][2]uint64{} + l1BlockNums := map[uint64]*[2]uint64{} latestL2, err := l2client.BlockNumber(ctx) Require(t, err) for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { l1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, l2BlockNum) Require(t, err) if _, ok := l1BlockNums[l1BlockNum]; !ok { - l1BlockNums[l1BlockNum] = [2]uint64{l2BlockNum, l2BlockNum} + l1BlockNums[l1BlockNum] = &[2]uint64{l2BlockNum, l2BlockNum} } else { - l1BlockNums[l1BlockNum] = [2]uint64{l1BlockNums[l1BlockNum][0], l2BlockNum} + l1BlockNums[l1BlockNum][1] = l2BlockNum } } From cb10050cbbee58b228055c1454744670121613fb Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 22 Sep 2023 09:33:45 -0500 Subject: [PATCH 115/117] code refactor --- nodeInterface/NodeInterface.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 2a45ac033d..f114cd5ac9 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -591,6 +591,7 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h return } +// L2BlockRangeForL1 fetches the L1 block number of a given l2 block number. 
func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) { blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) if err != nil { @@ -619,6 +620,13 @@ func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (ui storedMids := map[uint64]uint64{} firstL2BlockForL1 := func(target uint64) (uint64, error) { low, high := genesis, currentBlockNum + highBlockL1Num, err := n.BlockL1Num(c, evm, high) + if err != nil { + return 0, err + } + if highBlockL1Num < target { + return high + 1, nil + } for low < high { mid := arbmath.SaturatingUAdd(low, high) / 2 if _, ok := storedMids[mid]; !ok { @@ -649,11 +657,9 @@ func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (ui if err := n.matchL2BlockNumWithL1(c, evm, firstBlock, l1BlockNum); err != nil { return 0, 0, err } - if err := n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { - lastBlock -= 1 - if err = n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { - return 0, 0, err - } + lastBlock -= 1 + if err = n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { + return 0, 0, err } return firstBlock, lastBlock, nil } From 5bd76f333be3fdd54fd48db02723f8b84726daf1 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 25 Sep 2023 10:13:48 -0500 Subject: [PATCH 116/117] add documentation --- nodeInterface/NodeInterface.go | 1 + 1 file changed, 1 insertion(+) diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index f114cd5ac9..6984255393 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -592,6 +592,7 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h } // L2BlockRangeForL1 fetches the L1 block number of a given l2 block number. 
+// c ctx and evm mech arguments are not used but supplied to match the precompile function type in NodeInterface contract func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) { blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) if err != nil { From 000a97b49f842356211d6958c304fbc657266fb2 Mon Sep 17 00:00:00 2001 From: anodar Date: Tue, 26 Sep 2023 16:24:22 +0200 Subject: [PATCH 117/117] Fix nil pointer dereference in BlockNum precompile --- nodeInterface/NodeInterface.go | 3 +++ system_tests/nodeinterface_test.go | 32 ++++++++++++++++++------------ 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index 6984255393..f13f8ce6c0 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -598,6 +598,9 @@ func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, e if err != nil { return 0, err } + if blockHeader == nil { + return 0, fmt.Errorf("nil header for l2 block: %d", l2BlockNum) + } blockL1Num := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber return blockL1Num, nil } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 63b3d7bb7b..167f2204cd 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -30,40 +30,46 @@ func TestL2BlockRangeForL1(t *testing.T) { } nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) - Require(t, err) + if err != nil { + t.Fatalf("Error creating node interface: %v", err) + } l1BlockNums := map[uint64]*[2]uint64{} latestL2, err := l2client.BlockNumber(ctx) - Require(t, err) + if err != nil { + t.Fatalf("Error querying most recent l2 block: %v", err) + } for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { l1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, l2BlockNum) - Require(t, err) + if err != nil { + t.Fatalf("Error quering l1 block number for l2 block: %d, error: %v", l2BlockNum, err) + } if _, ok := l1BlockNums[l1BlockNum]; !ok { l1BlockNums[l1BlockNum] = &[2]uint64{l2BlockNum, l2BlockNum} - } else { - l1BlockNums[l1BlockNum][1] = l2BlockNum } + l1BlockNums[l1BlockNum][1] = l2BlockNum } - // Test success + // Test success. for l1BlockNum := range l1BlockNums { rng, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) - Require(t, err) + if err != nil { + t.Fatalf("Error getting l2 block range for l1 block: %d, error: %v", l1BlockNum, err) + } expected := l1BlockNums[l1BlockNum] if rng.FirstBlock != expected[0] || rng.LastBlock != expected[1] { unexpectedL1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, rng.LastBlock) - Require(t, err) + if err != nil { + t.Fatalf("Error quering l1 block number for l2 block: %d, error: %v", rng.LastBlock, err) + } // Handle the edge case when new l2 blocks are produced between latestL2 was last calculated and now. if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] || rng.FirstBlock != expected[0] { t.Errorf("L2BlockRangeForL1(%d) = (%d %d) want (%d %d)", l1BlockNum, rng.FirstBlock, rng.LastBlock, expected[0], expected[1]) } } } - // Test invalid case - finalValidL1BlockNumber, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, latestL2) - Require(t, err) - if _, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, finalValidL1BlockNumber+1); err == nil { + // Test invalid case. 
+ if _, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, 1e5); err == nil { t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") } - }