diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fbf00bcb2d..b27c196a6f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -117,8 +117,7 @@ jobs:
           skip-pkg-cache: true
       - name: Custom Lint
         run: |
-          go run ./linter/koanf ./...
-          go run ./linter/pointercheck ./...
+          go run ./linters ./...
 
       - name: Set environment variables
         run: |
diff --git a/Makefile b/Makefile
index 8b149bc0e1..d03b940726 100644
--- a/Makefile
+++ b/Makefile
@@ -311,8 +311,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro
 
 # strategic rules to minimize dependency building
 .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make
-	go run ./linter/koanf ./...
-	go run ./linter/pointercheck ./...
+	go run ./linters ./...
 	golangci-lint run --fix
 	yarn --cwd contracts solhint
 	@touch $@
diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go
index f96d51ce0e..7e9cf1dbad 100644
--- a/arbnode/transaction_streamer.go
+++ b/arbnode/transaction_streamer.go
@@ -968,8 +968,16 @@ func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution
 		log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos)
 		return false
 	}
-	err = s.exec.DigestMessage(pos, msg)
-	if err != nil {
+	var msgForPrefetch *arbostypes.MessageWithMetadata
+	if pos+1 < msgCount {
+		msg, err := s.GetMessage(pos + 1)
+		if err != nil {
+			log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos+1)
+			return false
+		}
+		msgForPrefetch = msg
+	}
+	if err = s.exec.DigestMessage(pos, msg, msgForPrefetch); err != nil {
 		logger := log.Warn
 		if prevMessageCount < msgCount {
 			logger = log.Debug
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index cf8f61e97a..fcb1c1ebcb 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -75,6 +75,9 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	}
 	payload := data[40:]
 
+	// Stage 1: Extract the payload from any data availability header.
+	// It's important that multiple DAS strategies can't both be invoked in the same batch,
+	// as these headers are validated by the sequencer inbox and not other DASs.
 	if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) {
 		if dasReader == nil {
 			log.Error("No DAS Reader configured, but sequencer message found with DAS header")
@@ -88,9 +91,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 				return parsedMsg, nil
 			}
 		}
-	}
-
-	if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) {
+	} else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) {
 		blobHashes := payload[1:]
 		if len(blobHashes)%len(common.Hash{}) != 0 {
 			return nil, fmt.Errorf("blob batch data is not a list of hashes as expected")
@@ -115,6 +116,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 		}
 	}
 
+	// Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging).
 	if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) {
 		pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen)))
 		if err != nil {
@@ -124,6 +126,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 		}
 		payload = pl
 	}
+	// Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list.
 	if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) {
 		decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen)
 		if err == nil {
diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go
index 20e9ca6f3b..003159589a 100644
--- a/execution/gethexec/executionengine.go
+++ b/execution/gethexec/executionengine.go
@@ -41,6 +41,8 @@ type ExecutionEngine struct {
 	nextScheduledVersionCheck time.Time // protected by the createBlocksMutex
 
 	reorgSequencing bool
+
+	prefetchBlock bool
 }
 
 func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) {
@@ -71,6 +73,16 @@ func (s *ExecutionEngine) EnableReorgSequencing() {
 	s.reorgSequencing = true
 }
 
+func (s *ExecutionEngine) EnablePrefetchBlock() {
+	if s.Started() {
+		panic("trying to enable prefetch block after start")
+	}
+	if s.prefetchBlock {
+		panic("trying to enable prefetch block when already set")
+	}
+	s.prefetchBlock = true
+}
+
 func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) {
 	if s.Started() {
 		panic("trying to set transaction streamer after start")
@@ -107,7 +119,11 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost
 		return err
 	}
 	for i := range newMessages {
-		err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i])
+		var msgForPrefetch *arbostypes.MessageWithMetadata
+		if i < len(newMessages)-1 {
+			msgForPrefetch = &newMessages[i]
+		}
+		err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch)
 		if err != nil {
 			return err
 		}
@@ -489,15 +505,20 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess
 	return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos)))
 }
 
-func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error {
+// DigestMessage is used to create a block by executing msg against the latest state and storing it.
+// Also, while creating a block by executing msg against the latest state,
+// it creates, in parallel, a block by executing msgForPrefetch (msg+1) against the latest state,
+// but does not store that block.
+// This helps fill the caches, so that the next block creation is faster.
+func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error {
 	if !s.createBlocksMutex.TryLock() {
 		return errors.New("createBlock mutex held")
 	}
 	defer s.createBlocksMutex.Unlock()
-	return s.digestMessageWithBlockMutex(num, msg)
+	return s.digestMessageWithBlockMutex(num, msg, msgForPrefetch)
 }
 
-func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error {
+func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error {
 	currentHeader, err := s.getCurrentHeader()
 	if err != nil {
 		return err
@@ -511,11 +532,23 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex,
 	}
 
 	startTime := time.Now()
+	var wg sync.WaitGroup
+	if s.prefetchBlock && msgForPrefetch != nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			_, _, _, err := s.createBlockFromNextMessage(msgForPrefetch)
+			if err != nil {
+				return
+			}
+		}()
+	}
+
 	block, statedb, receipts, err := s.createBlockFromNextMessage(msg)
 	if err != nil {
 		return err
 	}
-
+	wg.Wait()
 	err = s.appendBlock(block, statedb, receipts, time.Since(startTime))
 	if err != nil {
 		return err
diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go
index 00337cc355..1ad73febe7 100644
--- a/execution/gethexec/node.go
+++ b/execution/gethexec/node.go
@@ -311,8 +311,8 @@ func (n *ExecutionNode) StopAndWait() {
 	// }
 }
 
-func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error {
-	return n.ExecEngine.DigestMessage(num, msg)
+func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error {
+	return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch)
 }
 func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error {
 	return n.ExecEngine.Reorg(count, newMessages, oldMessages)
diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go
index 5db38cbb4d..9bc6f4378d 100644
--- a/execution/gethexec/sequencer.go
+++ b/execution/gethexec/sequencer.go
@@ -66,6 +66,7 @@ type SequencerConfig struct {
 	MaxTxDataSize           int           `koanf:"max-tx-data-size" reload:"hot"`
 	NonceFailureCacheSize   int           `koanf:"nonce-failure-cache-size" reload:"hot"`
 	NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"`
+	EnablePrefetchBlock     bool          `koanf:"enable-prefetch-block"`
 }
 
 func (c *SequencerConfig) Validate() error {
@@ -97,6 +98,7 @@ var DefaultSequencerConfig = SequencerConfig{
 	MaxTxDataSize:           95000,
 	NonceFailureCacheSize:   1024,
 	NonceFailureCacheExpiry: time.Second,
+	EnablePrefetchBlock:     false,
 }
 
 var TestSequencerConfig = SequencerConfig{
@@ -112,6 +114,7 @@ var TestSequencerConfig = SequencerConfig{
 	MaxTxDataSize:           95000,
 	NonceFailureCacheSize:   1024,
 	NonceFailureCacheExpiry: time.Second,
+	EnablePrefetchBlock:     false,
 }
 
 func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) {
@@ -127,6 +130,7 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept")
 	f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor")
 	f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high")
+	f.Bool(prefix+".enable-prefetch-block", DefaultSequencerConfig.EnablePrefetchBlock, "enable prefetching of blocks")
 }
 
 type txQueueItem struct {
@@ -324,6 +328,9 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead
 	}
 	s.Pause()
 	execEngine.EnableReorgSequencing()
+	if config.EnablePrefetchBlock {
+		execEngine.EnablePrefetchBlock()
+	}
 	return s, nil
 }
 
diff --git a/execution/interface.go b/execution/interface.go
index 5f7c01719e..6761011a77 100644
--- a/execution/interface.go
+++ b/execution/interface.go
@@ -28,7 +28,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken")
 
 // always needed
 type ExecutionClient interface {
-	DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error
+	DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error
 	Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error
 	HeadMessageNumber() (arbutil.MessageIndex, error)
 	HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error)
diff --git a/linter/koanf/handlers.go b/linters/koanf/handlers.go
similarity index 99%
rename from linter/koanf/handlers.go
rename to linters/koanf/handlers.go
index 5826004014..5ee3b80f9f 100644
--- a/linter/koanf/handlers.go
+++ b/linters/koanf/handlers.go
@@ -1,4 +1,4 @@
-package main
+package koanf
 
 import (
 	"fmt"
diff --git a/linter/koanf/koanf.go b/linters/koanf/koanf.go
similarity index 92%
rename from linter/koanf/koanf.go
rename to linters/koanf/koanf.go
index d6780760e7..e53064b6b3 100644
--- a/linter/koanf/koanf.go
+++ b/linters/koanf/koanf.go
@@ -1,4 +1,4 @@
-package main
+package koanf
 
 import (
 	"errors"
@@ -8,7 +8,6 @@ import (
 	"reflect"
 
 	"golang.org/x/tools/go/analysis"
-	"golang.org/x/tools/go/analysis/singlechecker"
 )
 
 var (
@@ -18,10 +17,6 @@ var (
 	errIncorrectFlag = errors.New("mismatching flag initialization")
 )
 
-func New(conf any) ([]*analysis.Analyzer, error) {
-	return []*analysis.Analyzer{Analyzer}, nil
-}
-
 var Analyzer = &analysis.Analyzer{
 	Name: "koanfcheck",
 	Doc:  "check for koanf misconfigurations",
@@ -101,7 +96,3 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
 	}
 	return ret, nil
 }
-
-func main() {
-	singlechecker.Main(Analyzer)
-}
diff --git a/linter/koanf/koanf_test.go b/linters/koanf/koanf_test.go
similarity index 95%
rename from linter/koanf/koanf_test.go
rename to linters/koanf/koanf_test.go
index 064ae533c4..9029951dfa 100644
--- a/linter/koanf/koanf_test.go
+++ b/linters/koanf/koanf_test.go
@@ -1,4 +1,4 @@
-package main
+package koanf
 
 import (
 	"errors"
@@ -20,7 +20,7 @@ func testData(t *testing.T) string {
 	t.Helper()
 	wd, err := os.Getwd()
 	if err != nil {
-		t.Fatalf("Failed to get wd: %s", err)
+		t.Fatalf("Failed to get working directory: %v", err)
 	}
 	return filepath.Join(filepath.Dir(wd), "testdata")
 }
diff --git a/linters/linters.go b/linters/linters.go
new file mode 100644
index 0000000000..a6c9f6d55e
--- /dev/null
+++ b/linters/linters.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"github.com/offchainlabs/nitro/linters/koanf"
+	"github.com/offchainlabs/nitro/linters/pointercheck"
+	"github.com/offchainlabs/nitro/linters/rightshift"
+	"github.com/offchainlabs/nitro/linters/structinit"
+	"golang.org/x/tools/go/analysis/multichecker"
+)
+
+func main() {
+	multichecker.Main(
+		koanf.Analyzer,
+		pointercheck.Analyzer,
+		rightshift.Analyzer,
+		structinit.Analyzer,
+	)
+}
diff --git a/linter/pointercheck/pointer.go b/linters/pointercheck/pointercheck.go
similarity index 91%
rename from linter/pointercheck/pointer.go
rename to linters/pointercheck/pointercheck.go
index 6500b01222..682ebd9357 100644
--- a/linter/pointercheck/pointer.go
+++ b/linters/pointercheck/pointercheck.go
@@ -1,4 +1,4 @@
-package main
+package pointercheck
 
 import (
 	"fmt"
@@ -8,13 +8,8 @@ import (
 	"reflect"
 
 	"golang.org/x/tools/go/analysis"
-	"golang.org/x/tools/go/analysis/singlechecker"
 )
 
-func New(conf any) ([]*analysis.Analyzer, error) {
-	return []*analysis.Analyzer{Analyzer}, nil
-}
-
 var Analyzer = &analysis.Analyzer{
 	Name: "pointercheck",
 	Doc:  "check for pointer comparison",
@@ -94,7 +89,3 @@ func ptrIdent(pass *analysis.Pass, e ast.Expr) bool {
 	}
 	return false
 }
-
-func main() {
-	singlechecker.Main(Analyzer)
-}
diff --git a/linter/pointercheck/pointer_test.go b/linters/pointercheck/pointercheck_test.go
similarity index 88%
rename from linter/pointercheck/pointer_test.go
rename to linters/pointercheck/pointercheck_test.go
index 290e3826de..24f4534bca 100644
--- a/linter/pointercheck/pointer_test.go
+++ b/linters/pointercheck/pointercheck_test.go
@@ -1,4 +1,4 @@
-package main
+package pointercheck
 
 import (
 	"os"
@@ -11,7 +11,7 @@ import (
 func TestAll(t *testing.T) {
 	wd, err := os.Getwd()
 	if err != nil {
-		t.Fatalf("Failed to get wd: %s", err)
+		t.Fatalf("Failed to get working directory: %v", err)
 	}
 	testdata := filepath.Join(filepath.Dir(wd), "testdata")
 	res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck")
diff --git a/linters/rightshift/rightshift.go b/linters/rightshift/rightshift.go
new file mode 100644
index 0000000000..d6fcbfec6c
--- /dev/null
+++ b/linters/rightshift/rightshift.go
@@ -0,0 +1,71 @@
+package rightshift
+
+import (
+	"go/ast"
+	"go/token"
+	"reflect"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+var Analyzer = &analysis.Analyzer{
+	Name:       "rightshift",
+	Doc:        "check for 1 >> x operation",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(false, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+var analyzerForTests = &analysis.Analyzer{
+	Name:       "testrightshift",
+	Doc:        "check for 1 >> x operation (for tests)",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(true, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+// rightShiftError indicates the position of a rightshift operation.
+type rightShiftError struct {
+	Pos     token.Position
+	Message string
+}
+
+// Result is returned from the run function, and holds all the rightshift
+// operations that were found.
+type Result struct {
+	Errors []rightShiftError
+}
+
+func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
+	var ret Result
+	for _, f := range pass.Files {
+		ast.Inspect(f, func(node ast.Node) bool {
+			be, ok := node.(*ast.BinaryExpr)
+			if !ok {
+				return true
+			}
+			// Check if the expression is '1 >> x'.
+			if be.Op == token.SHR && isOne(be.X) {
+				err := rightShiftError{
+					Pos:     pass.Fset.Position(be.Pos()),
+					Message: "found rightshift ('1 >> x') expression, did you mean '1 << x' ?",
+				}
+				ret.Errors = append(ret.Errors, err)
+				if !dryRun {
+					pass.Report(analysis.Diagnostic{
+						Pos:      pass.Fset.File(f.Pos()).Pos(err.Pos.Offset),
+						Message:  err.Message,
+						Category: "rightshift",
+					})
+				}
+			}
+			return true
+		},
+		)
+	}
+	return ret, nil
+}
+
+// isOne checks if the expression is a constant 1.
+func isOne(expr ast.Expr) bool {
+	bl, ok := expr.(*ast.BasicLit)
+	return ok && bl.Kind == token.INT && bl.Value == "1"
+}
diff --git a/linters/rightshift/rightshift_test.go b/linters/rightshift/rightshift_test.go
new file mode 100644
index 0000000000..3640d79975
--- /dev/null
+++ b/linters/rightshift/rightshift_test.go
@@ -0,0 +1,36 @@
+package rightshift
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+func TestAll(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Failed to get working directory: %v", err)
+	}
+	testdata := filepath.Join(filepath.Dir(wd), "testdata")
+	res := analysistest.Run(t, testdata, analyzerForTests, "rightshift")
+	want := []int{6, 11, 12}
+	got := errorLines(res)
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("analysistest.Run() unexpected diff in error lines:\n%s\n", diff)
+	}
+}
+
+func errorLines(errs []*analysistest.Result) []int {
+	var ret []int
+	for _, e := range errs {
+		if r, ok := e.Result.(Result); ok {
+			for _, err := range r.Errors {
+				ret = append(ret, err.Pos.Line)
+			}
+		}
+	}
+	return ret
+}
diff --git a/linter/structinit/structinit.go b/linters/structinit/structinit.go
similarity index 93%
rename from linter/structinit/structinit.go
rename to linters/structinit/structinit.go
index e4e65bc3fc..236b8747b2 100644
--- a/linter/structinit/structinit.go
+++ b/linters/structinit/structinit.go
@@ -1,4 +1,4 @@
-package main
+package structinit
 
 import (
 	"fmt"
@@ -8,7 +8,6 @@ import (
 	"strings"
 
 	"golang.org/x/tools/go/analysis"
-	"golang.org/x/tools/go/analysis/singlechecker"
 )
 
 // Tip for linter that struct that has this comment should be included in the
@@ -16,10 +15,6 @@ import (
 // Note: comment should be directly line above the struct definition.
 const linterTip = "// lint:require-exhaustive-initialization"
 
-func New(conf any) ([]*analysis.Analyzer, error) {
-	return []*analysis.Analyzer{Analyzer}, nil
-}
-
 // Analyzer implements struct analyzer for structs that are annotated with
 // `linterTip`, it checks that every instantiation initializes all the fields.
 var Analyzer = &analysis.Analyzer{
@@ -116,7 +111,3 @@ type position struct {
 	fileName string
 	line     int
 }
-
-func main() {
-	singlechecker.Main(Analyzer)
-}
diff --git a/linter/structinit/structinit_test.go b/linters/structinit/structinit_test.go
similarity index 89%
rename from linter/structinit/structinit_test.go
rename to linters/structinit/structinit_test.go
index db3676e185..57dfc2b000 100644
--- a/linter/structinit/structinit_test.go
+++ b/linters/structinit/structinit_test.go
@@ -1,4 +1,4 @@
-package main
+package structinit
 
 import (
 	"os"
@@ -12,7 +12,7 @@ func testData(t *testing.T) string {
 	t.Helper()
 	wd, err := os.Getwd()
 	if err != nil {
-		t.Fatalf("Failed to get wd: %s", err)
+		t.Fatalf("Failed to get working directory: %v", err)
 	}
 	return filepath.Join(filepath.Dir(wd), "testdata")
 }
diff --git a/linter/testdata/src/koanf/a/a.go b/linters/testdata/src/koanf/a/a.go
similarity index 100%
rename from linter/testdata/src/koanf/a/a.go
rename to linters/testdata/src/koanf/a/a.go
diff --git a/linter/testdata/src/koanf/b/b.go b/linters/testdata/src/koanf/b/b.go
similarity index 100%
rename from linter/testdata/src/koanf/b/b.go
rename to linters/testdata/src/koanf/b/b.go
diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linters/testdata/src/pointercheck/pointercheck.go
similarity index 100%
rename from linter/testdata/src/pointercheck/pointercheck.go
rename to linters/testdata/src/pointercheck/pointercheck.go
diff --git a/linters/testdata/src/rightshift/rightshift.go b/linters/testdata/src/rightshift/rightshift.go
new file mode 100644
index 0000000000..3ad6d95980
--- /dev/null
+++ b/linters/testdata/src/rightshift/rightshift.go
@@ -0,0 +1,14 @@
+package rightshift
+
+import "fmt"
+
+func doThing(v int) int {
+	return 1 >> v // Error: Ln: 6
+}
+
+func calc() {
+	val := 10
+	fmt.Printf("%v", 1>>val) // Error: Ln 11
+	_ = doThing(1 >> val) // Error: Ln 12
+	fmt.Printf("%v", 1<