From cee7d1fb02cf17d06a71e71ef070c139be3ed406 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 8 Apr 2024 16:41:30 -0500 Subject: [PATCH 01/92] Improve blocks re-execution and make it compatible with --init.then-quit --- blocks_reexecutor/blocks_reexecutor.go | 51 ++++++++++++++++++-------- cmd/nitro/nitro.go | 26 ++++++++++--- system_tests/blocks_reexecutor_test.go | 26 +++++-------- 3 files changed, 65 insertions(+), 38 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index bb6de00cad..bedea37776 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -42,10 +42,9 @@ func (c *Config) Validate() error { } var DefaultConfig = Config{ - Enable: false, - Mode: "random", - Room: runtime.NumCPU(), - BlocksPerThread: 10000, + Enable: false, + Mode: "random", + Room: runtime.NumCPU(), } var TestConfig = Config{ @@ -84,25 +83,38 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block start = chainStart end = chainEnd } - if start < chainStart { - log.Warn("state reexecutor's start block number is lower than genesis, resetting to genesis") + if start < chainStart || start > chainEnd { + log.Warn("invalid state reexecutor's start block number, resetting to genesis", "start", start, "genesis", chainStart) start = chainStart } - if end > chainEnd { - log.Warn("state reexecutor's end block number is greater than latest, resetting to latest") + if end > chainEnd || end < chainStart { + log.Warn("invalid state reexecutor's end block number, resetting to latest", "end", end, "latest", chainEnd) end = chainEnd } if c.Mode == "random" && end != start { - if c.BlocksPerThread > end-start { - c.BlocksPerThread = end - start + // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly + rng := uint64(10000) + if c.BlocksPerThread != 0 { + rng = c.BlocksPerThread + } + if rng > end-start { + rng = end - start } - start += uint64(rand.Intn(int(end - start - c.BlocksPerThread + 1))) - end = start + c.BlocksPerThread + start += uint64(rand.Intn(int(end - start - rng + 1))) + end = start + rng } - // inclusive of block reexecution [start, end] + // Inclusive of block reexecution [start, end] if start > 0 { start-- } + // Divide work equally among available threads + if c.BlocksPerThread == 0 { + c.BlocksPerThread = 10000 + work := (end - start) / uint64(c.Room) + if work > 0 { + c.BlocksPerThread = work + } + } return &BlocksReExecutor{ config: c, blockchain: blockchain, @@ -125,11 +137,13 @@ func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentB } // we don't use state release pattern here // TODO do we want to use release pattern here? 
- startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + startState, startHeader, release, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock } + // NoOp + defer release() start = startHeader.Number.Uint64() s.LaunchThread(func(ctx context.Context) { _, err := arbitrum.AdvanceStateUpToBlock(ctx, s.blockchain, startState, s.blockchain.GetHeaderByNumber(currentBlock), startHeader, nil) @@ -169,9 +183,14 @@ func (s *BlocksReExecutor) Impl(ctx context.Context) { log.Info("BlocksReExecutor successfully completed re-execution of blocks against historic state", "stateAt", s.startBlock, "startBlock", s.startBlock+1, "endBlock", end) } -func (s *BlocksReExecutor) Start(ctx context.Context) { +func (s *BlocksReExecutor) Start(ctx context.Context, done chan struct{}) { s.StopWaiter.Start(ctx, s) - s.LaunchThread(s.Impl) + s.LaunchThread(func(ctx context.Context) { + s.Impl(ctx) + if done != nil { + close(done) + } + }) } func (s *BlocksReExecutor) StopAndWait() { diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 997adf9369..59241204f1 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -494,6 +494,25 @@ func mainImpl() int { return 1 } + fatalErrChan := make(chan error, 10) + + var blocksReExecutor *blocksreexecutor.BlocksReExecutor + if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { + blocksReExecutor = blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) + if nodeConfig.Init.ThenQuit { + success := make(chan struct{}) + blocksReExecutor.Start(ctx, success) + deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) + select { + case err := <-fatalErrChan: + log.Error("shutting down due to fatal error", "err", err) + defer log.Error("shut down due to fatal error", "err", err) + return 1 + case <-success: + } + } + } + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -514,8 +533,6 @@ func mainImpl() int { return 1 } - fatalErrChan := make(chan error, 10) - var valNode *valnode.ValidationNode if sameProcessValidationNodeEnabled { valNode, err = valnode.CreateValidationNode( @@ -644,9 +661,8 @@ func mainImpl() int { // remove previous deferFuncs, StopAndWait closes database and blockchain. 
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } - if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { - blocksReExecutor := blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) - blocksReExecutor.Start(ctx) + if blocksReExecutor != nil && !nodeConfig.Init.ThenQuit { + blocksReExecutor.Start(ctx, nil) deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) } diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go index c2941ddcc4..66690d1427 100644 --- a/system_tests/blocks_reexecutor_test.go +++ b/system_tests/blocks_reexecutor_test.go @@ -45,16 +45,11 @@ func TestBlocksReExecutorModes(t *testing.T) { } } + // Reexecute blocks at mode full success := make(chan struct{}) + executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) + executorFull.Start(ctx, success) - // Reexecute blocks at mode full - go func() { - executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) - executorFull.StopWaiter.Start(ctx, executorFull) - executorFull.Impl(ctx) - executorFull.StopAndWait() - success <- struct{}{} - }() select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) @@ -66,15 +61,12 @@ func TestBlocksReExecutorModes(t *testing.T) { } // Reexecute blocks at mode random - go func() { - c := &blocksreexecutor.TestConfig - c.Mode = "random" - executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) - executorRandom.StopWaiter.Start(ctx, executorRandom) - executorRandom.Impl(ctx) - executorRandom.StopAndWait() - success <- struct{}{} - }() + success = make(chan struct{}) + c := &blocksreexecutor.TestConfig + c.Mode = "random" + executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) + executorRandom.Start(ctx, success) + select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) From 82a68c6338567a6d3cafb1c6761e2b6a68dbd792 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 29 Apr 2024 16:13:32 +0200 Subject: [PATCH 02/92] validate rpc sub-configs, move out ValidationInputToJson and ValidationInputFromJson into server api package --- staker/block_validator.go | 9 +++++ validator/client/validation_client.go | 33 +++-------------- validator/server_api/json.go | 52 +++++++++++++++++++++++++++ validator/valnode/validation_api.go | 38 ++------------------ 4 files changed, 68 insertions(+), 64 deletions(-) diff --git a/staker/block_validator.go b/staker/block_validator.go index 0cde4423c0..d8258a52ef 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -122,6 +122,12 @@ func (c *BlockValidatorConfig) Validate() error { c.ValidationServerConfigs = validationServersConfigs } } + for _, vc := range c.ValidationServerConfigs { + if err := vc.Validate(); err != nil { + return fmt.Errorf("validating validation server configs: %w", err) + } + } + if len(c.ValidationServerConfigs) == 0 && !streamsEnabled { return fmt.Errorf("block-validator validation-server-configs is empty, need at least one validation server config") } @@ -133,6 +139,9 @@ func (c *BlockValidatorConfig) Validate() error { if err := c.ExecutionServerConfig.Validate(); err != nil { return fmt.Errorf("validating execution server config: %w", err) } + if err := c.ValidationServer.Validate(); err != nil { + return fmt.Errorf("validating validation server config: %w", err) + } return nil } diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index 
24e51230d6..351575d78e 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -7,11 +7,9 @@ import ( "sync/atomic" "time" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/util/containers" - "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -39,7 +37,7 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node) func (c *ValidationClient) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { atomic.AddInt32(&c.room, -1) promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](c, func(ctx context.Context) (validator.GoGlobalState, error) { - input := ValidationInputToJson(entry) + input := server_api.ValidationInputToJson(entry) var res validator.GoGlobalState err := c.client.CallContext(ctx, &res, server_api.Namespace+"_validate", input, moduleRoot) atomic.AddInt32(&c.room, 1) @@ -84,10 +82,7 @@ func (c *ValidationClient) Stop() { } func (c *ValidationClient) Name() string { - if c.Started() { - return c.name - } - return "(not started)" + return c.name } func (c *ValidationClient) Room() int { @@ -111,7 +106,7 @@ func NewExecutionClient(config rpcclient.ClientConfigFetcher, stack *node.Node) func (c *ExecutionClient) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] { return stopwaiter.LaunchPromiseThread[validator.ExecutionRun](c, func(ctx context.Context) (validator.ExecutionRun, error) { var res uint64 - err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, ValidationInputToJson(input)) + err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, server_api.ValidationInputToJson(input)) if err != nil { return nil, err } @@ -142,7 +137,7 @@ func (c *ExecutionClient) LatestWasmModuleRoot() containers.PromiseInterface[com } func (c *ExecutionClient) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] { - jsonInput := ValidationInputToJson(input) + jsonInput := server_api.ValidationInputToJson(input) return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) { err := c.client.CallContext(ctx, nil, server_api.Namespace+"_writeToFile", jsonInput, expOut, moduleRoot) return struct{}{}, err @@ -211,23 +206,3 @@ func (r *ExecutionClientRun) Close() { } }) } - -func ValidationInputToJson(entry *validator.ValidationInput) *server_api.InputJSON { - jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) - for ty, preimages := range entry.Preimages { - jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) - } - res := &server_api.InputJSON{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), - StartState: entry.StartState, - PreimagesB64: jsonPreimagesMap, - } - for _, binfo := range entry.BatchInfo { - encData := base64.StdEncoding.EncodeToString(binfo.Data) - res.BatchInfo = append(res.BatchInfo, server_api.BatchInfoJson{Number: binfo.Number, DataB64: encData}) - } - return res -} diff --git a/validator/server_api/json.go b/validator/server_api/json.go index 8c80768b14..2417886b07 100644 --- 
a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -4,6 +4,7 @@ package server_api import ( + "encoding/base64" "fmt" "github.com/ethereum/go-ethereum/common" @@ -63,3 +64,54 @@ type BatchInfoJson struct { Number uint64 DataB64 string } + +func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON { + jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) + for ty, preimages := range entry.Preimages { + jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) + } + res := &InputJSON{ + Id: entry.Id, + HasDelayedMsg: entry.HasDelayedMsg, + DelayedMsgNr: entry.DelayedMsgNr, + DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), + StartState: entry.StartState, + PreimagesB64: jsonPreimagesMap, + } + for _, binfo := range entry.BatchInfo { + encData := base64.StdEncoding.EncodeToString(binfo.Data) + res.BatchInfo = append(res.BatchInfo, BatchInfoJson{Number: binfo.Number, DataB64: encData}) + } + return res +} + +func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, error) { + preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) + for ty, jsonPreimages := range entry.PreimagesB64 { + preimages[ty] = jsonPreimages.Map + } + valInput := &validator.ValidationInput{ + Id: entry.Id, + HasDelayedMsg: entry.HasDelayedMsg, + DelayedMsgNr: entry.DelayedMsgNr, + StartState: entry.StartState, + Preimages: preimages, + } + delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) + if err != nil { + return nil, err + } + valInput.DelayedMsg = delayed + for _, binfo := range entry.BatchInfo { + data, err := base64.StdEncoding.DecodeString(binfo.DataB64) + if err != nil { + return nil, err + } + decInfo := validator.BatchInfo{ + Number: binfo.Number, + Data: data, + } + valInput.BatchInfo = append(valInput.BatchInfo, decInfo) + } + return valInput, nil +} diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go index 432e5eedd9..ad5c62e27f 100644 --- a/validator/valnode/validation_api.go +++ b/validator/valnode/validation_api.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_api" @@ -30,7 +29,7 @@ func (a *ValidationServerAPI) Room() int { } func (a *ValidationServerAPI) Validate(ctx context.Context, entry *server_api.InputJSON, moduleRoot common.Hash) (validator.GoGlobalState, error) { - valInput, err := ValidationInputFromJson(entry) + valInput, err := server_api.ValidationInputFromJson(entry) if err != nil { return validator.GoGlobalState{}, err } @@ -70,7 +69,7 @@ func NewExecutionServerAPI(valSpawner validator.ValidationSpawner, execution val } func (a *ExecServerAPI) CreateExecutionRun(ctx context.Context, wasmModuleRoot common.Hash, jsonInput *server_api.InputJSON) (uint64, error) { - input, err := ValidationInputFromJson(jsonInput) + input, err := server_api.ValidationInputFromJson(jsonInput) if err != nil { return 0, err } @@ -108,7 +107,7 @@ func (a *ExecServerAPI) Start(ctx_in context.Context) { } func (a *ExecServerAPI) WriteToFile(ctx context.Context, jsonInput *server_api.InputJSON, expOut validator.GoGlobalState, moduleRoot common.Hash) error { - input, err := ValidationInputFromJson(jsonInput) + input, err := server_api.ValidationInputFromJson(jsonInput) if err != nil { return err } @@ -182,34 +181,3 @@ func (a *ExecServerAPI) CloseExec(execid 
uint64) { run.run.Close() delete(a.runs, execid) } - -func ValidationInputFromJson(entry *server_api.InputJSON) (*validator.ValidationInput, error) { - preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - for ty, jsonPreimages := range entry.PreimagesB64 { - preimages[ty] = jsonPreimages.Map - } - valInput := &validator.ValidationInput{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - StartState: entry.StartState, - Preimages: preimages, - } - delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) - if err != nil { - return nil, err - } - valInput.DelayedMsg = delayed - for _, binfo := range entry.BatchInfo { - data, err := base64.StdEncoding.DecodeString(binfo.DataB64) - if err != nil { - return nil, err - } - decInfo := validator.BatchInfo{ - Number: binfo.Number, - Data: data, - } - valInput.BatchInfo = append(valInput.BatchInfo, decInfo) - } - return valInput, nil -} From 6a1f54d59117ef600bd140fd1b50b1b99b1ddc8d Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 30 Apr 2024 18:44:59 +0100 Subject: [PATCH 03/92] Add option for websocket message size limit This plumbs through the websocket message size limit option for all rpc clients. --- cmd/conf/chain.go | 2 ++ util/rpcclient/rpcclient.go | 29 +++++++++++++++++------------ 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 531945b4d6..8ad853e7aa 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -25,6 +25,8 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ Timeout: time.Minute, ConnectionWait: time.Minute, ArgLogLimit: 2048, + // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } var L1ConfigDefault = ParentChainConfig{ diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 02b41cf15d..cc6f11c986 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -21,14 +21,15 @@ import ( ) type ClientConfig struct { - URL string `json:"url,omitempty" koanf:"url"` - JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` - Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` - Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` - ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` - ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` - RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` - RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + URL string `json:"url,omitempty" koanf:"url"` + JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` + Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` + Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` + ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` + ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` + RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` + RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + WebsocketMessageSizeLimit int64 `json:"websocket-message-size-limit,omitempty" koanf:"websocket-message-size-limit"` retryErrors *regexp.Regexp } @@ -46,8 +47,9 @@ func (c *ClientConfig) Validate() error { type ClientConfigFetcher func() *ClientConfig var TestClientConfig = ClientConfig{ - 
URL: "self", - JWTSecret: "", + URL: "self", + JWTSecret: "", + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } var DefaultClientConfig = ClientConfig{ @@ -56,6 +58,8 @@ var DefaultClientConfig = ClientConfig{ Retries: 3, RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", ArgLogLimit: 2048, + // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientConfig) { @@ -67,6 +71,7 @@ func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientCo f.Uint(prefix+".retries", defaultConfig.Retries, "number of retries in case of failure(0 mean one attempt)") f.String(prefix+".retry-errors", defaultConfig.RetryErrors, "Errors matching this regular expression are automatically retried") f.Duration(prefix+".retry-delay", defaultConfig.RetryDelay, "delay between retries") + f.Int64(prefix+".websocket-message-size-limit", defaultConfig.WebsocketMessageSizeLimit, "websocket message size limit used by the RPC client. 0 means no limit") } type RpcClient struct { @@ -256,9 +261,9 @@ func (c *RpcClient) Start(ctx_in context.Context) error { var err error var client *rpc.Client if jwt == nil { - client, err = rpc.DialContext(ctx, url) + client, err = rpc.DialOptions(ctx, url, rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } else { - client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt)))) + client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt))), rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } cancelCtx() if err == nil { From 4ee9d7afffad46db163690076518b090d6d8fad0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 2 May 2024 13:02:57 -0500 Subject: [PATCH 04/92] Small fixes to "multi exec servers" --- Dockerfile | 3 ++- staker/stateless_block_validator.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7cba82d4fc..25a5ff7fd4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -276,7 +276,8 @@ USER root RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ - apt-get install -y xxd netcat-traditional + apt-get install -y xxd netcat-traditional && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* COPY scripts/split-val-entry.sh /usr/local/bin ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] USER user diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 48c638f114..c1324098f8 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -398,7 +398,7 @@ func (v *StatelessBlockValidator) ValidateResult( } } if run == nil { - return false, nil, fmt.Errorf("validation woth WasmModuleRoot %v not supported by node", moduleRoot) + return false, nil, fmt.Errorf("validation with WasmModuleRoot %v not supported by node", moduleRoot) } defer run.Cancel() gsEnd, err := run.Await(ctx) From 64872b9abfbcc4f6f0efe2a132fcb7911fa43d09 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Sun, 5 May 2024 19:34:56 -0600 Subject: [PATCH 05/92] docker: update debian to bookworm --- Dockerfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index f6ecbd7b73..bef042628d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -90,7 
+90,7 @@ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upg COPY --from=contracts-builder workspace/.make/ .make/ RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin -FROM rust:1.75-slim-bullseye as prover-header-builder +FROM rust:1.75-slim-bookworm as prover-header-builder WORKDIR /workspace RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -115,15 +115,15 @@ RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-prover-header FROM scratch as prover-header-export COPY --from=prover-header-builder /workspace/target/ / -FROM rust:1.75-slim-bullseye as prover-builder +FROM rust:1.75-slim-bookworm as prover-builder-setup WORKDIR /workspace RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ - apt-get install -y make wget gpg software-properties-common zlib1g-dev libstdc++-10-dev wabt + apt-get install -y make wget gpg software-properties-common zlib1g-dev libstdc++-12-dev wabt RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \ - add-apt-repository 'deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-15 main' && \ + add-apt-repository 'deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-15 main' && \ apt-get update && \ - apt-get install -y llvm-15-dev libclang-common-15-dev libpolly-15-dev + apt-get install -y llvm-15-dev libclang-common-15-dev COPY --from=brotli-library-export / target/ COPY arbitrator/Cargo.* arbitrator/ COPY arbitrator/arbutil arbitrator/arbutil @@ -211,7 +211,7 @@ RUN mkdir 0x965a35130f4e34b7b2339eac03b2eacc659e2dafe850d213ea6a7cdf9edfa99f && wget https://stylus-wasm-17f27dd494229dfd10d4e756f7e2fb953e83bd3d1be8278b33a.s3.us-west-2.amazonaws.com/0x965a35130f4e34b7b2339eac03b2eacc659e2dafe850d213ea6a7cdf9edfa99f/replay.wasm && \ wget https://stylus-wasm-17f27dd494229dfd10d4e756f7e2fb953e83bd3d1be8278b33a.s3.us-west-2.amazonaws.com/0x965a35130f4e34b7b2339eac03b2eacc659e2dafe850d213ea6a7cdf9edfa99f/machine.wavm.br -FROM golang:1.21-bullseye as node-builder +FROM golang:1.21-bookworm as node-builder WORKDIR /workspace ARG version="" ARG datetime="" From 5ad7dc962fe395492e48cdb97d8accb811b333ba Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Sun, 5 May 2024 19:36:31 -0600 Subject: [PATCH 06/92] Docker: nitro-val uses split to validate pre-stylus blocks --- Dockerfile | 10 ++++++++-- scripts/split-val-entry.sh | 5 +++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index bef042628d..bda9173860 100644 --- a/Dockerfile +++ b/Dockerfile @@ -292,7 +292,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ USER user -FROM nitro-node as nitro-node-dev +FROM nitro-node as nitro-node-dev-base USER root # Copy in latest WASM module root RUN rm -f /home/user/target/machines/latest @@ -316,13 +316,19 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ USER user -FROM nitro-node-dev as nitro-node-split +FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy + +FROM nitro-node-dev-base as nitro-node-dev USER root RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y xxd netcat-traditional COPY scripts/split-val-entry.sh /usr/local/bin +COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines +RUN rm -rf /workspace/target/legacy-machines/latest +COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val +COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit ENTRYPOINT [ 
"/usr/local/bin/split-val-entry.sh" ] USER user diff --git a/scripts/split-val-entry.sh b/scripts/split-val-entry.sh index a5ee0709b6..6f56a8ec46 100755 --- a/scripts/split-val-entry.sh +++ b/scripts/split-val-entry.sh @@ -8,11 +8,12 @@ echo launching validation servers # add their port to wait loop # edit validation-server-configs-list to include the other nodes /usr/local/bin/nitro-val --file-logging.enable=false --auth.addr 127.0.0.10 --auth.origins 127.0.0.1 --auth.jwtsecret /tmp/nitro-val.jwt --auth.port 52000 & -for port in 52000; do +/home/user/nitro-legacy/bin/nitro-val --file-logging.enable=false --auth.addr 127.0.0.10 --auth.origins 127.0.0.1 --auth.jwtsecret /tmp/nitro-val.jwt --auth.port 52001 --validation.wasm.root-path /home/user/nitro-legacy/machines & +for port in 52000 52001; do while ! nc -w1 -z 127.0.0.10 $port; do echo waiting for validation port $port sleep 1 done done echo launching nitro-node -/usr/local/bin/nitro --node.block-validator.pending-upgrade-module-root="0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4" --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}]' "$@" +/usr/local/bin/nitro --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" From 508a5c4687f2794fda4f392e1e7ccc1708e9856e Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Sun, 5 May 2024 19:36:45 -0600 Subject: [PATCH 07/92] cargo update --- arbitrator/Cargo.lock | 280 +++++++++++++++++++++++------------------- 1 file changed, 153 insertions(+), 127 deletions(-) diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock index d3732147d4..70bdb1fa85 100644 --- a/arbitrator/Cargo.lock +++ b/arbitrator/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -42,18 +42,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "ansi_term" @@ -108,15 +108,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" 
dependencies = [ "addr2line", "cc", @@ -218,9 +218,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecheck" @@ -252,9 +252,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "c-kzg" @@ -283,12 +283,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.90" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -334,7 +335,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "scopeguard", - "windows-sys", + "windows-sys 0.33.0", ] [[package]] @@ -523,7 +524,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -545,7 +546,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core 0.20.8", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -555,7 +556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core", @@ -632,9 +633,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "enum-iterator" @@ -674,7 +675,7 @@ dependencies = [ "darling 0.20.8", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -732,9 +733,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if 1.0.0", "libc", @@ -775,11 +776,11 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.11", "allocator-api2", ] @@ -837,12 +838,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ 
"equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -881,9 +882,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jit" @@ -911,9 +912,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -950,9 +951,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libfuzzer-sys" @@ -980,9 +981,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1000,7 +1001,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -1012,11 +1013,20 @@ dependencies = [ "libc", ] +[[package]] +name = "mach2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +dependencies = [ + "libc", +] + [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -1095,9 +1105,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" dependencies = [ "num-bigint", "num-complex", @@ -1135,7 +1145,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1149,9 +1159,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -1172,9 +1182,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -1207,7 +1217,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1242,9 +1252,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", "parking_lot_core", @@ -1252,9 +1262,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1265,9 +1275,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "ppv-lite86" @@ -1310,9 +1320,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -1380,9 +1390,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -1434,9 +1444,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -1454,11 +1464,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -1475,9 +1485,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -1498,20 +1508,20 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "region" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e" +checksum = "e6b6ebd13bc009aef9cd476c1310d49ac354d36e240cf1bd753290f3dc7199a7" dependencies = [ "bitflags 1.3.2", "libc", - "mach", - "winapi", + "mach2", + "windows-sys 0.52.0", ] [[package]] @@ -1606,9 +1616,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "self_cell" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bf37232d3bb9a2c4e641ca2a11d83b5062066f88df7fed36c28772046d65ba" +checksum = "d369a96f978623eb3dc28807c4852d6cc617fed53da5d3c400feff1ef34a714a" [[package]] name = "semver" @@ -1618,9 +1628,9 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" dependencies = [ "serde_derive", ] @@ -1638,20 +1648,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -1756,9 +1766,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" dependencies = [ "serde", ] @@ -1853,9 +1863,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -1885,22 +1895,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -1948,7 +1958,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -1972,7 +1982,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -2004,9 +2014,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "user-host-trait" @@ -2021,9 +2031,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" [[package]] name = "vec_map" @@ -2064,7 +2074,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -2086,7 +2096,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2271,7 +2281,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" dependencies = [ "bitflags 2.5.0", - "indexmap 2.2.5", + "indexmap 2.2.6", "semver", ] @@ -2343,26 +2353,36 @@ dependencies = [ "windows_x86_64_msvc 0.33.0", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-targets" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.48.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -2372,9 +2392,9 @@ checksum = 
"cd761fd3eb9ab8cc1ed81e56e567f02dd82c4c837e48ac3b2181b9ffc5060807" [[package]] name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -2384,9 +2404,15 @@ checksum = "cab0cf703a96bab2dc0c02c0fa748491294bf9b7feb27e1f4f96340f208ada0e" [[package]] name = "windows_i686_gnu" -version = "0.48.5" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -2396,9 +2422,9 @@ checksum = "8cfdbe89cc9ad7ce618ba34abc34bbb6c36d99e96cae2245b7943cd75ee773d0" [[package]] name = "windows_i686_msvc" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -2408,15 +2434,15 @@ checksum = "b4dd9b0c0e9ece7bb22e84d70d01b71c6d6248b81a3c60d11869451b4cb24784" [[package]] name = "windows_x86_64_gnu" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -2426,9 +2452,9 @@ checksum = "ff1e4aa646495048ec7f3ffddc411e1d829c026a2ec62b39da15c1055e406eaa" [[package]] name = "windows_x86_64_msvc" -version = "0.48.5" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -2450,22 +2476,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -2485,5 +2511,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.60", ] From 323ce1503b57864b95f7bc877f011837e19e431a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 6 May 2024 17:24:29 -0600 Subject: [PATCH 08/92] fix Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index bda9173860..3f2ef84730 100644 --- a/Dockerfile +++ b/Dockerfile @@ -115,7 +115,7 @@ RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-prover-header FROM scratch as prover-header-export COPY --from=prover-header-builder /workspace/target/ / -FROM rust:1.75-slim-bookworm as prover-builder-setup +FROM rust:1.75-slim-bookworm as prover-builder WORKDIR /workspace RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ From 05092df4d445fe3fba5a661f2d5ddab4470208db Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 7 May 2024 10:42:19 -0500 Subject: [PATCH 09/92] Improve error messages for syncing nitro nodes with a beacon chain client lacking old enough blobs for sync --- util/headerreader/blob_client.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index 664dbb5e30..73849d0d3a 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -13,6 +13,7 @@ import ( "net/url" "os" "path" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -188,8 +189,14 @@ const trailingCharsOfResponse = 25 func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) - if err != nil { - return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) + if err != nil || len(rawData) == 0 { + // blobs are pruned after 4096 epochs (1 epoch = 32 slots), we determine if the requested slot were to be pruned by a non-archive endpoint + roughAgeOfSlot := uint64(time.Now().Unix()) - (b.genesisTime + slot*b.secondsPerSlot) + if roughAgeOfSlot > b.secondsPerSlot*32*4096 { + return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching older blobs in slot: %d, an archive endpoint is required, please refer to https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers, err: %w", slot, err) + } else { + return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching non-expired blobs in slot: %d, if using a prysm endpoint, try --enable-experimental-backfill flag, err: %w", slot, err) + } } var response []blobResponseItem if err := json.Unmarshal(rawData, &response); err != nil { From 04b9b373b6fb4e529c6a0b27d6fc847de97ee35d Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 7 May 2024 16:01:27 -0500 Subject: [PATCH 10/92] Block reexecutor should not try to reexecute genesis block --- blocks_reexecutor/blocks_reexecutor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index bedea37776..0ad4337e0f 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -104,7 +104,8 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block end = start + rng } // Inclusive of block reexecution [start, end] - if start > 0 { + // Do not reexecute genesis block i,e chainStart + if 
start > 0 && start != chainStart { start-- } // Divide work equally among available threads From 836459d68a8021accf285e6e9a503e5266140209 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 16:20:53 -0600 Subject: [PATCH 11/92] init: open database with separate wasm dir --- cmd/nitro/init.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a45ec054a1..750bf03516 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -172,13 +172,13 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { - if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "l2chaindata/", true); err == nil { + if readOnlyDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "", "l2chaindata/", true); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() if !arbmath.BigEquals(chainConfig.ChainID, chainId) { return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) } - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) if err != nil { return chainDb, nil, err } @@ -230,7 +230,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) if err != nil { return chainDb, nil, err } From e03a347e055fe1995cf97621e0e57a91675ca42a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 16:22:05 -0600 Subject: [PATCH 12/92] program: recreate activated stylus entry if needed --- arbos/programs/native.go | 72 ++++++++++++++++++++++++++++++++++---- arbos/programs/programs.go | 8 ++++- arbos/programs/wasm.go | 5 +++ 3 files changed, 78 insertions(+), 7 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 123dda54ce..2fe4822fb6 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" @@ -53,6 +54,24 @@ func activateProgram( debug bool, burner burn.Burner, ) (*activationInfo, error) { + info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner) + if err != nil { + return nil, err + } + 
db.ActivateWasm(info.moduleHash, asm, module) + return info, nil +} + +func activateProgramInternal( + db vm.StateDB, + program common.Address, + codehash common.Hash, + wasm []byte, + page_limit uint16, + version uint16, + debug bool, + burner burn.Burner, +) (*activationInfo, []byte, []byte, error) { output := &rustBytes{} asmLen := usize(0) moduleHash := &bytes32{} @@ -78,9 +97,9 @@ func activateProgram( log.Warn("activation failed", "err", err, "msg", msg, "program", program) } if errors.Is(err, vm.ErrExecutionReverted) { - return nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) + return nil, nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) } - return nil, err + return nil, nil, nil, err } hash := moduleHash.toHash() @@ -95,13 +114,55 @@ func activateProgram( asmEstimate: uint32(stylusData.asm_estimate), footprint: uint16(stylusData.footprint), } - db.ActivateWasm(hash, asm, module) - return info, err + return info, asm, module, err +} + +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Address, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { + localAsm, err := statedb.TryGetActivatedAsm(moduleHash) + if err == nil || len(localAsm) > 0 { + return localAsm, nil + } + + codeHash := statedb.GetCodeHash(address) + burner := burn.NewSystemBurner(nil, false) + + wasm, err := getWasm(statedb, address) + if err != nil { + return nil, err + } + + // we know program is activated, so it must be in correct version and not use too much memory + info, asm, module, err := activateProgramInternal(statedb, address, codeHash, wasm, pagelimit, program.version, debugMode, burner) + + if err != nil { + return nil, err + } + + if info.moduleHash != moduleHash { + return nil, errors.New("failed to re-activate program not found in database") + } + + currentHoursSince := hoursSinceArbitrum(time) + if currentHoursSince > program.activatedAt { + // stylus program is active on-chain, and was activated in the past + // so we store it directly to database + batch := statedb.Database().WasmStore().NewBatch() + rawdb.WriteActivation(batch, moduleHash, asm, module) + if err := batch.Write(); err != nil { + log.Error("failed writing re-activation to state", "address", address, "err", err) + } + } else { + // program activated recently, possibly in this eth_call + // store it to statedb. 
It will be stored to database if statedb is committed + statedb.ActivateWasm(info.moduleHash, asm, module) + } + return asm, nil } func callProgram( address common.Address, moduleHash common.Hash, + localAsm []byte, scope *vm.ScopeContext, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, @@ -111,7 +172,6 @@ func callProgram( memoryModel *MemoryModel, ) ([]byte, error) { db := interpreter.Evm().StateDB - asm := db.GetActivatedAsm(moduleHash) debug := stylusParams.debugMode if db, ok := db.(*state.StateDB); ok { @@ -123,7 +183,7 @@ func callProgram( output := &rustBytes{} status := userStatus(C.stylus_call( - goSlice(asm), + goSlice(localAsm), goSlice(calldata), stylusParams.encode(), evmApi.cNative, diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 779f2d6c67..b277f5d678 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -205,6 +205,12 @@ func (p Programs) CallProgram( statedb.AddStylusPages(program.footprint) defer statedb.SetStylusPagesOpen(open) + localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), params.PageLimit, evm.Context.Time, debugMode, program) + if err != nil { + log.Crit("failed to get local wasm for activated program", "program", contract.Address()) + return nil, err + } + evmData := &evmData{ blockBasefee: common.BigToHash(evm.Context.BaseFee), chainId: evm.ChainConfig().ChainID.Uint64(), @@ -227,7 +233,7 @@ func (p Programs) CallProgram( if contract.CodeAddr != nil { address = *contract.CodeAddr } - return callProgram(address, moduleHash, scope, interpreter, tracingInfo, calldata, evmData, goParams, model) + return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model) } func getWasm(statedb vm.StateDB, program common.Address) ([]byte, error) { diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 77eb7e0f2f..105516dc62 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -128,9 +128,14 @@ func startProgram(module uint32) uint32 //go:wasmimport programs send_response func sendResponse(req_id uint32) uint32 +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Address, pagelimit uint16, debugMode bool, program Program) ([]byte, error) { + return nil, nil +} + func callProgram( address common.Address, moduleHash common.Hash, + _localAsm []byte, scope *vm.ScopeContext, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, From dc706bf5f48307951a5172205d6fe058dd5f9db4 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 9 May 2024 17:21:29 -0700 Subject: [PATCH 13/92] Increase default websocket size limit --- cmd/conf/chain.go | 13 ++++++------- util/rpcclient/rpcclient.go | 15 +++++++-------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 8ad853e7aa..ab9a713287 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -20,13 +20,12 @@ type ParentChainConfig struct { } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ - URL: "", - Retries: 2, - Timeout: time.Minute, - ConnectionWait: time.Minute, - ArgLogLimit: 2048, - // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + URL: "", + Retries: 2, + Timeout: time.Minute, + ConnectionWait: time.Minute, + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var L1ConfigDefault = ParentChainConfig{ diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 
cc6f11c986..56aebef396 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -49,17 +49,16 @@ type ClientConfigFetcher func() *ClientConfig var TestClientConfig = ClientConfig{ URL: "self", JWTSecret: "", - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var DefaultClientConfig = ClientConfig{ - URL: "self-auth", - JWTSecret: "", - Retries: 3, - RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", - ArgLogLimit: 2048, - // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + URL: "self-auth", + JWTSecret: "", + Retries: 3, + RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientConfig) { From a5b9853583e4947d0c1aed5a49778b8718b1dffc Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 19:10:13 -0600 Subject: [PATCH 14/92] geth - support separate wasm --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 72f81daa8c..37b6489382 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 72f81daa8c59f044246b6e1f3eca08187edd7417 +Subproject commit 37b6489382bb884dd1216dcb0f6a224ce2ca5fe2 From c9d19bcdc0078dfe7321256bf35fb5085429c251 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 19:32:32 -0600 Subject: [PATCH 15/92] wasm split: fix native --- arbos/programs/native.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 2fe4822fb6..b657472371 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -54,7 +54,7 @@ func activateProgram( debug bool, burner burn.Burner, ) (*activationInfo, error) { - info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner) + info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner.GasLeft()) if err != nil { return nil, err } @@ -70,7 +70,7 @@ func activateProgramInternal( page_limit uint16, version uint16, debug bool, - burner burn.Burner, + gasLeft *uint64, ) (*activationInfo, []byte, []byte, error) { output := &rustBytes{} asmLen := usize(0) @@ -88,7 +88,7 @@ func activateProgramInternal( &codeHash, moduleHash, stylusData, - (*u64)(burner.GasLeft()), + (*u64)(gasLeft), )) data, msg, err := status.toResult(output.intoBytes(), debug) @@ -124,15 +124,15 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Addr } codeHash := statedb.GetCodeHash(address) - burner := burn.NewSystemBurner(nil, false) wasm, err := getWasm(statedb, address) if err != nil { return nil, err } + unlimitedGas := uint64(0xffffffffffff) // we know program is activated, so it must be in correct version and not use too much memory - info, asm, module, err := activateProgramInternal(statedb, address, codeHash, wasm, pagelimit, program.version, debugMode, burner) + info, asm, module, err := activateProgramInternal(statedb, address, codeHash, wasm, pagelimit, program.version, debugMode, &unlimitedGas) if err != nil { return nil, err From 393d1d01db82bbbc660b34a222d9c73a18dd51a4 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 19:32:56 -0600 Subject: 
[PATCH 16/92] test wasm split --- system_tests/common_test.go | 4 +- system_tests/program_test.go | 78 ++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 2 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index a9f655ff77..9d461bd48c 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -772,7 +772,7 @@ func createL2BlockChainWithStackConfig( stack, err = node.New(stackConfig) Require(t, err) - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "ancient", "l2chaindata/", false) Require(t, err) arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) @@ -976,7 +976,7 @@ func Create2ndNodeWithConfig( l2stack, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + l2chainDb, err := l2stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "", "l2chaindata/", false) Require(t, err) l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) diff --git a/system_tests/program_test.go b/system_tests/program_test.go index b20efe0740..079b6c0818 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -1458,3 +1458,81 @@ func formatTime(duration time.Duration) string { } return fmt.Sprintf("%.2f%s", span, units[unit]) } + +func TestWasmRecreate(t *testing.T) { + builder, auth, cleanup := setupProgramTest(t, true) + ctx := builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + storage := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + + zero := common.Hash{} + val := common.HexToHash("0x121233445566") + + // do an onchain call - store value + storeTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageWrite(zero, val)) + Require(t, l2client.SendTransaction(ctx, storeTx)) + _, err := EnsureTxSucceeded(ctx, l2client, storeTx) + Require(t, err) + + testDir := t.TempDir() + nodeBStack := createStackConfigForTest(testDir) + nodeB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + _, err = EnsureTxSucceeded(ctx, nodeB.Client, storeTx) + Require(t, err) + + // make sure reading 2nd value succeeds from 2nd node + loadTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageRead(zero)) + result, err := arbutil.SendTxAsCall(ctx, nodeB.Client, loadTx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + if common.BytesToHash(result) != val { + Fatal(t, "got wrong value") + } + // close nodeB + cleanupB() + + // delete wasm dir of nodeB + + wasmPath := filepath.Join(testDir, "system_tests.test", "wasm") + dirContents, err := os.ReadDir(wasmPath) + Require(t, err) + if len(dirContents) == 0 { + Fatal(t, "no contents found before delete") + } + os.RemoveAll(wasmPath) + + // recreate nodeB - using same source dir (wasm deleted) + nodeB, cleanupB = builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + // test nodeB - sees existing transaction + _, err = EnsureTxSucceeded(ctx, nodeB.Client, storeTx) + Require(t, err) + + // test nodeB - answers eth_call (requires reloading wasm) + result, err = arbutil.SendTxAsCall(ctx, nodeB.Client, loadTx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + if common.BytesToHash(result) != val { + Fatal(t, "got wrong value") + } + + // send new 
tx (requires wasm) and check nodeB sees it as well + Require(t, l2client.SendTransaction(ctx, loadTx)) + + _, err = EnsureTxSucceeded(ctx, l2client, loadTx) + Require(t, err) + + _, err = EnsureTxSucceeded(ctx, nodeB.Client, loadTx) + Require(t, err) + + cleanupB() + dirContents, err = os.ReadDir(wasmPath) + Require(t, err) + if len(dirContents) == 0 { + Fatal(t, "no contents found before delete") + } + os.RemoveAll(wasmPath) + +} From d56f862d7a9038048c335caa502dbb5d12422805 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 9 May 2024 19:45:47 -0600 Subject: [PATCH 17/92] wasm split: fix wasm implementation --- arbos/programs/wasm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 105516dc62..8a67babc16 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -128,7 +128,7 @@ func startProgram(module uint32) uint32 //go:wasmimport programs send_response func sendResponse(req_id uint32) uint32 -func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Address, pagelimit uint16, debugMode bool, program Program) ([]byte, error) { +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Address, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { return nil, nil } From b744328c6b4c01434a528d8e93f53eba15dc75ac Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 10 May 2024 00:51:55 -0500 Subject: [PATCH 18/92] Assume stake is elevated if currentRequiredStake reverts --- staker/l1_validator.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index deaf4dc2dc..d68365ede0 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -12,6 +12,7 @@ import ( "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -187,12 +188,16 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { callOpts := v.getCallOpts(ctx) - requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) + baseStake, err := v.rollup.BaseStake(callOpts) if err != nil { return false, err } - baseStake, err := v.rollup.BaseStake(callOpts) + requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) if err != nil { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + log.Warn("execution reverted checking if required stake is elevated; assuming elevated", "err", err) + return true, nil + } return false, err } return requiredStake.Cmp(baseStake) > 0, nil From 3ab3acffc33747955ece2ec6fb721b507001fbea Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 08:47:24 -0600 Subject: [PATCH 19/92] wasm split: getLocalAsm fixes --- arbos/programs/native.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index b657472371..e1b464b39f 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -119,7 +119,7 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Addr localAsm, err := statedb.TryGetActivatedAsm(moduleHash) - if err == nil || 
len(localAsm) > 0 { + if err == nil && len(localAsm) > 0 { return localAsm, nil } @@ -127,19 +127,21 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, address common.Addr wasm, err := getWasm(statedb, address) if err != nil { - return nil, err + log.Error("Failed to reactivate program: getWasm", "address", address, "expected moduleHash", moduleHash, "err", err) + return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", address, err) } unlimitedGas := uint64(0xffffffffffff) // we know program is activated, so it must be in correct version and not use too much memory info, asm, module, err := activateProgramInternal(statedb, address, codeHash, wasm, pagelimit, program.version, debugMode, &unlimitedGas) - if err != nil { - return nil, err + log.Error("failed to reactivate program", "address", address, "expected moduleHash", moduleHash, "err", err) + return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", address, err) } if info.moduleHash != moduleHash { - return nil, errors.New("failed to re-activate program not found in database") + log.Error("failed to reactivate program", "address", address, "expected moduleHash", moduleHash, "got", info.moduleHash) + return nil, fmt.Errorf("failed to reactivate program. address: %v, expected ModuleHash: %v", address, moduleHash) } currentHoursSince := hoursSinceArbitrum(time) From 8d903c5997dec617961212271ca20bd9f2578603 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 2 May 2024 15:34:55 -0300 Subject: [PATCH 20/92] mv MessageWithMetadataAndBlockHash to arbostypes --- arbnode/transaction_streamer.go | 14 +++++++------- arbos/arbostypes/messagewithmeta.go | 5 +++++ broadcaster/broadcaster.go | 7 +------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 0d5ae829b0..b8b35186b2 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -465,9 +465,9 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde return err } - messagesWithBlockHash := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) + messagesWithBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) for i := 0; i < len(messagesResults); i++ { - messagesWithBlockHash = append(messagesWithBlockHash, broadcaster.MessageWithMetadataAndBlockHash{ + messagesWithBlockHash = append(messagesWithBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ Message: newMessages[i], BlockHash: &messagesResults[i].BlockHash, }) @@ -1011,11 +1011,11 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( return err } - msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ Message: msgWithMeta, BlockHash: &msgResult.BlockHash, } - s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return nil } @@ -1046,7 +1046,7 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty } func (s *TransactionStreamer) broadcastMessages( - msgs []broadcaster.MessageWithMetadataAndBlockHash, + msgs []arbostypes.MessageWithMetadataAndBlockHash, pos arbutil.MessageIndex, ) { if s.broadcastServer == nil { @@ -1145,11 +1145,11 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution return false } - msgWithBlockHash := 
broadcaster.MessageWithMetadataAndBlockHash{ + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ Message: *msg, BlockHash: &msgResult.BlockHash, } - s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return pos+1 < msgCount } diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index a3d4f5e3c3..e1215e0dd5 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -18,6 +18,11 @@ type MessageWithMetadata struct { DelayedMessagesRead uint64 `json:"delayedMessagesRead"` } +type MessageWithMetadataAndBlockHash struct { + Message MessageWithMetadata + BlockHash *common.Hash +} + var EmptyTestMessageWithMetadata = MessageWithMetadata{ Message: &EmptyTestIncomingMessage, } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ac5c6c39da..da1de6665e 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -22,11 +22,6 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) -type MessageWithMetadataAndBlockHash struct { - Message arbostypes.MessageWithMetadata - BlockHash *common.Hash -} - type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -98,7 +93,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { } func (b *Broadcaster) BroadcastMessages( - messagesWithBlockHash []MessageWithMetadataAndBlockHash, + messagesWithBlockHash []arbostypes.MessageWithMetadataAndBlockHash, seq arbutil.MessageIndex, ) (err error) { defer func() { From 1f99ca984c2c3ac9c6f0af11680fe3ee756a7746 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 2 May 2024 17:09:03 -0300 Subject: [PATCH 21/92] pass MessageWithMetadataAndBlockHashes to writeMessages --- arbnode/inbox_test.go | 20 ++++--- arbnode/inbox_tracker.go | 9 ++- arbnode/seq_coordinator.go | 7 ++- arbnode/transaction_streamer.go | 77 ++++++++++++++----------- execution/gethexec/executionengine.go | 6 +- execution/gethexec/node.go | 2 +- execution/interface.go | 2 +- system_tests/contract_tx_test.go | 26 +++++---- system_tests/reorg_resequencing_test.go | 8 ++- system_tests/seq_coordinator_test.go | 5 +- 10 files changed, 92 insertions(+), 70 deletions(-) diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 5c879743a4..fbd1dba96a 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -128,7 +128,7 @@ func TestTransactionStreamer(t *testing.T) { } state.balances = newBalances - var messages []arbostypes.MessageWithMetadata + var messages []arbostypes.MessageWithMetadataAndBlockHash // TODO replay a random amount of messages too numMessages := rand.Int() % 5 for j := 0; j < numMessages; j++ { @@ -154,16 +154,18 @@ func TestTransactionStreamer(t *testing.T) { l2Message = append(l2Message, arbmath.U256Bytes(value)...) 
var requestId common.Hash binary.BigEndian.PutUint64(requestId.Bytes()[:8], uint64(i)) - messages = append(messages, arbostypes.MessageWithMetadata{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_L2Message, - Poster: source, - RequestId: &requestId, + messages = append(messages, arbostypes.MessageWithMetadataAndBlockHash{ + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: source, + RequestId: &requestId, + }, + L2msg: l2Message, }, - L2msg: l2Message, + DelayedMessagesRead: 1, }, - DelayedMessagesRead: 1, }) state.balances[source].Sub(state.balances[source], value) if state.balances[dest] == nil { diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index ba1b875ec8..e2aa1d5e74 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -652,7 +652,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L pos++ } - var messages []arbostypes.MessageWithMetadata + var messages []arbostypes.MessageWithMetadataAndBlockHash backend := &multiplexerBackend{ batchSeqNum: batches[0].SequenceNumber, batches: batches, @@ -673,7 +673,10 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if err != nil { return err } - messages = append(messages, *msg) + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + Message: *msg, + } + messages = append(messages, msgWithBlockHash) batchMessageCounts[batchSeqNum] = currentpos currentpos += 1 } @@ -733,7 +736,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L } var latestTimestamp uint64 if len(messages) > 0 { - latestTimestamp = messages[len(messages)-1].Message.Header.Timestamp + latestTimestamp = messages[len(messages)-1].Message.Message.Header.Timestamp } log.Info( "InboxTracker", diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index ecf38ddf42..0a27d89d40 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -533,7 +533,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { if readUntil > localMsgCount+c.config.MsgPerPoll { readUntil = localMsgCount + c.config.MsgPerPoll } - var messages []arbostypes.MessageWithMetadata + var messages []arbostypes.MessageWithMetadataAndBlockHash msgToRead := localMsgCount var msgReadErr error for msgToRead < readUntil { @@ -592,7 +592,10 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { DelayedMessagesRead: lastDelayedMsg, } } - messages = append(messages, message) + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + Message: message, + } + messages = append(messages, msgWithBlockHash) msgToRead++ } if len(messages) > 0 { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index b8b35186b2..411eba965d 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -60,7 +60,7 @@ type TransactionStreamer struct { nextAllowedFeedReorgLog time.Time - broadcasterQueuedMessages []arbostypes.MessageWithMetadata + broadcasterQueuedMessages []arbostypes.MessageWithMetadataAndBlockHash broadcasterQueuedMessagesPos uint64 broadcasterQueuedMessagesActiveReorg bool @@ -371,7 +371,7 @@ func deleteFromRange(ctx context.Context, db ethdb.Database, prefix []byte, star // The insertion mutex must be held. This acquires the reorg mutex. 
// Note: oldMessages will be empty if reorgHook is nil -func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash) error { if count == 0 { return errors.New("cannot reorg out init message") } @@ -465,14 +465,14 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde return err } - messagesWithBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) + messagesWithComputedBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) for i := 0; i < len(messagesResults); i++ { - messagesWithBlockHash = append(messagesWithBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ - Message: newMessages[i], + messagesWithComputedBlockHash = append(messagesWithComputedBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ + Message: newMessages[i].Message, BlockHash: &messagesResults[i].BlockHash, }) } - s.broadcastMessages(messagesWithBlockHash, count) + s.broadcastMessages(messagesWithComputedBlockHash, count) if s.validator != nil { err = s.validator.Reorg(s.GetContext(), count) @@ -555,7 +555,7 @@ func (s *TransactionStreamer) GetProcessedMessageCount() (arbutil.MessageIndex, return msgCount, nil } -func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash) error { return s.AddMessagesAndEndBatch(pos, messagesAreConfirmed, messages, nil) } @@ -579,7 +579,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe return nil } broadcastStartPos := feedMessages[0].SequenceNumber - var messages []arbostypes.MessageWithMetadata + var messages []arbostypes.MessageWithMetadataAndBlockHash broadcastAfterPos := broadcastStartPos for _, feedMessage := range feedMessages { if broadcastAfterPos != feedMessage.SequenceNumber { @@ -588,7 +588,11 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe if feedMessage.Message.Message == nil || feedMessage.Message.Message.Header == nil { return fmt.Errorf("invalid feed message at sequence number %v", feedMessage.SequenceNumber) } - messages = append(messages, feedMessage.Message) + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + Message: feedMessage.Message, + BlockHash: feedMessage.BlockHash, + } + messages = append(messages, msgWithBlockHash) broadcastAfterPos++ } @@ -607,7 +611,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe messages = messages[dups:] broadcastStartPos += arbutil.MessageIndex(dups) if oldMsg != nil { - s.logReorg(broadcastStartPos, oldMsg, &messages[0], false) + s.logReorg(broadcastStartPos, oldMsg, &messages[0].Message, false) } if len(messages) == 0 { // No new messages received @@ -681,16 +685,19 @@ func (s *TransactionStreamer) AddFakeInitMessage() error { } chainIdBytes := arbmath.U256Bytes(s.chainConfig.ChainID) msg := append(append(chainIdBytes, 0), chainConfigJson...) 
- return s.AddMessages(0, false, []arbostypes.MessageWithMetadata{{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_Initialize, - RequestId: &common.Hash{}, - L1BaseFee: common.Big0, + return s.AddMessages(0, false, []arbostypes.MessageWithMetadataAndBlockHash{{ + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_Initialize, + RequestId: &common.Hash{}, + L1BaseFee: common.Big0, + }, + L2msg: msg, }, - L2msg: msg, + DelayedMessagesRead: 1, }, - DelayedMessagesRead: 1, + BlockHash: nil, }}) } @@ -708,7 +715,7 @@ func endBatch(batch ethdb.Batch) error { return batch.Write() } -func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { if messagesAreConfirmed { // Trim confirmed messages from l1pricedataCache s.TrimCache(pos + arbutil.MessageIndex(len(messages))) @@ -748,7 +755,7 @@ func (s *TransactionStreamer) getPrevPrevDelayedRead(pos arbutil.MessageIndex) ( func (s *TransactionStreamer) countDuplicateMessages( pos arbutil.MessageIndex, - messages []arbostypes.MessageWithMetadata, + messages []arbostypes.MessageWithMetadataAndBlockHash, batch *ethdb.Batch, ) (int, bool, *arbostypes.MessageWithMetadata, error) { curMsg := 0 @@ -768,7 +775,7 @@ func (s *TransactionStreamer) countDuplicateMessages( if err != nil { return 0, false, nil, err } - nextMessage := messages[curMsg] + nextMessage := messages[curMsg].Message wantMessage, err := rlp.EncodeToBytes(nextMessage) if err != nil { return 0, false, nil, err @@ -842,7 +849,7 @@ func (s *TransactionStreamer) logReorg(pos arbutil.MessageIndex, dbMsg *arbostyp } -func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { var confirmedReorg bool var oldMsg *arbostypes.MessageWithMetadata var lastDelayedRead uint64 @@ -860,7 +867,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].Message.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } @@ -898,13 +905,13 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].Message.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } } if oldMsg != nil { - s.logReorg(messageStartPos, oldMsg, &messages[0], confirmedReorg) + s.logReorg(messageStartPos, oldMsg, &messages[0].Message, confirmedReorg) } if feedReorg { @@ -924,12 +931,12 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Validate delayed message counts of 
remaining messages for i, msg := range messages { msgPos := messageStartPos + arbutil.MessageIndex(i) - diff := msg.DelayedMessagesRead - lastDelayedRead + diff := msg.Message.DelayedMessagesRead - lastDelayedRead if diff != 0 && diff != 1 { - return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.DelayedMessagesRead, msgPos) + return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.Message.DelayedMessagesRead, msgPos) } - lastDelayedRead = msg.DelayedMessagesRead - if msg.Message == nil { + lastDelayedRead = msg.Message.DelayedMessagesRead + if msg.Message.Message == nil { return fmt.Errorf("attempted to insert nil message at position %v", msgPos) } } @@ -1007,14 +1014,14 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( } } - if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { - return err - } - msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ Message: msgWithMeta, BlockHash: &msgResult.BlockHash, } + + if err := s.writeMessages(pos, []arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, nil); err != nil { + return err + } s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return nil @@ -1059,12 +1066,12 @@ func (s *TransactionStreamer) broadcastMessages( // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. -func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { if batch == nil { batch = s.db.NewBatch() } for i, msg := range messages { - err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch) + err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.Message, batch) if err != nil { return err } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 38569f44ab..c4fbc04712 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -116,7 +116,7 @@ func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { return s.consensus } -func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { +func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { if count == 0 { return nil, errors.New("cannot reorg out genesis") } @@ -149,9 +149,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost for i := range newMessages { var msgForPrefetch *arbostypes.MessageWithMetadata if i < len(newMessages)-1 { - msgForPrefetch = &newMessages[i] + msgForPrefetch = &newMessages[i].Message } - msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) + msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i].Message, msgForPrefetch) if err != nil { return nil, err } diff --git 
a/execution/gethexec/node.go b/execution/gethexec/node.go index ae76b88530..458d6601c5 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -346,7 +346,7 @@ func (n *ExecutionNode) StopAndWait() { func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } -func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { +func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { return n.ExecEngine.Reorg(count, newMessages, oldMessages) } func (n *ExecutionNode) HeadMessageNumber() (arbutil.MessageIndex, error) { diff --git a/execution/interface.go b/execution/interface.go index d2a5b58fe5..66aefe9a5e 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -31,7 +31,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*MessageResult, error) - Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) + Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index 7d66e516b4..d0f7b153f3 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -69,21 +69,23 @@ func TestContractTxDeploy(t *testing.T) { l2Msg = append(l2Msg, arbmath.U256Bytes(contractTx.Value)...) l2Msg = append(l2Msg, contractTx.Data...) 
- err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{ + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadataAndBlockHash{ { - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_L2Message, - Poster: from, - BlockNumber: 0, - Timestamp: 0, - RequestId: &contractTx.RequestId, - L1BaseFee: &big.Int{}, + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: from, + BlockNumber: 0, + Timestamp: 0, + RequestId: &contractTx.RequestId, + L1BaseFee: &big.Int{}, + }, + L2msg: l2Msg, + BatchGasCost: new(uint64), }, - L2msg: l2Msg, - BatchGasCost: new(uint64), + DelayedMessagesRead: delayedMessagesRead, }, - DelayedMessagesRead: delayedMessagesRead, }, }) Require(t, err) diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index b188504acb..6d5ecd5e6a 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -72,9 +72,11 @@ func TestReorgResequencing(t *testing.T) { }, L2msg: append(builder.L2Info.GetAddress("User4").Bytes(), arbmath.Uint64ToU256Bytes(params.Ether)...), } - err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ - Message: newMessage, - DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadataAndBlockHash{{ + Message: arbostypes.MessageWithMetadata{ + Message: newMessage, + DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, + }, }}) Require(t, err) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 886a0528c7..5e539a8812 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -91,7 +91,10 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { return false } Require(t, err) - Require(t, node.TxStreamer.AddMessages(curMsgs, false, []arbostypes.MessageWithMetadata{emptyMessage})) + emptyMessageWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + Message: emptyMessage, + } + Require(t, node.TxStreamer.AddMessages(curMsgs, false, []arbostypes.MessageWithMetadataAndBlockHash{emptyMessageWithBlockHash})) return true } From bf3c9602d822c50277b705c46e22fc151e72fa78 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 2 May 2024 17:21:20 -0300 Subject: [PATCH 22/92] rename Message to MessageWithMeta in MessageWithMetadataAndBlockHash --- arbnode/inbox_test.go | 2 +- arbnode/inbox_tracker.go | 4 +-- arbnode/seq_coordinator.go | 2 +- arbnode/transaction_streamer.go | 38 ++++++++++++------------- arbos/arbostypes/messagewithmeta.go | 4 +-- broadcaster/broadcaster.go | 2 +- execution/gethexec/executionengine.go | 4 +-- system_tests/contract_tx_test.go | 2 +- system_tests/reorg_resequencing_test.go | 2 +- system_tests/seq_coordinator_test.go | 2 +- 10 files changed, 31 insertions(+), 31 deletions(-) diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index fbd1dba96a..a5d1554cb1 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -155,7 +155,7 @@ func TestTransactionStreamer(t *testing.T) { var requestId common.Hash binary.BigEndian.PutUint64(requestId.Bytes()[:8], uint64(i)) messages = append(messages, 
arbostypes.MessageWithMetadataAndBlockHash{ - Message: arbostypes.MessageWithMetadata{ + MessageWithMeta: arbostypes.MessageWithMetadata{ Message: &arbostypes.L1IncomingMessage{ Header: &arbostypes.L1IncomingMessageHeader{ Kind: arbostypes.L1MessageType_L2Message, diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index e2aa1d5e74..2340df8303 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -674,7 +674,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L return err } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: *msg, + MessageWithMeta: *msg, } messages = append(messages, msgWithBlockHash) batchMessageCounts[batchSeqNum] = currentpos @@ -736,7 +736,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L } var latestTimestamp uint64 if len(messages) > 0 { - latestTimestamp = messages[len(messages)-1].Message.Message.Header.Timestamp + latestTimestamp = messages[len(messages)-1].MessageWithMeta.Message.Header.Timestamp } log.Info( "InboxTracker", diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 0a27d89d40..2fb8c3244b 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -593,7 +593,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: message, + MessageWithMeta: message, } messages = append(messages, msgWithBlockHash) msgToRead++ diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 411eba965d..708dcff41b 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -468,8 +468,8 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde messagesWithComputedBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) for i := 0; i < len(messagesResults); i++ { messagesWithComputedBlockHash = append(messagesWithComputedBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ - Message: newMessages[i].Message, - BlockHash: &messagesResults[i].BlockHash, + MessageWithMeta: newMessages[i].MessageWithMeta, + BlockHash: &messagesResults[i].BlockHash, }) } s.broadcastMessages(messagesWithComputedBlockHash, count) @@ -589,8 +589,8 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe return fmt.Errorf("invalid feed message at sequence number %v", feedMessage.SequenceNumber) } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: feedMessage.Message, - BlockHash: feedMessage.BlockHash, + MessageWithMeta: feedMessage.Message, + BlockHash: feedMessage.BlockHash, } messages = append(messages, msgWithBlockHash) broadcastAfterPos++ @@ -611,7 +611,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe messages = messages[dups:] broadcastStartPos += arbutil.MessageIndex(dups) if oldMsg != nil { - s.logReorg(broadcastStartPos, oldMsg, &messages[0].Message, false) + s.logReorg(broadcastStartPos, oldMsg, &messages[0].MessageWithMeta, false) } if len(messages) == 0 { // No new messages received @@ -686,7 +686,7 @@ func (s *TransactionStreamer) AddFakeInitMessage() error { chainIdBytes := arbmath.U256Bytes(s.chainConfig.ChainID) msg := append(append(chainIdBytes, 0), chainConfigJson...) 
return s.AddMessages(0, false, []arbostypes.MessageWithMetadataAndBlockHash{{ - Message: arbostypes.MessageWithMetadata{ + MessageWithMeta: arbostypes.MessageWithMetadata{ Message: &arbostypes.L1IncomingMessage{ Header: &arbostypes.L1IncomingMessageHeader{ Kind: arbostypes.L1MessageType_Initialize, @@ -775,7 +775,7 @@ func (s *TransactionStreamer) countDuplicateMessages( if err != nil { return 0, false, nil, err } - nextMessage := messages[curMsg].Message + nextMessage := messages[curMsg].MessageWithMeta wantMessage, err := rlp.EncodeToBytes(nextMessage) if err != nil { return 0, false, nil, err @@ -867,7 +867,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].Message.DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].MessageWithMeta.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } @@ -905,13 +905,13 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].Message.DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].MessageWithMeta.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } } if oldMsg != nil { - s.logReorg(messageStartPos, oldMsg, &messages[0].Message, confirmedReorg) + s.logReorg(messageStartPos, oldMsg, &messages[0].MessageWithMeta, confirmedReorg) } if feedReorg { @@ -931,12 +931,12 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Validate delayed message counts of remaining messages for i, msg := range messages { msgPos := messageStartPos + arbutil.MessageIndex(i) - diff := msg.Message.DelayedMessagesRead - lastDelayedRead + diff := msg.MessageWithMeta.DelayedMessagesRead - lastDelayedRead if diff != 0 && diff != 1 { - return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.Message.DelayedMessagesRead, msgPos) + return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.MessageWithMeta.DelayedMessagesRead, msgPos) } - lastDelayedRead = msg.Message.DelayedMessagesRead - if msg.Message.Message == nil { + lastDelayedRead = msg.MessageWithMeta.DelayedMessagesRead + if msg.MessageWithMeta.Message == nil { return fmt.Errorf("attempted to insert nil message at position %v", msgPos) } } @@ -1015,8 +1015,8 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: msgWithMeta, - BlockHash: &msgResult.BlockHash, + MessageWithMeta: msgWithMeta, + BlockHash: &msgResult.BlockHash, } if err := s.writeMessages(pos, []arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, nil); err != nil { @@ -1071,7 +1071,7 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ batch = s.db.NewBatch() } for i, msg := range messages { - err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.Message, batch) + err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.MessageWithMeta, batch) if err != nil { return err } @@ -1153,8 +1153,8 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: *msg, - BlockHash: &msgResult.BlockHash, + 
MessageWithMeta: *msg, + BlockHash: &msgResult.BlockHash, } s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index e1215e0dd5..79b7c4f9d2 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -19,8 +19,8 @@ type MessageWithMetadata struct { } type MessageWithMetadataAndBlockHash struct { - Message MessageWithMetadata - BlockHash *common.Hash + MessageWithMeta MessageWithMetadata + BlockHash *common.Hash } var EmptyTestMessageWithMetadata = MessageWithMetadata{ diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index da1de6665e..ba95f2d8af 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -104,7 +104,7 @@ func (b *Broadcaster) BroadcastMessages( }() var feedMessages []*m.BroadcastFeedMessage for i, msg := range messagesWithBlockHash { - bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) + bfm, err := b.NewBroadcastFeedMessage(msg.MessageWithMeta, seq+arbutil.MessageIndex(i), msg.BlockHash) if err != nil { return err } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index c4fbc04712..b31209b882 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -149,9 +149,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost for i := range newMessages { var msgForPrefetch *arbostypes.MessageWithMetadata if i < len(newMessages)-1 { - msgForPrefetch = &newMessages[i].Message + msgForPrefetch = &newMessages[i].MessageWithMeta } - msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i].Message, msgForPrefetch) + msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i].MessageWithMeta, msgForPrefetch) if err != nil { return nil, err } diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index d0f7b153f3..c4ae326df1 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -71,7 +71,7 @@ func TestContractTxDeploy(t *testing.T) { err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadataAndBlockHash{ { - Message: arbostypes.MessageWithMetadata{ + MessageWithMeta: arbostypes.MessageWithMetadata{ Message: &arbostypes.L1IncomingMessage{ Header: &arbostypes.L1IncomingMessageHeader{ Kind: arbostypes.L1MessageType_L2Message, diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index 6d5ecd5e6a..28d1b3bd66 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -73,7 +73,7 @@ func TestReorgResequencing(t *testing.T) { L2msg: append(builder.L2Info.GetAddress("User4").Bytes(), arbmath.Uint64ToU256Bytes(params.Ether)...), } err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadataAndBlockHash{{ - Message: arbostypes.MessageWithMetadata{ + MessageWithMeta: arbostypes.MessageWithMetadata{ Message: newMessage, DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, }, diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 5e539a8812..36c7be7251 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -92,7 +92,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } 
Require(t, err) emptyMessageWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - Message: emptyMessage, + MessageWithMeta: emptyMessage, } Require(t, node.TxStreamer.AddMessages(curMsgs, false, []arbostypes.MessageWithMetadataAndBlockHash{emptyMessageWithBlockHash})) return true } From 44e44c7a10a642fe66eac3159e0712038e57f45f Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 2 May 2024 17:59:42 -0300 Subject: [PATCH 23/92] write block hash alongside message with metadata --- arbnode/schema.go | 1 + arbnode/transaction_streamer.go | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/arbnode/schema.go b/arbnode/schema.go index ddc7cf54fd..2854b7e785 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -5,6 +5,7 @@ package arbnode var ( messagePrefix []byte = []byte("m") // maps a message sequence number to a message + blockHashInputFeedPrefix []byte = []byte("b") // maps a message sequence number to a block hash received through the input feed legacyDelayedMessagePrefix []byte = []byte("d") // maps a delayed sequence number to an accumulator and a message as serialized on L1 rlpDelayedMessagePrefix []byte = []byte("e") // maps a delayed sequence number to an accumulator and an RLP encoded message parentChainBlockNumberPrefix []byte = []byte("p") // maps a delayed sequence number to a parent chain block number diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 708dcff41b..5debe0c41f 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -775,8 +775,8 @@ func (s *TransactionStreamer) countDuplicateMessages( if err != nil { return 0, false, nil, err } - nextMessage := messages[curMsg].MessageWithMeta - wantMessage, err := rlp.EncodeToBytes(nextMessage) + nextMessage := messages[curMsg] + wantMessage, err := rlp.EncodeToBytes(nextMessage.MessageWithMeta) if err != nil { return 0, false, nil, err } @@ -792,12 +792,12 @@ func (s *TransactionStreamer) countDuplicateMessages( return curMsg, true, nil, nil } var duplicateMessage bool - if nextMessage.Message != nil { - if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { + if nextMessage.MessageWithMeta.Message != nil { + if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.MessageWithMeta.Message.BatchGasCost == nil { // Remove both of the batch gas costs and see if the messages still differ - nextMessageCopy := nextMessage + nextMessageCopy := nextMessage.MessageWithMeta nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) - *nextMessageCopy.Message = *nextMessage.Message + *nextMessageCopy.Message = *nextMessage.MessageWithMeta.Message batchGasCostBkup := dbMessageParsed.Message.BatchGasCost dbMessageParsed.Message.BatchGasCost = nil nextMessageCopy.Message.BatchGasCost = nil @@ -805,7 +805,7 @@ func (s *TransactionStreamer) countDuplicateMessages( // Actually this isn't a reorg; only the batch gas costs differed duplicateMessage = true // If possible - update the message in the database to add the gas cost cache. 
- if batch != nil && nextMessage.Message.BatchGasCost != nil { + if batch != nil && nextMessage.MessageWithMeta.Message.BatchGasCost != nil { if *batch == nil { *batch = s.db.NewBatch() } @@ -1043,9 +1043,20 @@ func (s *TransactionStreamer) PopulateFeedBacklog() error { return s.inboxReader.tracker.PopulateFeedBacklog(s.broadcastServer) } -func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { + // write message with metadata key := dbKey(messagePrefix, uint64(pos)) - msgBytes, err := rlp.EncodeToBytes(msg) + msgBytes, err := rlp.EncodeToBytes(msg.MessageWithMeta) + if err != nil { + return err + } + if err := batch.Put(key, msgBytes); err != nil { + return err + } + + // write block hash + key = dbKey(blockHashInputFeedPrefix, uint64(pos)) + msgBytes, err = rlp.EncodeToBytes(msg.BlockHash) if err != nil { return err } @@ -1071,7 +1082,7 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ batch = s.db.NewBatch() } for i, msg := range messages { - err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.MessageWithMeta, batch) + err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch) if err != nil { return err } From da0b605ad80d65ca5ae00c438a412ccf4af8a968 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 2 May 2024 18:58:59 -0300 Subject: [PATCH 24/92] prune block hashes in db --- arbnode/message_pruner.go | 25 +++++++++++++++++-------- arbnode/message_pruner_test.go | 6 ++++-- arbnode/transaction_streamer.go | 4 ++++ 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 31bf1a63ff..c31dbc496d 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -23,13 +23,14 @@ import ( type MessagePruner struct { stopwaiter.StopWaiter - transactionStreamer *TransactionStreamer - inboxTracker *InboxTracker - config MessagePrunerConfigFetcher - pruningLock sync.Mutex - lastPruneDone time.Time - cachedPrunedMessages uint64 - cachedPrunedDelayedMessages uint64 + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + config MessagePrunerConfigFetcher + pruningLock sync.Mutex + lastPruneDone time.Time + cachedPrunedMessages uint64 + cachedPrunedBlockHashesInputFeed uint64 + cachedPrunedDelayedMessages uint64 } type MessagePrunerConfig struct { @@ -115,7 +116,15 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g } func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) + if err != nil { + return fmt.Errorf("error deleting last batch messages' block hashes: %w", err) + } + if len(prunedKeysRange) > 0 { + log.Info("Pruned last batch messages' block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + } + + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) if err 
!= nil { return fmt.Errorf("error deleting last batch messages: %w", err) } diff --git a/arbnode/message_pruner_test.go b/arbnode/message_pruner_test.go index 0212ed2364..ed85c0ebce 100644 --- a/arbnode/message_pruner_test.go +++ b/arbnode/message_pruner_test.go @@ -22,8 +22,8 @@ func TestMessagePrunerWithPruningEligibleMessagePresent(t *testing.T) { Require(t, err) checkDbKeys(t, messagesCount, transactionStreamerDb, messagePrefix) + checkDbKeys(t, messagesCount, transactionStreamerDb, blockHashInputFeedPrefix) checkDbKeys(t, messagesCount, inboxTrackerDb, rlpDelayedMessagePrefix) - } func TestMessagePrunerTwoHalves(t *testing.T) { @@ -71,16 +71,18 @@ func TestMessagePrunerWithNoPruningEligibleMessagePresent(t *testing.T) { Require(t, err) checkDbKeys(t, uint64(messagesCount), transactionStreamerDb, messagePrefix) + checkDbKeys(t, uint64(messagesCount), transactionStreamerDb, blockHashInputFeedPrefix) checkDbKeys(t, messagesCount, inboxTrackerDb, rlpDelayedMessagePrefix) } func setupDatabase(t *testing.T, messageCount, delayedMessageCount uint64) (ethdb.Database, ethdb.Database, *MessagePruner) { - transactionStreamerDb := rawdb.NewMemoryDatabase() for i := uint64(0); i < uint64(messageCount); i++ { err := transactionStreamerDb.Put(dbKey(messagePrefix, i), []byte{}) Require(t, err) + err = transactionStreamerDb.Put(dbKey(blockHashInputFeedPrefix, i), []byte{}) + Require(t, err) } inboxTrackerDb := rawdb.NewMemoryDatabase() diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 5debe0c41f..0d02dc27dc 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -481,6 +481,10 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde } } + err = deleteStartingAt(s.db, batch, blockHashInputFeedPrefix, uint64ToKey(uint64(count))) + if err != nil { + return err + } err = deleteStartingAt(s.db, batch, messagePrefix, uint64ToKey(uint64(count))) if err != nil { return err From 7ad286d12eeeae3eb8a723c6ce2f5d9f3651fef9 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 3 May 2024 12:37:42 -0300 Subject: [PATCH 25/92] retrieve block hash and compare it when executing next message --- arbnode/transaction_streamer.go | 46 ++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 0d02dc27dc..9da0b3ea17 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -530,6 +530,30 @@ func (s *TransactionStreamer) GetMessage(seqNum arbutil.MessageIndex) (*arbostyp return &message, nil } +func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil.MessageIndex) (*arbostypes.MessageWithMetadataAndBlockHash, error) { + msg, err := s.GetMessage(seqNum) + if err != nil { + return nil, err + } + + key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) + data, err := s.db.Get(key) + if err != nil { + return nil, err + } + var blockHash *common.Hash + err = rlp.DecodeBytes(data, &blockHash) + if err != nil { + return nil, err + } + + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: *msg, + BlockHash: blockHash, + } + return &msgWithBlockHash, nil +} + // Note: if changed to acquire the mutex, some internal users may need to be updated to a non-locking version. 
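// For orientation: the input feed block hash introduced in this series is
// stored next to, not inside, the message record. Both entries are keyed by
// the same position under different prefixes, which is why retrieval,
// pruning, and reorg handling all touch the two key spaces together. A
// minimal sketch of the key layout, assuming dbKey and the prefixes defined
// in this file; exampleKeysForPosition is a hypothetical illustration, not
// part of this patch.
func exampleKeysForPosition(pos arbutil.MessageIndex) (msgKey, blockHashKey []byte) {
	// RLP-encoded MessageWithMetadata entry.
	msgKey = dbKey(messagePrefix, uint64(pos))
	// Optional block hash reported by the input feed for the same position.
	blockHashKey = dbKey(blockHashInputFeedPrefix, uint64(pos))
	return msgKey, blockHashKey
}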
func (s *TransactionStreamer) GetMessageCount() (arbutil.MessageIndex, error) { posBytes, err := s.db.Get(messageCountKey) @@ -1117,6 +1141,20 @@ func (s *TransactionStreamer) ResultAtCount(count arbutil.MessageIndex) (*execut return s.exec.ResultAtPos(count - 1) } +func (s *TransactionStreamer) checkResult(msgResult *execution.MessageResult, expectedBlockHash *common.Hash) { + if expectedBlockHash == nil { + return + } + if msgResult.BlockHash != *expectedBlockHash { + log.Error( + "block_hash_mismatch", + "expected", expectedBlockHash, + "actual", msgResult.BlockHash, + ) + return + } +} + // exposed for testing // return value: true if should be called again immediately func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution.ExecutionSequencer) bool { @@ -1143,7 +1181,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution if pos >= msgCount { return false } - msg, err := s.GetMessage(pos) + msgAndBlockHash, err := s.getMessageWithMetadataAndBlockHash(pos) if err != nil { log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos) return false @@ -1157,7 +1195,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } msgForPrefetch = msg } - msgResult, err := s.exec.DigestMessage(pos, msg, msgForPrefetch) + msgResult, err := s.exec.DigestMessage(pos, &msgAndBlockHash.MessageWithMeta, msgForPrefetch) if err != nil { logger := log.Warn if prevMessageCount < msgCount { @@ -1167,8 +1205,10 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution return false } + s.checkResult(msgResult, msgAndBlockHash.BlockHash) + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - MessageWithMeta: *msg, + MessageWithMeta: msgAndBlockHash.MessageWithMeta, BlockHash: &msgResult.BlockHash, } s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) From 95690fbe1f7ce122ca94e474b78eec1b7c15a802 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 3 May 2024 15:35:53 -0300 Subject: [PATCH 26/92] use MessageWithMetadata instead of MessageWithMetadataAndBlockHash in places where BlockHash will definitely be nil --- arbnode/inbox_test.go | 20 +++++++-------- arbnode/inbox_tracker.go | 9 +++---- arbnode/seq_coordinator.go | 7 ++--- arbnode/transaction_streamer.go | 34 ++++++++++++++----------- system_tests/contract_tx_test.go | 26 +++++++++---------- system_tests/reorg_resequencing_test.go | 8 +++--- system_tests/seq_coordinator_test.go | 5 +--- 7 files changed, 49 insertions(+), 60 deletions(-) diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index a5d1554cb1..5c879743a4 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -128,7 +128,7 @@ func TestTransactionStreamer(t *testing.T) { } state.balances = newBalances - var messages []arbostypes.MessageWithMetadataAndBlockHash + var messages []arbostypes.MessageWithMetadata // TODO replay a random amount of messages too numMessages := rand.Int() % 5 for j := 0; j < numMessages; j++ { @@ -154,18 +154,16 @@ func TestTransactionStreamer(t *testing.T) { l2Message = append(l2Message, arbmath.U256Bytes(value)...) 
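// The transfer encoded into l2Message above reaches the streamer as a plain
// MessageWithMetadata: locally generated test messages carry no feed block
// hash, so there is nothing to verify against and the streamer attaches a nil
// BlockHash itself when wrapping them (see AddMessagesAndEndBatch in this
// patch). A minimal sketch of that wrapping, using the arbostypes types:
//
//	wrapped := arbostypes.MessageWithMetadataAndBlockHash{
//		MessageWithMeta: msg, // BlockHash left nil for locally produced messages
//	}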
var requestId common.Hash binary.BigEndian.PutUint64(requestId.Bytes()[:8], uint64(i)) - messages = append(messages, arbostypes.MessageWithMetadataAndBlockHash{ - MessageWithMeta: arbostypes.MessageWithMetadata{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_L2Message, - Poster: source, - RequestId: &requestId, - }, - L2msg: l2Message, + messages = append(messages, arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: source, + RequestId: &requestId, }, - DelayedMessagesRead: 1, + L2msg: l2Message, }, + DelayedMessagesRead: 1, }) state.balances[source].Sub(state.balances[source], value) if state.balances[dest] == nil { diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 2340df8303..ba1b875ec8 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -652,7 +652,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L pos++ } - var messages []arbostypes.MessageWithMetadataAndBlockHash + var messages []arbostypes.MessageWithMetadata backend := &multiplexerBackend{ batchSeqNum: batches[0].SequenceNumber, batches: batches, @@ -673,10 +673,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if err != nil { return err } - msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - MessageWithMeta: *msg, - } - messages = append(messages, msgWithBlockHash) + messages = append(messages, *msg) batchMessageCounts[batchSeqNum] = currentpos currentpos += 1 } @@ -736,7 +733,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L } var latestTimestamp uint64 if len(messages) > 0 { - latestTimestamp = messages[len(messages)-1].MessageWithMeta.Message.Header.Timestamp + latestTimestamp = messages[len(messages)-1].Message.Header.Timestamp } log.Info( "InboxTracker", diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 2fb8c3244b..ecf38ddf42 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -533,7 +533,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { if readUntil > localMsgCount+c.config.MsgPerPoll { readUntil = localMsgCount + c.config.MsgPerPoll } - var messages []arbostypes.MessageWithMetadataAndBlockHash + var messages []arbostypes.MessageWithMetadata msgToRead := localMsgCount var msgReadErr error for msgToRead < readUntil { @@ -592,10 +592,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { DelayedMessagesRead: lastDelayedMsg, } } - msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - MessageWithMeta: message, - } - messages = append(messages, msgWithBlockHash) + messages = append(messages, message) msgToRead++ } if len(messages) > 0 { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 9da0b3ea17..7ff565ec16 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -583,7 +583,7 @@ func (s *TransactionStreamer) GetProcessedMessageCount() (arbutil.MessageIndex, return msgCount, nil } -func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash) error { +func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata) error { return 
s.AddMessagesAndEndBatch(pos, messagesAreConfirmed, messages, nil) } @@ -713,19 +713,16 @@ func (s *TransactionStreamer) AddFakeInitMessage() error { } chainIdBytes := arbmath.U256Bytes(s.chainConfig.ChainID) msg := append(append(chainIdBytes, 0), chainConfigJson...) - return s.AddMessages(0, false, []arbostypes.MessageWithMetadataAndBlockHash{{ - MessageWithMeta: arbostypes.MessageWithMetadata{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_Initialize, - RequestId: &common.Hash{}, - L1BaseFee: common.Big0, - }, - L2msg: msg, + return s.AddMessages(0, false, []arbostypes.MessageWithMetadata{{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_Initialize, + RequestId: &common.Hash{}, + L1BaseFee: common.Big0, }, - DelayedMessagesRead: 1, + L2msg: msg, }, - BlockHash: nil, + DelayedMessagesRead: 1, }}) } @@ -743,12 +740,19 @@ func endBatch(batch ethdb.Batch) error { return batch.Write() } -func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { +func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { + messagesWithBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messages)) + for _, message := range messages { + messagesWithBlockHash = append(messagesWithBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: message, + }) + } + if messagesAreConfirmed { // Trim confirmed messages from l1pricedataCache s.TrimCache(pos + arbutil.MessageIndex(len(messages))) s.reorgMutex.RLock() - dups, _, _, err := s.countDuplicateMessages(pos, messages, nil) + dups, _, _, err := s.countDuplicateMessages(pos, messagesWithBlockHash, nil) s.reorgMutex.RUnlock() if err != nil { return err @@ -765,7 +769,7 @@ func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, m s.insertionMutex.Lock() defer s.insertionMutex.Unlock() - return s.addMessagesAndEndBatchImpl(pos, messagesAreConfirmed, messages, batch) + return s.addMessagesAndEndBatchImpl(pos, messagesAreConfirmed, messagesWithBlockHash, batch) } func (s *TransactionStreamer) getPrevPrevDelayedRead(pos arbutil.MessageIndex) (uint64, error) { diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index c4ae326df1..7d66e516b4 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -69,23 +69,21 @@ func TestContractTxDeploy(t *testing.T) { l2Msg = append(l2Msg, arbmath.U256Bytes(contractTx.Value)...) l2Msg = append(l2Msg, contractTx.Data...) 
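// The second argument passed to AddMessages below (true) is
// messagesAreConfirmed. As the AddMessagesAndEndBatch change earlier in this
// patch shows, confirmed messages are trimmed from the L1 price data cache
// and, when every message in the range is already present in the database,
// the batch is simply ended and the call returns early.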
- err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadataAndBlockHash{ + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{ { - MessageWithMeta: arbostypes.MessageWithMetadata{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_L2Message, - Poster: from, - BlockNumber: 0, - Timestamp: 0, - RequestId: &contractTx.RequestId, - L1BaseFee: &big.Int{}, - }, - L2msg: l2Msg, - BatchGasCost: new(uint64), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: from, + BlockNumber: 0, + Timestamp: 0, + RequestId: &contractTx.RequestId, + L1BaseFee: &big.Int{}, }, - DelayedMessagesRead: delayedMessagesRead, + L2msg: l2Msg, + BatchGasCost: new(uint64), }, + DelayedMessagesRead: delayedMessagesRead, }, }) Require(t, err) diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index 28d1b3bd66..b188504acb 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -72,11 +72,9 @@ func TestReorgResequencing(t *testing.T) { }, L2msg: append(builder.L2Info.GetAddress("User4").Bytes(), arbmath.Uint64ToU256Bytes(params.Ether)...), } - err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadataAndBlockHash{{ - MessageWithMeta: arbostypes.MessageWithMetadata{ - Message: newMessage, - DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, - }, + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ + Message: newMessage, + DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, }}) Require(t, err) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 36c7be7251..886a0528c7 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -91,10 +91,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { return false } Require(t, err) - emptyMessageWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ - MessageWithMeta: emptyMessage, - } - Require(t, node.TxStreamer.AddMessages(curMsgs, false, []arbostypes.MessageWithMetadataAndBlockHash{emptyMessageWithBlockHash})) + Require(t, node.TxStreamer.AddMessages(curMsgs, false, []arbostypes.MessageWithMetadata{emptyMessage})) return true } From 8671d06f3d69a4a27777ec0370c500601381adac Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 12:04:22 -0300 Subject: [PATCH 27/92] fix storing and retrieving block hash --- arbnode/transaction_streamer.go | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 7ff565ec16..0973ca942e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -536,15 +536,24 @@ func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil. 
return nil, err } + // get block hash key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) - data, err := s.db.Get(key) + hasBlockHash, err := s.db.Has(key) if err != nil { return nil, err } var blockHash *common.Hash - err = rlp.DecodeBytes(data, &blockHash) - if err != nil { - return nil, err + if hasBlockHash { + data, err := s.db.Get(key) + if err != nil { + return nil, err + } + var storedBlockHash common.Hash + err = rlp.DecodeBytes(data, &storedBlockHash) + if err != nil { + return nil, err + } + blockHash = &storedBlockHash } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ @@ -1087,8 +1096,13 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty } // write block hash + if msg.BlockHash == nil { + // don't write nil block hash to avoid issues with rlp decoder that + // doesn't produce nil values by default + return nil + } key = dbKey(blockHashInputFeedPrefix, uint64(pos)) - msgBytes, err = rlp.EncodeToBytes(msg.BlockHash) + msgBytes, err = rlp.EncodeToBytes(*msg.BlockHash) if err != nil { return err } From 0fc92af157ceb77d86c0f646d36dd06be3f3105d Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 12:04:44 -0300 Subject: [PATCH 28/92] check block_hash_mismatch in sequencer system tests --- system_tests/seqfeed_test.go | 85 +++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 749a91e3b1..f9cca03616 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -11,10 +11,16 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/broadcaster/backlog" + "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/util/testhelpers" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -38,7 +44,8 @@ func newBroadcastClientConfigTest(port int) *broadcastclient.Config { } func TestSequencerFeed(t *testing.T) { - t.Parallel() + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -73,6 +80,10 @@ func TestSequencerFeed(t *testing.T) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } + + if logHandler.WasLogged("block_hash_mismatch") { + t.Fatal("block_hash_mismatch was logged unexpectedly") + } } func TestRelayedSequencerFeed(t *testing.T) { @@ -250,3 +261,75 @@ func TestLyingSequencer(t *testing.T) { func TestLyingSequencerLocalDAS(t *testing.T) { testLyingSequencer(t, "files") } + +func TestBlockHashFeedMismatch(t *testing.T) { + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + backlogConfiFetcher := func() *backlog.Config { + return &backlog.DefaultTestConfig + } + bklg := backlog.NewBacklog(backlogConfiFetcher) + + feedErrChan := make(chan error) + wsBroadcastServer := wsbroadcastserver.NewWSBroadcastServer( + newBroadcasterConfigTest, + bklg, + 412346, + feedErrChan, + ) + err := wsBroadcastServer.Initialize() + if err != nil { + t.Fatal("error initializing wsBroadcastServer:", err) + } + err = wsBroadcastServer.Start(ctx) + if err != nil { + t.Fatal("error starting 
wsBroadcastServer:", err) + } + defer wsBroadcastServer.StopAndWait() + + port := wsBroadcastServer.ListenerAddr().(*net.TCPAddr).Port + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + cleanup := builder.Build(t) + defer cleanup() + + poster := common.HexToAddress("0xa4b000000000000000000073657175656e636572") + blockHash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + l2msg := []byte{4, 2, 248, 111, 131, 6, 74, 186, 128, 128, 132, 11, 235, 194, 0, 131, 122, 18, 0, 148, 12, 112, 159, 52, 15, 11, 178, 227, 97, 34, 158, 52, 91, 126, 38, 153, 157, 9, 105, 171, 133, 232, 212, 165, 16, 0, 128, 192, 1, 160, 75, 109, 200, 183, 223, 114, 85, 128, 133, 94, 26, 103, 145, 247, 47, 0, 114, 132, 133, 234, 222, 235, 102, 45, 2, 109, 83, 65, 210, 142, 242, 209, 160, 96, 90, 108, 188, 197, 195, 43, 222, 103, 155, 153, 81, 119, 74, 177, 103, 110, 134, 94, 221, 72, 236, 20, 86, 94, 226, 94, 5, 206, 196, 122, 119} + broadcastMessage := message.BroadcastMessage{ + Version: 1, + Messages: []*message.BroadcastFeedMessage{ + { + // SequenceNumber: 1, + SequenceNumber: 2, + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: poster, + BlockNumber: 29, + Timestamp: 1715136502, + RequestId: nil, + L1BaseFee: big.NewInt(0), + }, + L2msg: l2msg, + }, + DelayedMessagesRead: 1, + }, + BlockHash: &blockHash, + Signature: nil, + }, + }, + } + wsBroadcastServer.Broadcast(&broadcastMessage) + + time.Sleep(time.Second * 2) + + if !logHandler.WasLogged("block_hash_mismatch") { + t.Fatal("Failed to log block_hash_mismatch") + } +} From 202efe1c8b7b8dc19229950d4317e8ca8ba4a1f0 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 14:30:58 -0300 Subject: [PATCH 29/92] stores block hash even it is nil, which enables to iterate through range of keys to properly prune a range of block hashes from the db --- arbnode/transaction_streamer.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 0973ca942e..9e6ae7d92e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -140,6 +140,12 @@ type L1PriceData struct { currentEstimateOfL1GasPrice uint64 } +// Represents a block's hash in the database. +// Necessary because RLP decoder doesn't produce nil values by default. +type blockHashDBValue struct { + BlockHash *common.Hash `rlp:"nil"` +} + func (s *TransactionStreamer) CurrentEstimateOfL1GasPrice() uint64 { s.cachedL1PriceDataMutex.Lock() defer s.cachedL1PriceDataMutex.Unlock() @@ -548,12 +554,12 @@ func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil. 
if err != nil { return nil, err } - var storedBlockHash common.Hash - err = rlp.DecodeBytes(data, &storedBlockHash) + var blockHashDBVal blockHashDBValue + err = rlp.DecodeBytes(data, &blockHashDBVal) if err != nil { return nil, err } - blockHash = &storedBlockHash + blockHash = blockHashDBVal.BlockHash } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ @@ -1096,13 +1102,11 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty } // write block hash - if msg.BlockHash == nil { - // don't write nil block hash to avoid issues with rlp decoder that - // doesn't produce nil values by default - return nil + blockHashDBVal := blockHashDBValue{ + BlockHash: msg.BlockHash, } key = dbKey(blockHashInputFeedPrefix, uint64(pos)) - msgBytes, err = rlp.EncodeToBytes(*msg.BlockHash) + msgBytes, err = rlp.EncodeToBytes(blockHashDBVal) if err != nil { return err } From 2a56769f37d138185a7c92dfc9c0802128761037 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 15:04:59 -0300 Subject: [PATCH 30/92] add comment on why verifying if block hash key exists in DB before retrieving it --- arbnode/transaction_streamer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 9e6ae7d92e..dc375d642e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -542,7 +542,9 @@ func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil. return nil, err } - // get block hash + // Get block hash. + // First check if key exists in database so this procedure is backwards compatible + // with databases' snapshots that don't have block hashes stored. key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) hasBlockHash, err := s.db.Has(key) if err != nil { From b2bb3da34c58597f0045bf077dfdaa7a00571f9e Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 15:41:35 -0300 Subject: [PATCH 31/92] rm unwanted comment --- system_tests/seqfeed_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index f9cca03616..e56514a6d5 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -304,7 +304,6 @@ func TestBlockHashFeedMismatch(t *testing.T) { Version: 1, Messages: []*message.BroadcastFeedMessage{ { - // SequenceNumber: 1, SequenceNumber: 2, Message: arbostypes.MessageWithMetadata{ Message: &arbostypes.L1IncomingMessage{ From 881e2fd34ec60bd2c922d9871c1080c26c7de3f1 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 9 May 2024 16:23:56 -0300 Subject: [PATCH 32/92] Test that the output feed of a node that isn't the sequencer is properly processed --- system_tests/seq_coordinator_test.go | 31 ++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 886a0528c7..a069a2d5a1 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -8,12 +8,14 @@ import ( "errors" "fmt" "math/big" + "net" "testing" "time" "github.com/go-redis/redis/v8" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -21,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" + 
"github.com/offchainlabs/nitro/util/testhelpers" ) func initRedisForTest(t *testing.T, ctx context.Context, redisUrl string, nodeNames []string) { @@ -270,6 +273,8 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } func testCoordinatorMessageSync(t *testing.T, successCase bool) { + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -304,16 +309,25 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { nodeConfigDup := *builder.nodeConfig builder.nodeConfig = &nodeConfigDup - + builder.nodeConfig.Feed.Output = *newBroadcasterConfigTest() builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{builder.L2Info.GetAddress("User2").Hex()} } - testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: builder.nodeConfig}) defer cleanupB() + // Build nodeBOutputFeedReader. + // nodeB doesn't sequence transactions, but adds messages related to them to its output feed. + // nodeBOutputFeedReader reads those messages from this feed and processes them. + // nodeBOutputFeedReader doesn't read messages from L1 since none of the nodes posts to L1. + nodeBPort := testClientB.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + nodeConfigNodeBOutputFeedReader := arbnode.ConfigDefaultL1NonSequencerTest() + nodeConfigNodeBOutputFeedReader.Feed.Input = *newBroadcastClientConfigTest(nodeBPort) + testClientNodeBOutputFeedReader, cleanupNodeBOutputFeedReader := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigNodeBOutputFeedReader}) + defer cleanupNodeBOutputFeedReader() + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) err = builder.L2.Client.SendTransaction(ctx, tx) @@ -330,6 +344,19 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } + + // check that nodeBOutputFeedReader also processed the transaction + _, err = WaitForTx(ctx, testClientNodeBOutputFeedReader.Client, tx.Hash(), time.Second*5) + Require(t, err) + l2balance, err = testClientNodeBOutputFeedReader.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + + if logHandler.WasLogged("block_hash_mismatch") { + t.Fatal("block_hash_mismatch was logged unexpectedly") + } } else { _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second) if err == nil { From 0df0c201ce107e666e15f744f6c3d604182c3fb8 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 9 May 2024 19:16:48 -0300 Subject: [PATCH 33/92] improve get block hash from db --- arbnode/transaction_streamer.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index dc375d642e..d9c7fc2163 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -520,6 +520,10 @@ func dbKey(prefix []byte, pos uint64) []byte { return key } +func isErrNotFound(err error) bool { + return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, pebble.ErrNotFound) +} + // Note: if changed to acquire the mutex, some internal users may need to be updated to a non-locking version. 
func (s *TransactionStreamer) GetMessage(seqNum arbutil.MessageIndex) (*arbostypes.MessageWithMetadata, error) { key := dbKey(messagePrefix, uint64(seqNum)) @@ -543,25 +547,20 @@ func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil. } // Get block hash. - // First check if key exists in database so this procedure is backwards compatible - // with databases' snapshots that don't have block hashes stored. + // To keep it backwards compatible it is possible that a message related + // to a sequence number exists in the database but the block hash doesn't. key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) - hasBlockHash, err := s.db.Has(key) - if err != nil { - return nil, err - } var blockHash *common.Hash - if hasBlockHash { - data, err := s.db.Get(key) - if err != nil { - return nil, err - } + data, err := s.db.Get(key) + if err == nil { var blockHashDBVal blockHashDBValue err = rlp.DecodeBytes(data, &blockHashDBVal) if err != nil { return nil, err } blockHash = blockHashDBVal.BlockHash + } else if !isErrNotFound(err) { + return nil, err } msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ @@ -706,7 +705,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe if broadcastStartPos > 0 { _, err := s.GetMessage(broadcastStartPos - 1) if err != nil { - if !errors.Is(err, leveldb.ErrNotFound) && !errors.Is(err, pebble.ErrNotFound) { + if !isErrNotFound(err) { return err } // Message before current message doesn't exist in database, so don't add current messages yet From 61c7d376bcbf88bd8694d8383a4281cacecc5400 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 9 May 2024 20:24:10 -0300 Subject: [PATCH 34/92] improve TestBlockHashFeedMismatch --- system_tests/seqfeed_test.go | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index e56514a6d5..2aa64a801d 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -273,12 +273,11 @@ func TestBlockHashFeedMismatch(t *testing.T) { } bklg := backlog.NewBacklog(backlogConfiFetcher) - feedErrChan := make(chan error) wsBroadcastServer := wsbroadcastserver.NewWSBroadcastServer( newBroadcasterConfigTest, bklg, 412346, - feedErrChan, + nil, ) err := wsBroadcastServer.Initialize() if err != nil { @@ -292,11 +291,16 @@ func TestBlockHashFeedMismatch(t *testing.T) { port := wsBroadcastServer.ListenerAddr().(*net.TCPAddr).Port - builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) cleanup := builder.Build(t) defer cleanup() + testClient := builder.L2 + // related to: + // - builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + userAccount := "User2" + txHash := common.HexToHash("0x633f62b463cc0e52d842406995fb590654db40aace77bfca863ba0e8d2290f97") poster := common.HexToAddress("0xa4b000000000000000000073657175656e636572") blockHash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") l2msg := []byte{4, 2, 248, 111, 131, 6, 74, 186, 128, 128, 132, 11, 235, 194, 0, 131, 122, 18, 0, 148, 12, 112, 159, 52, 15, 11, 178, 227, 97, 34, 158, 52, 91, 126, 38, 153, 157, 9, 105, 171, 133, 232, 212, 165, 16, 0, 128, 192, 1, 160, 75, 109, 200, 183, 223, 114, 85, 128, 133, 94, 26, 103, 145, 247, 47, 0, 114, 132, 133, 234, 222, 235, 102, 45, 2, 109, 83, 65, 210, 142, 242, 
209, 160, 96, 90, 108, 188, 197, 195, 43, 222, 103, 155, 153, 81, 119, 74, 177, 103, 110, 134, 94, 221, 72, 236, 20, 86, 94, 226, 94, 5, 206, 196, 122, 119} @@ -304,16 +308,16 @@ func TestBlockHashFeedMismatch(t *testing.T) { Version: 1, Messages: []*message.BroadcastFeedMessage{ { - SequenceNumber: 2, + SequenceNumber: 1, Message: arbostypes.MessageWithMetadata{ Message: &arbostypes.L1IncomingMessage{ Header: &arbostypes.L1IncomingMessageHeader{ Kind: arbostypes.L1MessageType_L2Message, Poster: poster, BlockNumber: 29, - Timestamp: 1715136502, + Timestamp: 1715295980, RequestId: nil, - L1BaseFee: big.NewInt(0), + L1BaseFee: nil, }, L2msg: l2msg, }, @@ -326,8 +330,21 @@ func TestBlockHashFeedMismatch(t *testing.T) { } wsBroadcastServer.Broadcast(&broadcastMessage) - time.Sleep(time.Second * 2) + // By now, even though block hash mismatch, the transaction should still be processed + builder.L2Info.GenerateAccount(userAccount) + _, err = WaitForTx(ctx, testClient.Client, txHash, time.Second*15) + if err != nil { + t.Fatal("error waiting for tx:", err) + } + l2balance, err := testClient.Client.BalanceAt(ctx, builder.L2Info.GetAddress(userAccount), nil) + if err != nil { + t.Fatal("error getting balance:", err) + } + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + // check that block hash mismatch if !logHandler.WasLogged("block_hash_mismatch") { t.Fatal("Failed to log block_hash_mismatch") } From 70818c5f559a0acfbc87f6d08920090df351dff9 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 9 May 2024 20:31:49 -0300 Subject: [PATCH 35/92] add TestBlockHashFeedNil --- system_tests/seqfeed_test.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 2aa64a801d..946194f17d 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -262,7 +262,7 @@ func TestLyingSequencerLocalDAS(t *testing.T) { testLyingSequencer(t, "files") } -func TestBlockHashFeedMismatch(t *testing.T) { +func testBlockHashComparison(t *testing.T, blockHash *common.Hash, mustMismatch bool) { logHandler := testhelpers.InitTestLog(t, log.LvlTrace) ctx, cancel := context.WithCancel(context.Background()) @@ -302,7 +302,6 @@ func TestBlockHashFeedMismatch(t *testing.T) { userAccount := "User2" txHash := common.HexToHash("0x633f62b463cc0e52d842406995fb590654db40aace77bfca863ba0e8d2290f97") poster := common.HexToAddress("0xa4b000000000000000000073657175656e636572") - blockHash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") l2msg := []byte{4, 2, 248, 111, 131, 6, 74, 186, 128, 128, 132, 11, 235, 194, 0, 131, 122, 18, 0, 148, 12, 112, 159, 52, 15, 11, 178, 227, 97, 34, 158, 52, 91, 126, 38, 153, 157, 9, 105, 171, 133, 232, 212, 165, 16, 0, 128, 192, 1, 160, 75, 109, 200, 183, 223, 114, 85, 128, 133, 94, 26, 103, 145, 247, 47, 0, 114, 132, 133, 234, 222, 235, 102, 45, 2, 109, 83, 65, 210, 142, 242, 209, 160, 96, 90, 108, 188, 197, 195, 43, 222, 103, 155, 153, 81, 119, 74, 177, 103, 110, 134, 94, 221, 72, 236, 20, 86, 94, 226, 94, 5, 206, 196, 122, 119} broadcastMessage := message.BroadcastMessage{ Version: 1, @@ -323,7 +322,7 @@ func TestBlockHashFeedMismatch(t *testing.T) { }, DelayedMessagesRead: 1, }, - BlockHash: &blockHash, + BlockHash: blockHash, Signature: nil, }, }, @@ -344,8 +343,19 @@ func TestBlockHashFeedMismatch(t *testing.T) { t.Fatal("Unexpected balance:", l2balance) } - // check that block hash mismatch - if 
!logHandler.WasLogged("block_hash_mismatch") { + mismatched := logHandler.WasLogged("block_hash_mismatch") + if mustMismatch && !mismatched { t.Fatal("Failed to log block_hash_mismatch") + } else if !mustMismatch && mismatched { + t.Fatal("block_hash_mismatch was logged unexpectedly") } } + +func TestBlockHashFeedMismatch(t *testing.T) { + blockHash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + testBlockHashComparison(t, &blockHash, true) +} + +func TestBlockHashFeedNil(t *testing.T) { + testBlockHashComparison(t, nil, false) +} From 8326613d0a2365b52fe96d23b450df8a20237505 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 10:33:07 -0600 Subject: [PATCH 36/92] wrap l2chaindata with wasm --- cmd/nitro/init.go | 20 +++++++++++++++----- system_tests/common_test.go | 11 +++++++++-- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 750bf03516..e9d6a5d3fa 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -172,16 +172,21 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { - if readOnlyDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "", "l2chaindata/", true); err == nil { + if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "l2chaindata/", true); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() if !arbmath.BigEquals(chainConfig.ChainID, chainId) { return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) } - chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainData, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) if err != nil { - return chainDb, nil, err + return nil, nil, err + } + wasmDb, err := stack.OpenDatabase("wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, "wasm/", false) + if err != nil { + return nil, nil, err } + chainDb := stack.WrapDatabaseWithWasm(chainData, wasmDb) err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) @@ -230,10 +235,15 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainData, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) if err != nil { - return chainDb, nil, err + return nil, nil, err + } + wasmDb, err := stack.OpenDatabase("wasm", 
config.Execution.Caching.DatabaseCache, config.Persistent.Handles, "wasm/", false) + if err != nil { + return nil, nil, err } + chainDb := stack.WrapDatabaseWithWasm(chainData, wasmDb) if config.Init.ImportFile != "" { initDataReader, err = statetransfer.NewJsonInitDataReader(config.Init.ImportFile) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 9d461bd48c..0ffe3990c3 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -772,8 +772,11 @@ func createL2BlockChainWithStackConfig( stack, err = node.New(stackConfig) Require(t, err) - chainDb, err := stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "ancient", "l2chaindata/", false) + chainData, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) Require(t, err) + wasmData, err := stack.OpenDatabase("wasm", 0, 0, "wasm/", false) + Require(t, err) + chainDb := stack.WrapDatabaseWithWasm(chainData, wasmData) arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) @@ -976,8 +979,12 @@ func Create2ndNodeWithConfig( l2stack, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stack.OpenDatabaseWithFreezerAndWasm("l2chaindata", "wasm", 0, 0, "", "l2chaindata/", false) + l2chainData, err := l2stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + Require(t, err) + wasmData, err := l2stack.OpenDatabase("wasm", 0, 0, "wasm/", false) Require(t, err) + l2chainDb := l2stack.WrapDatabaseWithWasm(l2chainData, wasmData) + l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(l2InitData) From cfd24dcdd83b00610d107fe77234c08247a58ba0 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 10:33:50 -0600 Subject: [PATCH 37/92] geth: update pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 37b6489382..45145108b1 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 37b6489382bb884dd1216dcb0f6a224ce2ca5fe2 +Subproject commit 45145108b11e39ea57ee5b0a1238440edd1fa3ab From 1e153dd9c0b5171f058984c696a1f9ce26f8f8ad Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 11:53:53 -0600 Subject: [PATCH 38/92] program: compilation fix --- arbos/programs/native.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 4a3b8992cf..1111dc7233 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -176,7 +176,7 @@ func callProgram( db := interpreter.Evm().StateDB debug := stylusParams.debugMode - if len(asm) == 0 { + if len(localAsm) == 0 { log.Error("missing asm", "program", address, "module", moduleHash) panic("missing asm") } From 566e72cfdd42c9111bf43ac0d9c424f41c27c087 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 15:23:50 -0600 Subject: [PATCH 39/92] cache program could also recalc asm --- arbos/programs/native.go | 12 ++++++++---- arbos/programs/programs.go | 10 +++++++--- arbos/programs/wasm.go | 2 +- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 1111dc7233..18f6e8e48d 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -221,11 +221,15 @@ func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out // Caches a program in Rust. We write a record so that we can undo on revert. 
// For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. -func cacheProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { if runMode == core.MessageCommitMode { - asm := db.GetActivatedAsm(module) - state.CacheWasmRust(asm, module, version, debug) - db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: version, Debug: debug}) + // address is only used for logging + asm, err := getLocalAsm(db, module, common.Address{}, params.PageLimit, time, debug, program) + if err != nil { + panic("unable to recreate wasm") + } + state.CacheWasmRust(asm, module, program.version, debug) + db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: program.version, Debug: debug}) } } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index b277f5d678..8395980f68 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -120,14 +120,13 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c return 0, codeHash, common.Hash{}, nil, true, err } - // replace the cached asm + // remove prev asm if cached { oldModuleHash, err := p.moduleHashes.Get(codeHash) if err != nil { return 0, codeHash, common.Hash{}, nil, true, err } evictProgram(statedb, oldModuleHash, currentVersion, debugMode, runMode, expired) - cacheProgram(statedb, info.moduleHash, stylusVersion, debugMode, runMode) } if err := p.moduleHashes.Set(codeHash, info.moduleHash); err != nil { return 0, codeHash, common.Hash{}, nil, true, err @@ -152,6 +151,11 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c activatedAt: hoursSinceArbitrum(time), cached: cached, } + // replace the cached asm + if cached { + cacheProgram(statedb, info.moduleHash, programData, params, debugMode, time, runMode) + } + return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } @@ -386,7 +390,7 @@ func (p Programs) SetProgramCached( return err } if cache { - cacheProgram(db, moduleHash, program.version, debug, runMode) + cacheProgram(db, moduleHash, program, params, debug, time, runMode) } else { evictProgram(db, moduleHash, program.version, debug, runMode, expired) } diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 8a67babc16..ee8da44934 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -95,7 +95,7 @@ func activateProgram( } // stub any non-consensus, Rust-side caching updates -func cacheProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { } func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode, forever bool) { } From 97db9c98fe5d72ba7064310c4ccfa2191cc082be Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 15:24:35 -0600 Subject: [PATCH 40/92] geth: update pin for TryGetActivated --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 5ac25eda5d..8d94d2b164 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 5ac25eda5df447c061e33d74a6a85b03f0a7d85d +Subproject commit 8d94d2b164ea3b277a1e87dcdc6f61c5beeaefd8 From 
de022e8655ada2eaa6ab2f50b12e728ade4ecdb3 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 16:20:02 -0600 Subject: [PATCH 41/92] dbWithWasm moved --- cmd/nitro/init.go | 4 ++-- system_tests/common_test.go | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index e9d6a5d3fa..c52c87732c 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -186,7 +186,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return nil, nil, err } - chainDb := stack.WrapDatabaseWithWasm(chainData, wasmDb) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) @@ -243,7 +243,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return nil, nil, err } - chainDb := stack.WrapDatabaseWithWasm(chainData, wasmDb) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) if config.Init.ImportFile != "" { initDataReader, err = statetransfer.NewJsonInitDataReader(config.Init.ImportFile) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 3e5f6fd228..f6bfde2108 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -44,6 +44,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -776,7 +777,7 @@ func createL2BlockChainWithStackConfig( Require(t, err) wasmData, err := stack.OpenDatabase("wasm", 0, 0, "wasm/", false) Require(t, err) - chainDb := stack.WrapDatabaseWithWasm(chainData, wasmData) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData) arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) @@ -983,7 +984,7 @@ func Create2ndNodeWithConfig( Require(t, err) wasmData, err := l2stack.OpenDatabase("wasm", 0, 0, "wasm/", false) Require(t, err) - l2chainDb := l2stack.WrapDatabaseWithWasm(l2chainData, wasmData) + l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData) l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) From 90697dcab330c509ae3505a5597bb40d06cf9ce4 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 16:20:26 -0600 Subject: [PATCH 42/92] geth: update pin, move dbWithWasm --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 8d94d2b164..f8917436fc 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8d94d2b164ea3b277a1e87dcdc6f61c5beeaefd8 +Subproject commit f8917436fcfa6a6a2b15c0ec7e6f318687491a8c From 5a6a8ab5348e0a9afad0e434613094ed9ab2c878 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:26:57 -0600 Subject: [PATCH 43/92] multicall stylus: add capabilities --- arbitrator/stylus/tests/multicall/Cargo.lock | 312 +++++++++++++++++- arbitrator/stylus/tests/multicall/Cargo.toml | 4 + arbitrator/stylus/tests/multicall/src/main.rs | 109 ++++-- 3 files changed, 389 insertions(+), 36 deletions(-) diff --git a/arbitrator/stylus/tests/multicall/Cargo.lock b/arbitrator/stylus/tests/multicall/Cargo.lock index 
67b375d746..ca70689bf7 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.lock +++ b/arbitrator/stylus/tests/multicall/Cargo.lock @@ -17,16 +17,29 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e416903084d3392ebd32d94735c395d6709415b76c7728e594d3f996f2b03e65" dependencies = [ + "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more", "hex-literal", "itoa", + "proptest", "ruint", + "serde", "tiny-keccak", ] +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "arrayvec", + "bytes", +] + [[package]] name = "alloy-sol-macro" version = "0.3.1" @@ -51,20 +64,48 @@ dependencies = [ "alloy-primitives", "alloy-sol-macro", "const-hex", + "serde", ] +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + [[package]] name = "block-buffer" version = "0.10.4" @@ -86,6 +127,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -98,7 +145,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "268f52aae268980d03dd9544c1ea591965b2735b038d6998d6e4ab37c8c24445" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "hex", "serde", @@ -184,6 +231,28 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" + +[[package]] +name = "fnv" +version = "1.0.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "generic-array" version = "0.14.7" @@ -194,6 +263,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "getrandom" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi", +] + [[package]] name = "heck" version = "0.4.1" @@ -241,9 +321,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libm" @@ -251,18 +331,44 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memory_units" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" + +[[package]] +name = "mini-alloc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9993556d3850cdbd0da06a3dc81297edcfa050048952d84d75e8b944e8f5af" +dependencies = [ + "cfg-if 1.0.0", + "wee_alloc", +] + [[package]] name = "multicall" version = "0.1.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "hex", + "mini-alloc", "stylus-sdk", + "wee_alloc", ] [[package]] @@ -296,15 +402,26 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ - "bitflags", + "bit-set", + "bitflags 1.3.2", "byteorder", + "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", + "regex-syntax 0.6.29", + "rusty-fork", + "tempfile", "unarray", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.29" @@ -320,6 +437,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ + "libc", + "rand_chacha", "rand_core", ] @@ -338,6 +457,9 @@ name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] [[package]] name = "rand_xorshift" @@ -357,7 +479,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax", + "regex-syntax 0.7.4", ] [[package]] @@ -368,9 +490,15 @@ checksum = 
"b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.4", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.7.4" @@ -406,6 +534,31 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "semver" version = "1.0.17" @@ -417,6 +570,20 @@ name = "serde" version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] [[package]] name = "sha3" @@ -434,7 +601,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "convert_case 0.6.0", "lazy_static", "proc-macro2", @@ -451,7 +618,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "derivative", "hex", "keccak-const", @@ -492,6 +659,18 @@ dependencies = [ "syn 2.0.25", ] +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand", + "rustix", + "windows-sys", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -537,6 +716,121 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wee_alloc" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "memory_units", + "winapi", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = 
"winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + [[package]] name = "zeroize" version = "1.6.0" diff --git a/arbitrator/stylus/tests/multicall/Cargo.toml b/arbitrator/stylus/tests/multicall/Cargo.toml index 2ab5728d12..3bc48c6826 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.toml +++ b/arbitrator/stylus/tests/multicall/Cargo.toml @@ -4,8 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] +alloy-primitives = "0.3.1" +alloy-sol-types = "0.3.1" +mini-alloc = "0.4.2" stylus-sdk = { path = "../../../langs/rust/stylus-sdk", features = ["reentrant"] } hex = "0.4.3" +wee_alloc = "0.4.5" [profile.release] codegen-units = 1 diff --git a/arbitrator/stylus/tests/multicall/src/main.rs b/arbitrator/stylus/tests/multicall/src/main.rs index 1f255cd99c..fd6929b8f1 100644 --- a/arbitrator/stylus/tests/multicall/src/main.rs +++ b/arbitrator/stylus/tests/multicall/src/main.rs @@ -3,13 +3,28 @@ #![no_main] +extern crate alloc; + use stylus_sdk::{ + storage::{StorageCache, GlobalStorage}, alloy_primitives::{Address, B256}, + alloy_sol_types::sol, call::RawCall, 
console, + evm, prelude::*, }; +use wee_alloc::WeeAlloc; + +#[global_allocator] +static ALLOC: WeeAlloc = WeeAlloc::INIT; + +sol!{ + event Called(address addr, uint8 count, bool success, bytes return_data); + event Storage(bytes32 slot, bytes32 data, bool write); +} + #[entrypoint] fn user_main(input: Vec) -> Result, Vec> { let mut input = input.as_slice(); @@ -19,7 +34,7 @@ fn user_main(input: Vec) -> Result, Vec> { // combined output of all calls let mut output = vec![]; - console!("Calling {count} contract(s)"); + console!("Performing {count} action(s)"); for _ in 0..count { let length = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; input = &input[4..]; @@ -30,35 +45,75 @@ fn user_main(input: Vec) -> Result, Vec> { let kind = curr[0]; curr = &curr[1..]; - let mut value = None; - if kind == 0 { - value = Some(B256::try_from(&curr[..32]).unwrap()); - curr = &curr[32..]; - } + if kind & 0xf0 == 0 { + // caller + let mut value = None; + if kind & 0x3 == 0 { + value = Some(B256::try_from(&curr[..32]).unwrap()); + curr = &curr[32..]; + }; - let addr = Address::try_from(&curr[..20]).unwrap(); - let data = &curr[20..]; - match value { - Some(value) if !value.is_zero() => console!( - "Calling {addr} with {} bytes and value {} {kind}", - data.len(), - hex::encode(value) - ), - _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), - } + let addr = Address::try_from(&curr[..20]).unwrap(); + let data = &curr[20..]; + match value { + Some(value) if !value.is_zero() => console!( + "Calling {addr} with {} bytes and value {} {kind}", + data.len(), + hex::encode(value) + ), + _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), + } - let raw_call = match kind { - 0 => RawCall::new_with_value(value.unwrap_or_default().into()), - 1 => RawCall::new_delegate(), - 2 => RawCall::new_static(), - x => panic!("unknown call kind {x}"), - }; - let return_data = unsafe { raw_call.call(addr, data)? 
}; - - if !return_data.is_empty() { - console!("Contract {addr} returned {} bytes", return_data.len()); + let raw_call = match kind & 0x3 { + 0 => RawCall::new_with_value(value.unwrap_or_default().into()), + 1 => RawCall::new_delegate(), + 2 => RawCall::new_static(), + x => panic!("unknown call kind {x}"), + }; + let (success, return_data) = match unsafe { raw_call.call(addr, data) } { + Ok(return_data) => (true, return_data), + Err(revert_data) => { + if kind & 0x4 == 0 { + return Err(revert_data) + } + (false, vec![]) + }, + }; + + if !return_data.is_empty() { + console!("Contract {addr} returned {} bytes", return_data.len()); + } + if kind & 0x8 != 0 { + evm::log(Called { addr, count, success, return_data: return_data.clone() }) + } + output.extend(return_data); + } else if kind & 0xf0 == 0x10 { + // storage + let slot = B256::try_from(&curr[..32]).unwrap(); + curr = &curr[32..]; + let data; + let write; + if kind & 0x7 == 0 { + console!("writing slot {}", curr.len()); + data = B256::try_from(&curr[..32]).unwrap(); + write = true; + unsafe { StorageCache::set_word(slot.into(), data.into()) }; + StorageCache::flush(); + } else if kind & 0x7 == 1{ + console!("reading slot"); + write = false; + data = StorageCache::get_word(slot.into()); + output.extend(data.clone()); + } else { + panic!("unknown storage kind {kind}") + } + if kind & 0x8 != 0 { + console!("slot: {}, data: {}, write {write}", slot, data); + evm::log(Storage { slot: slot.into(), data: data.into(), write }) + } + } else { + panic!("unknown action {kind}") } - output.extend(return_data); input = next; } From 34e17677a660454c279bc39b4b2ade52b21b4efe Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:34:49 -0600 Subject: [PATCH 44/92] create stylus recursive tests --- system_tests/program_recursive_test.go | 195 +++++++++++++++++++++++++ system_tests/stylus_test.go | 41 ++++++ 2 files changed, 236 insertions(+) create mode 100644 system_tests/program_recursive_test.go diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go new file mode 100644 index 0000000000..d4cab510d3 --- /dev/null +++ b/system_tests/program_recursive_test.go @@ -0,0 +1,195 @@ +package arbtest + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +type multiCallRecurse struct { + Name string + opcode vm.OpCode +} + +func printRecurse(recurse []multiCallRecurse) string { + result := "" + for _, contract := range recurse { + result = result + "(" + contract.Name + "," + contract.opcode.String() + ")" + } + return result +} + +func testProgramRecursiveCall(t *testing.T, builder *NodeBuilder, slotVals map[string]common.Hash, rander *testhelpers.PseudoRandomDataSource, recurse []multiCallRecurse) uint64 { + ctx := builder.ctx + slot := common.HexToHash("0x11223344556677889900aabbccddeeff") + val := common.Hash{} + args := []byte{} + if recurse[0].opcode == vm.SSTORE { + // send event from storage on sstore + val = rander.GetHash() + args = append([]byte{0x1, 0, 0, 0, 65, 0x18}, slot[:]...) + args = append(args, val[:]...) + } else if recurse[0].opcode == vm.SLOAD { + args = append([]byte{0x1, 0, 0, 0, 33, 0x11}, slot[:]...) 
+ } else { + t.Fatal("first level must be sload or sstore") + } + shouldSucceed := true + delegateChangesStorageDest := true + storageDest := recurse[0].Name + for i := 1; i < len(recurse); i++ { + call := recurse[i] + prev := recurse[i-1] + args = argsForMulticall(call.opcode, builder.L2Info.GetAddress(prev.Name), nil, args) + if call.opcode == vm.STATICCALL && recurse[0].opcode == vm.SSTORE { + shouldSucceed = false + } + if delegateChangesStorageDest && call.opcode == vm.DELEGATECALL { + storageDest = call.Name + } else { + delegateChangesStorageDest = false + } + } + if recurse[0].opcode == vm.SLOAD { + // send event from caller on sload + args[5] = args[5] | 0x8 + } + multiCaller, err := mocksgen.NewMultiCallTest(builder.L2Info.GetAddress(recurse[len(recurse)-1].Name), builder.L2.Client) + Require(t, err) + ownerTransact := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + ownerTransact.GasLimit = 10000000 + tx, err := multiCaller.Fallback(&ownerTransact, args) + Require(t, err) + receipt, err := WaitForTx(ctx, builder.L2.Client, tx.Hash(), time.Second*3) + Require(t, err) + + if shouldSucceed { + if receipt.Status != types.ReceiptStatusSuccessful { + log.Error("error when shouldn't", "case", printRecurse(recurse)) + Fatal(t, arbutil.DetailTxError(ctx, builder.L2.Client, tx, receipt)) + } + if len(receipt.Logs) != 1 { + Fatal(t, "incorrect number of logs: ", len(receipt.Logs)) + } + if recurse[0].opcode == vm.SSTORE { + slotVals[storageDest] = val + storageEvt, err := multiCaller.ParseStorage(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(storageEvt.Data[:]) + gotSlot := common.BytesToHash(storageEvt.Slot[:]) + if gotData != val || gotSlot != slot || storageEvt.Write != (recurse[0].opcode == vm.SSTORE) { + Fatal(t, "unexpected event", gotData, val, gotSlot, slot, storageEvt.Write, recurse[0].opcode) + } + } else { + calledEvt, err := multiCaller.ParseCalled(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(calledEvt.ReturnData) + if gotData != slotVals[storageDest] { + Fatal(t, "unexpected event", gotData, val, slotVals[storageDest]) + } + } + } else if receipt.Status == types.ReceiptStatusSuccessful { + Fatal(t, "should have failed") + } + for contract, expected := range slotVals { + found, err := builder.L2.Client.StorageAt(ctx, builder.L2Info.GetAddress(contract), slot, receipt.BlockNumber) + Require(t, err) + foundHash := common.BytesToHash(found) + if expected != foundHash { + Fatal(t, "contract", contract, "expected", expected, "found", foundHash) + } + } + return receipt.BlockNumber.Uint64() +} + +func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit bool) { + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // set-up contracts + callsAddr := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + builder.L2Info.SetContract("multicall-rust", callsAddr) + multiCallWasm, _ := readWasmFile(t, rustFile("multicall")) + auth.GasLimit = 32000000 // skip gas estimation + multicallB := deployContract(t, ctx, auth, l2client, multiCallWasm) + builder.L2Info.SetContract("multicall-rust-b", multicallB) + multiAddr, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client) + builder.L2Info.SetContract("multicall-evm", multiAddr) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + Require(t, err) + slotVals := make(map[string]common.Hash) + rander := testhelpers.NewPseudoRandomDataSource(t, 0) + + // set-up 
validator + validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() + validatorConfig.BlockValidator.Enable = true + AddDefaultValNode(t, ctx, validatorConfig, jit, "") + valClient, valCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) + defer valCleanup() + + // store initial values + for _, contract := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + storeRecure := []multiCallRecurse{ + { + Name: contract, + opcode: vm.SSTORE, + }, + } + testProgramRecursiveCall(t, builder, slotVals, rander, storeRecure) + } + + // execute transactions + blockNum := uint64(0) + for { + item := int(rander.GetUint64()/4) % len(tests) + blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item]) + tests[item] = tests[len(tests)-1] + tests = tests[:len(tests)-1] + if len(tests)%100 == 0 { + log.Error("running transactions..", "blockNum", blockNum, "remaining", len(tests)) + } + if len(tests) == 0 { + break + } + } + + // wait for validation + for { + got := valClient.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(blockNum), time.Second*10) + if got { + break + } + log.Error("validating blocks..", "waiting for", blockNum, "validated", valClient.ConsensusNode.BlockValidator.GetValidated()) + } +} + +func TestProgramCallSimple(t *testing.T) { + tests := [][]multiCallRecurse{ + { + { + Name: "multicall-rust", + opcode: vm.SLOAD, + }, + { + Name: "multicall-rust", + opcode: vm.STATICCALL, + }, + { + Name: "multicall-rust", + opcode: vm.DELEGATECALL, + }, + }, + } + testProgramResursiveCalls(t, tests, true) +} diff --git a/system_tests/stylus_test.go b/system_tests/stylus_test.go index 46a9103b04..97f3041196 100644 --- a/system_tests/stylus_test.go +++ b/system_tests/stylus_test.go @@ -8,6 +8,8 @@ package arbtest import ( "testing" + + "github.com/ethereum/go-ethereum/core/vm" ) func TestProgramArbitratorKeccak(t *testing.T) { @@ -67,3 +69,42 @@ func TestProgramArbitratorActivateFails(t *testing.T) { func TestProgramArbitratorEarlyExit(t *testing.T) { testEarlyExit(t, false) } + +func fullRecurseTest() [][]multiCallRecurse { + result := make([][]multiCallRecurse, 0) + for _, op0 := range []vm.OpCode{vm.SSTORE, vm.SLOAD} { + for _, contract0 := range []string{"multicall-rust", "multicall-evm"} { + for _, op1 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract1 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + for _, op2 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract2 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + for _, op3 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract3 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + recurse := make([]multiCallRecurse, 4) + recurse[0].opcode = op0 + recurse[0].Name = contract0 + recurse[1].opcode = op1 + recurse[1].Name = contract1 + recurse[2].opcode = op2 + recurse[2].Name = contract2 + recurse[3].opcode = op3 + recurse[3].Name = contract3 + result = append(result, recurse) + } + } + } + } + } + } + } + } + return result +} + +func TestProgramLongCall(t *testing.T) { + testProgramResursiveCalls(t, fullRecurseTest(), true) +} + +func TestProgramLongArbitratorCall(t *testing.T) { + testProgramResursiveCalls(t, fullRecurseTest(), false) +} From 2eee7224f1e1c2588fe8fe3768b9d3de4fb89c95 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:35:12 -0600 Subject: [PATCH 45/92] ci: add 
step to run recursive tests --- .github/workflows/ci.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69731435bb..4d97b5bfd5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - test-mode: [defaults, race, challenge, stylus] + test-mode: [defaults, race, challenge, stylus, long] steps: - name: Checkout @@ -167,8 +167,14 @@ if: matrix.test-mode == 'stylus' run: | packages=`go list ./...` - stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run=TestProgramArbitrator > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramArbitrator" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + - name: run long stylus tests + if: matrix.test-mode == 'long' + run: | + packages=`go list ./...` + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + - name: Archive detailed run log uses: actions/upload-artifact@v3 with: From 3b925be92b0bfe24b80c85cefcd46a6486eecd70 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:35:45 -0600 Subject: [PATCH 46/92] contracts: update pin for multicall EVM-equivalent --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 77ee9de042..7a41cd59cd 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 77ee9de042de225fab560096f7624f3d13bd12cb +Subproject commit 7a41cd59cdf2eb01cf31c2351b8d1ff6fbf52178 From fcd2370d2295412d0e8f0361cdaa143a84488c48 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:38:55 -0600 Subject: [PATCH 47/92] test-cases/user.wat: add call to storage_load_bytes --- arbitrator/prover/test-cases/dynamic.wat | 3 +-- arbitrator/prover/test-cases/link.wat | 2 +- arbitrator/prover/test-cases/user.wat | 13 +++++++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arbitrator/prover/test-cases/dynamic.wat b/arbitrator/prover/test-cases/dynamic.wat index 97c55ba80b..8771bde87c 100644 --- a/arbitrator/prover/test-cases/dynamic.wat +++ b/arbitrator/prover/test-cases/dynamic.wat @@ -12,8 +12,7 @@ ;; WAVM Module hash (data (i32.const 0x000) - "\87\12\6b\19\8a\ce\0c\ba\00\6a\ab\9b\b7\45\bb\0a\ac\48\4d\6b\b8\b5\f9\03\a2\99\8f\64\00\9f\e2\04") ;; user - + "\a1\49\cf\81\13\ff\9c\95\f2\c8\c2\a1\42\35\75\36\7d\e8\6d\d4\22\d8\71\14\bb\9e\a4\7b\af\53\5d\d7") ;; user (func $start (local $user i32) (local $internals i32) ;; link in user.wat i32.const 0 diff --git a/arbitrator/prover/test-cases/link.wat b/arbitrator/prover/test-cases/link.wat index e033bf0e98..ef15326481 100644 --- a/arbitrator/prover/test-cases/link.wat +++ b/arbitrator/prover/test-cases/link.wat @@ -30,7 +30,7 @@ (data (i32.const 0x140) "\47\f7\4f\9c\21\51\4f\52\24\ea\d3\37\5c\bf\a9\1b\1a\5f\ef\22\a5\2a\60\30\c5\52\18\90\6b\b1\51\e5") ;; iops (data (i32.const 
0x160) - "\87\12\6b\19\8a\ce\0c\ba\00\6a\ab\9b\b7\45\bb\0a\ac\48\4d\6b\b8\b5\f9\03\a2\99\8f\64\00\9f\e2\04") ;; user + "\a1\49\cf\81\13\ff\9c\95\f2\c8\c2\a1\42\35\75\36\7d\e8\6d\d4\22\d8\71\14\bb\9e\a4\7b\af\53\5d\d7") ;; user (data (i32.const 0x180) "\ee\47\08\f6\47\b2\10\88\1f\89\86\e7\e3\79\6b\b2\77\43\f1\4e\ee\cf\45\4a\9b\7c\d7\c4\5b\63\b6\d7") ;; return diff --git a/arbitrator/prover/test-cases/user.wat b/arbitrator/prover/test-cases/user.wat index d159339f66..9ecb4dcc45 100644 --- a/arbitrator/prover/test-cases/user.wat +++ b/arbitrator/prover/test-cases/user.wat @@ -2,6 +2,14 @@ ;; For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE (module + (import "vm_hooks" "storage_load_bytes32" (func $storage_load_bytes32 (param i32 i32))) + + (func $storage_load (result i32) + i32.const 0 + i32.const 32 + call $storage_load_bytes32 + i32.const 0 + ) (func $safe (result i32) i32.const 5 ) @@ -35,6 +43,11 @@ (then (call $out_of_bounds) (return)) ) + (i32.eq (local.get $args_len) (i32.const 32)) + (if + (then (call $storage_load) (return)) + ) + unreachable ) (memory (export "memory") 1 1)) From 5dd329a9da4f9716b88234a36fc76a0ef8388b5a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:40:09 -0600 Subject: [PATCH 48/92] prover backoff by data for SwitchThread there is an important difference between 0 and non-zero (and only two non-zero values used) --- arbitrator/prover/src/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 697d178fc7..9ddd5020c8 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -263,7 +263,10 @@ fn main() -> Result<()> { if opts.proving_backoff { let mut extra_data = 0; - if matches!(next_opcode, Opcode::ReadInboxMessage | Opcode::ReadPreImage) { + if matches!( + next_opcode, + Opcode::ReadInboxMessage | Opcode::ReadPreImage | Opcode::SwitchThread + ) { extra_data = next_inst.argument_data; } let count_entry = proving_backoff From df5dcb53905eeeccf1d97ddcb86db3ec2b97185a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:41:31 -0600 Subject: [PATCH 49/92] arbos: make more types and logic public, to be used by tests --- arbos/programs/native.go | 16 ++++++++-------- arbos/programs/programs.go | 28 ++++++++++++++-------------- arbos/programs/wasm.go | 25 ++++++++++++++++++------- arbos/programs/wasm_api.go | 8 ++++---- 4 files changed, 44 insertions(+), 33 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 09989f3380..c44f8f56cb 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -106,13 +106,13 @@ func callProgram( interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, - evmData *evmData, - stylusParams *goParams, + evmData *EvmData, + stylusParams *ProgParams, memoryModel *MemoryModel, ) ([]byte, error) { db := interpreter.Evm().StateDB asm := db.GetActivatedAsm(moduleHash) - debug := stylusParams.debugMode + debug := stylusParams.DebugMode if len(asm) == 0 { log.Error("missing asm", "program", address, "module", moduleHash) @@ -236,18 +236,18 @@ func goSlice(slice []byte) C.GoSliceData { } } -func (params *goParams) encode() C.StylusConfig { +func (params *ProgParams) encode() C.StylusConfig { pricing := C.PricingParams{ - ink_price: u32(params.inkPrice.ToUint32()), + ink_price: u32(params.InkPrice.ToUint32()), } return C.StylusConfig{ - version: u16(params.version), - max_depth: 
u32(params.maxDepth), + version: u16(params.Version), + max_depth: u32(params.MaxDepth), pricing: pricing, } } -func (data *evmData) encode() C.EvmData { +func (data *EvmData) encode() C.EvmData { return C.EvmData{ block_basefee: hashToBytes32(data.blockBasefee), chainid: u64(data.chainId), diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 779f2d6c67..3f7bdc39ca 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -181,7 +181,7 @@ func (p Programs) CallProgram( if err != nil { return nil, err } - goParams := p.goParams(program.version, debugMode, params) + goParams := p.progParams(program.version, debugMode, params) l1BlockNumber, err := evm.ProcessingHook.L1BlockNumber(evm.Context) if err != nil { return nil, err @@ -205,7 +205,7 @@ func (p Programs) CallProgram( statedb.AddStylusPages(program.footprint) defer statedb.SetStylusPagesOpen(open) - evmData := &evmData{ + evmData := &EvmData{ blockBasefee: common.BigToHash(evm.Context.BaseFee), chainId: evm.ChainConfig().ChainID.Uint64(), blockCoinbase: evm.Context.Coinbase, @@ -444,23 +444,23 @@ func (p Program) cachedGas(params *StylusParams) uint64 { return am.SaturatingUAdd(base, am.DivCeil(dyno, 100)) } -type goParams struct { - version uint16 - maxDepth uint32 - inkPrice uint24 - debugMode bool +type ProgParams struct { + Version uint16 + MaxDepth uint32 + InkPrice uint24 + DebugMode bool } -func (p Programs) goParams(version uint16, debug bool, params *StylusParams) *goParams { - return &goParams{ - version: version, - maxDepth: params.MaxStackDepth, - inkPrice: params.InkPrice, - debugMode: debug, +func (p Programs) progParams(version uint16, debug bool, params *StylusParams) *ProgParams { + return &ProgParams{ + Version: version, + MaxDepth: params.MaxStackDepth, + InkPrice: params.InkPrice, + DebugMode: debug, } } -type evmData struct { +type EvmData struct { blockBasefee common.Hash chainId uint64 blockCoinbase common.Address diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 77eb7e0f2f..1e9b5e680b 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -135,14 +135,26 @@ func callProgram( interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, - evmData *evmData, - params *goParams, + evmData *EvmData, + params *ProgParams, memoryModel *MemoryModel, ) ([]byte, error) { reqHandler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) + gasLeft, retData, err := CallProgramLoop(moduleHash, calldata, scope.Contract.Gas, evmData, params, reqHandler) + scope.Contract.Gas = gasLeft + return retData, err +} + +func CallProgramLoop( + moduleHash common.Hash, + calldata []byte, + gas uint64, + evmData *EvmData, + params *ProgParams, + reqHandler RequestHandler) (uint64, []byte, error) { configHandler := params.createHandler() dataHandler := evmData.createHandler() - debug := params.debugMode + debug := params.DebugMode module := newProgram( unsafe.Pointer(&moduleHash[0]), @@ -150,7 +162,7 @@ func callProgram( uint32(len(calldata)), configHandler, dataHandler, - scope.Contract.Gas, + gas, ) reqId := startProgram(module) for { @@ -162,12 +174,11 @@ func callProgram( popProgram() status := userStatus(reqTypeId) gasLeft := arbmath.BytesToUint(reqData[:8]) - scope.Contract.Gas = gasLeft data, msg, err := status.toResult(reqData[8:], debug) if status == userFailure && debug { - log.Warn("program failure", "err", err, "msg", msg, "program", address) + log.Warn("program failure", "err", err, "msg", msg, "moduleHash", moduleHash) } - return 
data, err + return gasLeft, data, err } reqType := RequestType(reqTypeId - EvmApiMethodReqOffset) diff --git a/arbos/programs/wasm_api.go b/arbos/programs/wasm_api.go index fb0f731402..d7bac056c0 100644 --- a/arbos/programs/wasm_api.go +++ b/arbos/programs/wasm_api.go @@ -38,12 +38,12 @@ func createEvmData( reentrant uint32, ) evmDataHandler -func (params *goParams) createHandler() stylusConfigHandler { - debug := arbmath.BoolToUint32(params.debugMode) - return createStylusConfig(uint32(params.version), params.maxDepth, params.inkPrice.ToUint32(), debug) +func (params *ProgParams) createHandler() stylusConfigHandler { + debug := arbmath.BoolToUint32(params.DebugMode) + return createStylusConfig(uint32(params.Version), params.MaxDepth, params.InkPrice.ToUint32(), debug) } -func (data *evmData) createHandler() evmDataHandler { +func (data *EvmData) createHandler() evmDataHandler { return createEvmData( arbutil.SliceToUnsafePointer(data.blockBasefee[:]), data.chainId, From f2448fede36f35df82db94930ea35b7ca1ebe426 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 10 May 2024 18:42:57 -0600 Subject: [PATCH 50/92] test-cases/go: add stylus contract this is the only test that checks 5 recursive stylus calls with one-step-proofs for each relevant SwitchThread --- Makefile | 10 ++++-- arbitrator/prover/test-cases/go/main.go | 44 +++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 36b55bed6e..53b89c8d72 100644 --- a/Makefile +++ b/Makefile @@ -289,7 +289,7 @@ $(arbitrator_jit): $(DEP_PREDICATE) $(jit_files) $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm: $(arbitrator_cases)/rust/src/bin/%.rs $(arbitrator_cases)/rust/src/lib.rs cargo build --manifest-path $(arbitrator_cases)/rust/Cargo.toml --release --target wasm32-wasi --bin $(patsubst $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm,%, $@) -$(arbitrator_cases)/go/testcase.wasm: $(arbitrator_cases)/go/*.go +$(arbitrator_cases)/go/testcase.wasm: $(arbitrator_cases)/go/*.go .make/solgen cd $(arbitrator_cases)/go && GOOS=wasip1 GOARCH=wasm go build -o testcase.wasm $(arbitrator_generated_header): $(DEP_PREDICATE) $(stylus_files) @@ -439,8 +439,12 @@ target/testdata/preimages.bin: contracts/test/prover/proofs/rust-%.json: $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -b --allow-hostapi --require-success --inbox-add-stub-headers --inbox $(arbitrator_cases)/rust/data/msg0.bin --inbox $(arbitrator_cases)/rust/data/msg1.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg0.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg1.bin --preimages target/testdata/preimages.bin -contracts/test/prover/proofs/go.json: $(arbitrator_cases)/go/testcase.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(arbitrator_tests_link_deps) - $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -i 50000000 --require-success --preimages target/testdata/preimages.bin +contracts/test/prover/proofs/go.json: $(arbitrator_cases)/go/testcase.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(arbitrator_tests_link_deps) $(arbitrator_cases)/user.wasm + $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -b --require-success --preimages target/testdata/preimages.bin --stylus-modules $(arbitrator_cases)/user.wasm + +# avoid testing user.wasm in onestepproofs. It can only run as stylus program. 
+contracts/test/prover/proofs/user.json: + echo "[]" > $@ # avoid testing read-inboxmsg-10 in onestepproofs. It's used for go challenge testing. contracts/test/prover/proofs/read-inboxmsg-10.json: diff --git a/arbitrator/prover/test-cases/go/main.go b/arbitrator/prover/test-cases/go/main.go index 0df8010449..1f81553af2 100644 --- a/arbitrator/prover/test-cases/go/main.go +++ b/arbitrator/prover/test-cases/go/main.go @@ -1,6 +1,9 @@ // Copyright 2021-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE +//go:build wasm +// +build wasm + package main import ( @@ -19,6 +22,7 @@ import ( merkletree "github.com/wealdtech/go-merkletree" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/wavmio" ) @@ -69,11 +73,51 @@ const BYTES_PER_FIELD_ELEMENT = 32 var BLS_MODULUS, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10) +var stylusModuleHash = common.HexToHash("a149cf8113ff9c95f2c8c2a1423575367de86dd422d87114bb9ea47baf535dd7") // user.wat + +func callStylusProgram(recurse int) { + evmData := programs.EvmData{} + progParams := programs.ProgParams{ + MaxDepth: 10000, + InkPrice: 1, + DebugMode: true, + } + reqHandler := func(req programs.RequestType, input []byte) ([]byte, []byte, uint64) { + fmt.Printf("got request type %d req %v\n", req, input) + if req == programs.GetBytes32 { + if recurse > 0 { + callStylusProgram(recurse - 1) + } + answer := common.Hash{} + return answer[:], nil, 1 + } + + panic("unsupported call") + } + calldata := common.Hash{}.Bytes() + _, _, err := programs.CallProgramLoop( + stylusModuleHash, + calldata, + 160000000, + &evmData, + &progParams, + reqHandler) + if err != nil { + panic(err) + } +} + func main() { fmt.Printf("starting executable with %v arg(s): %v\n", len(os.Args), os.Args) runtime.GC() time.Sleep(time.Second) + fmt.Printf("Stylus test\n") + + callStylusProgram(5) + + fmt.Printf("Stylus test done!\n") + // Data for the tree data := [][]byte{ []byte("Foo"), From 75112329a44848791ba7fc50b9b0b97463a0c59d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 09:38:27 -0500 Subject: [PATCH 51/92] Fix zero bid in data poster --- arbnode/dataposter/data_poster.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index b34552a9b9..6b7644c49e 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -592,7 +592,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) - if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + if lastTx != nil && numBlobs > 0 && lastTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { // Increase the non-blob fee cap to the minimum rbf increase newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) @@ -665,6 +665,10 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), 
nil } + // Ensure we bid at least 1 wei to prevent division by zero + newBaseFeeCap = arbmath.BigMax(newBaseFeeCap, common.Big1) + newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, common.Big1) + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } @@ -934,8 +938,8 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa } newTx := *prevTx - if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || - (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { + if (prevTx.FullTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease) || + (prevTx.FullTx.BlobGasFeeCap() != nil && prevTx.FullTx.BlobGasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), From 4d164e4c8b5be72010da9d370ccf5adfc7837d24 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 10:04:57 -0500 Subject: [PATCH 52/92] Don't return common.Big1 to prevent mutation issues --- arbnode/dataposter/data_poster.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 6b7644c49e..2b0275c735 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -666,8 +666,12 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u } // Ensure we bid at least 1 wei to prevent division by zero - newBaseFeeCap = arbmath.BigMax(newBaseFeeCap, common.Big1) - newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, common.Big1) + if newBaseFeeCap.Sign() == 0 { + newBaseFeeCap.SetInt64(1) + } + if newBlobFeeCap.Sign() == 0 { + newBlobFeeCap.SetInt64(1) + } return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } From 9348855a2f900c24069d378c822a9becaba0c0fa Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 10:13:56 -0500 Subject: [PATCH 53/92] Also don't mutate new*feecap --- arbnode/dataposter/data_poster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 2b0275c735..7bc18a2121 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -667,10 +667,10 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u // Ensure we bid at least 1 wei to prevent division by zero if newBaseFeeCap.Sign() == 0 { - newBaseFeeCap.SetInt64(1) + newBaseFeeCap = big.NewInt(1) } if newBlobFeeCap.Sign() == 0 { - newBlobFeeCap.SetInt64(1) + newBlobFeeCap = big.NewInt(1) } return newBaseFeeCap, newTipCap, newBlobFeeCap, nil From 448c96947a0156409d88819628d844c324082c8c Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 13 May 2024 23:22:41 +0200 Subject: [PATCH 54/92] Revert code incorrectly dropped during the merge --- staker/block_validator.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/staker/block_validator.go b/staker/block_validator.go index eada1025a7..e494b3da10 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -129,6 +129,11 @@ func (c *BlockValidatorConfig) Validate() error { c.ValidationServerConfigs = executionServersConfigs } } + for i := range c.ValidationServerConfigs { + if err := c.ValidationServerConfigs[i].Validate(); err != nil { + return fmt.Errorf("failed to validate one of the 
block-validator validation-server-configs. url: %s, err: %w", c.ValidationServerConfigs[i].URL, err) + } + } return nil } From 45180a7e8e4b43592edb75b43bbd32d1e3cc6148 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Tue, 14 May 2024 14:28:17 +0800 Subject: [PATCH 55/92] Update testconstants.go --- arbos/programs/testconstants.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arbos/programs/testconstants.go b/arbos/programs/testconstants.go index 215b5fb8a7..1ab0e6e93b 100644 --- a/arbos/programs/testconstants.go +++ b/arbos/programs/testconstants.go @@ -1,6 +1,9 @@ // Copyright 2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +//go:build !wasm +// +build !wasm + package programs // This file exists because cgo isn't allowed in tests From 9cb4ced08e691bfbf8241e5d8b9014ae0ef45161 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 14 May 2024 11:56:19 +0200 Subject: [PATCH 56/92] Update ValidationInputToJson/ValidationInputFromJson --- validator/client/validation_client.go | 31 ----------------- validator/server_api/json.go | 27 +++++++++++++++ validator/valnode/validation_api.go | 50 --------------------------- 3 files changed, 27 insertions(+), 81 deletions(-) diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index ee3e3c75e8..38f044ab89 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -11,11 +11,9 @@ import ( "sync/atomic" "time" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/util/containers" - "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -228,32 +226,3 @@ func (r *ExecutionClientRun) Close() { } }) } - -func ValidationInputToJson(entry *validator.ValidationInput) *server_api.InputJSON { - jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) - for ty, preimages := range entry.Preimages { - jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) - } - res := &server_api.InputJSON{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), - StartState: entry.StartState, - PreimagesB64: jsonPreimagesMap, - UserWasms: make(map[common.Hash]server_api.UserWasmJson), - DebugChain: entry.DebugChain, - } - for _, binfo := range entry.BatchInfo { - encData := base64.StdEncoding.EncodeToString(binfo.Data) - res.BatchInfo = append(res.BatchInfo, server_api.BatchInfoJson{Number: binfo.Number, DataB64: encData}) - } - for moduleHash, info := range entry.UserWasms { - encWasm := server_api.UserWasmJson{ - Asm: base64.StdEncoding.EncodeToString(info.Asm), - Module: base64.StdEncoding.EncodeToString(info.Module), - } - res.UserWasms[moduleHash] = encWasm - } - return res -} diff --git a/validator/server_api/json.go b/validator/server_api/json.go index ba1ecb4c68..e30a4c72f7 100644 --- a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/jsonapi" @@ -85,11 +86,20 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON { DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), StartState: entry.StartState, 
PreimagesB64: jsonPreimagesMap, + UserWasms: make(map[common.Hash]UserWasmJson), + DebugChain: entry.DebugChain, } for _, binfo := range entry.BatchInfo { encData := base64.StdEncoding.EncodeToString(binfo.Data) res.BatchInfo = append(res.BatchInfo, BatchInfoJson{Number: binfo.Number, DataB64: encData}) } + for moduleHash, info := range entry.UserWasms { + encWasm := UserWasmJson{ + Asm: base64.StdEncoding.EncodeToString(info.Asm), + Module: base64.StdEncoding.EncodeToString(info.Module), + } + res.UserWasms[moduleHash] = encWasm + } return res } @@ -104,6 +114,8 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro DelayedMsgNr: entry.DelayedMsgNr, StartState: entry.StartState, Preimages: preimages, + UserWasms: make(state.UserWasms), + DebugChain: entry.DebugChain, } delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) if err != nil { @@ -121,5 +133,20 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro } valInput.BatchInfo = append(valInput.BatchInfo, decInfo) } + for moduleHash, info := range entry.UserWasms { + asm, err := base64.StdEncoding.DecodeString(info.Asm) + if err != nil { + return nil, err + } + module, err := base64.StdEncoding.DecodeString(info.Module) + if err != nil { + return nil, err + } + decInfo := state.ActivatedWasm{ + Asm: asm, + Module: module, + } + valInput.UserWasms[moduleHash] = decInfo + } return valInput, nil } diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go index 3f64d9a051..a67299b1a1 100644 --- a/validator/valnode/validation_api.go +++ b/validator/valnode/validation_api.go @@ -12,9 +12,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_api" @@ -190,51 +188,3 @@ func (a *ExecServerAPI) CloseExec(execid uint64) { run.run.Close() delete(a.runs, execid) } - -func ValidationInputFromJson(entry *server_api.InputJSON) (*validator.ValidationInput, error) { - preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - for ty, jsonPreimages := range entry.PreimagesB64 { - preimages[ty] = jsonPreimages.Map - } - valInput := &validator.ValidationInput{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - StartState: entry.StartState, - Preimages: preimages, - UserWasms: make(state.UserWasms), - DebugChain: entry.DebugChain, - } - delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) - if err != nil { - return nil, err - } - valInput.DelayedMsg = delayed - for _, binfo := range entry.BatchInfo { - data, err := base64.StdEncoding.DecodeString(binfo.DataB64) - if err != nil { - return nil, err - } - decInfo := validator.BatchInfo{ - Number: binfo.Number, - Data: data, - } - valInput.BatchInfo = append(valInput.BatchInfo, decInfo) - } - for moduleHash, info := range entry.UserWasms { - asm, err := base64.StdEncoding.DecodeString(info.Asm) - if err != nil { - return nil, err - } - module, err := base64.StdEncoding.DecodeString(info.Module) - if err != nil { - return nil, err - } - decInfo := state.ActivatedWasm{ - Asm: asm, - Module: module, - } - valInput.UserWasms[moduleHash] = decInfo - } - return valInput, nil -} From 03ee1dc52e2f163b569b17b36edb96c65a04d9c2 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 14 May 2024 10:38:30 -0500 Subject: 
[PATCH 57/92] address PR comments --- blocks_reexecutor/blocks_reexecutor.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index 0ad4337e0f..a03b29fefd 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -25,6 +25,8 @@ type Config struct { EndBlock uint64 `koanf:"end-block"` Room int `koanf:"room"` BlocksPerThread uint64 `koanf:"blocks-per-thread"` + + blocksPerThread uint64 } func (c *Config) Validate() error { @@ -35,8 +37,13 @@ func (c *Config) Validate() error { if c.EndBlock < c.StartBlock { return errors.New("invalid block range for blocks re-execution") } - if c.Room == 0 { - return errors.New("room for blocks re-execution cannot be zero") + if c.Room < 0 { + return errors.New("room for blocks re-execution should be greater than 0") + } + if c.BlocksPerThread != 0 { + c.blocksPerThread = c.BlocksPerThread + } else { + c.blocksPerThread = 10000 } return nil } @@ -52,6 +59,7 @@ var TestConfig = Config{ Mode: "full", Room: runtime.NumCPU(), BlocksPerThread: 10, + blocksPerThread: 10, } func ConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -93,10 +101,7 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block } if c.Mode == "random" && end != start { // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly - rng := uint64(10000) - if c.BlocksPerThread != 0 { - rng = c.BlocksPerThread - } + rng := c.blocksPerThread if rng > end-start { rng = end - start } @@ -108,12 +113,11 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block if start > 0 && start != chainStart { start-- } - // Divide work equally among available threads + // Divide work equally among available threads when BlocksPerThread is zero if c.BlocksPerThread == 0 { - c.BlocksPerThread = 10000 work := (end - start) / uint64(c.Room) if work > 0 { - c.BlocksPerThread = work + c.blocksPerThread = work } } return &BlocksReExecutor{ @@ -132,12 +136,10 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block // LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { - start := arbmath.SaturatingUSub(currentBlock, s.config.BlocksPerThread) + start := arbmath.SaturatingUSub(currentBlock, s.config.blocksPerThread) if start < s.startBlock { start = s.startBlock } - // we don't use state release pattern here - // TODO do we want to use release pattern here? 
startState, startHeader, release, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock From 952d2253065590dbf846af103360e87c4eacc3e3 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 14 May 2024 19:11:19 -0600 Subject: [PATCH 58/92] geth: update pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index f8917436fc..8048ac4bed 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit f8917436fcfa6a6a2b15c0ec7e6f318687491a8c +Subproject commit 8048ac4bed2eda18284e3c022ea5ee4cce771134 From a8254afca62deb6bdcda7533df434093df438734 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 15 May 2024 19:19:25 -0300 Subject: [PATCH 59/92] avoid hardcoding addresses and l2msg in tests --- execution/gethexec/executionengine.go | 4 +-- system_tests/seqfeed_test.go | 41 ++++++++++++++------------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index b31209b882..96dca6c63e 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -197,7 +197,7 @@ func (s *ExecutionEngine) NextDelayedMessageNumber() (uint64, error) { return currentHeader.Nonce.Uint64(), nil } -func messageFromTxes(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, txErrors []error) (*arbostypes.L1IncomingMessage, error) { +func MessageFromTxes(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, txErrors []error) (*arbostypes.L1IncomingMessage, error) { var l2Message []byte if len(txes) == 1 && txErrors[0] == nil { txBytes, err := txes[0].MarshalBinary() if err != nil { return nil, err } @@ -368,7 +368,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
return nil, nil } - msg, err := messageFromTxes(header, txes, hooks.TxErrors) + msg, err := MessageFromTxes(header, txes, hooks.TxErrors) if err != nil { return nil, err } diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 946194f17d..ed0398c40e 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -12,12 +12,15 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcaster/backlog" "github.com/offchainlabs/nitro/broadcaster/message" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/testhelpers" @@ -297,41 +300,41 @@ func testBlockHashComparison(t *testing.T, blockHash *common.Hash, mustMismatch defer cleanup() testClient := builder.L2 - // related to: - // - builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) userAccount := "User2" - txHash := common.HexToHash("0x633f62b463cc0e52d842406995fb590654db40aace77bfca863ba0e8d2290f97") - poster := common.HexToAddress("0xa4b000000000000000000073657175656e636572") - l2msg := []byte{4, 2, 248, 111, 131, 6, 74, 186, 128, 128, 132, 11, 235, 194, 0, 131, 122, 18, 0, 148, 12, 112, 159, 52, 15, 11, 178, 227, 97, 34, 158, 52, 91, 126, 38, 153, 157, 9, 105, 171, 133, 232, 212, 165, 16, 0, 128, 192, 1, 160, 75, 109, 200, 183, 223, 114, 85, 128, 133, 94, 26, 103, 145, 247, 47, 0, 114, 132, 133, 234, 222, 235, 102, 45, 2, 109, 83, 65, 210, 142, 242, 209, 160, 96, 90, 108, 188, 197, 195, 43, 222, 103, 155, 153, 81, 119, 74, 177, 103, 110, 134, 94, 221, 72, 236, 20, 86, 94, 226, 94, 5, 206, 196, 122, 119} + builder.L2Info.GenerateAccount(userAccount) + tx := builder.L2Info.PrepareTx("Owner", userAccount, builder.L2Info.TransferGas, big.NewInt(1e12), nil) + l1IncomingMsgHeader := arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: l1pricing.BatchPosterAddress, + BlockNumber: 29, + Timestamp: 1715295980, + RequestId: nil, + L1BaseFee: nil, + } + l1IncomingMsg, err := gethexec.MessageFromTxes( + &l1IncomingMsgHeader, + types.Transactions{tx}, + []error{nil}, + ) + Require(t, err) + broadcastMessage := message.BroadcastMessage{ Version: 1, Messages: []*message.BroadcastFeedMessage{ { SequenceNumber: 1, Message: arbostypes.MessageWithMetadata{ - Message: &arbostypes.L1IncomingMessage{ - Header: &arbostypes.L1IncomingMessageHeader{ - Kind: arbostypes.L1MessageType_L2Message, - Poster: poster, - BlockNumber: 29, - Timestamp: 1715295980, - RequestId: nil, - L1BaseFee: nil, - }, - L2msg: l2msg, - }, + Message: l1IncomingMsg, DelayedMessagesRead: 1, }, BlockHash: blockHash, - Signature: nil, }, }, } wsBroadcastServer.Broadcast(&broadcastMessage) // By now, even though block hash mismatch, the transaction should still be processed - builder.L2Info.GenerateAccount(userAccount) - _, err = WaitForTx(ctx, testClient.Client, txHash, time.Second*15) + _, err = WaitForTx(ctx, testClient.Client, tx.Hash(), time.Second*15) if err != nil { t.Fatal("error waiting for tx:", err) } From b03d7b34eac6f0c4456914ee6b2fbe6ef7ba03a4 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 15 May 2024 19:39:04 -0300 Subject: [PATCH 
60/92] improve log message when computed block hash doesn't match hash provided through input feed --- arbnode/transaction_streamer.go | 10 +++++++--- system_tests/seq_coordinator_test.go | 4 ++-- system_tests/seqfeed_test.go | 6 +++--- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index d9c7fc2163..b79b1aa963 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -146,6 +146,10 @@ type blockHashDBValue struct { BlockHash *common.Hash `rlp:"nil"` } +const ( + BlockHashMismatchLogMsg = "BlockHash from feed doesn't match locally computed hash. Check feed source." +) + func (s *TransactionStreamer) CurrentEstimateOfL1GasPrice() uint64 { s.cachedL1PriceDataMutex.Lock() defer s.cachedL1PriceDataMutex.Unlock() @@ -547,8 +551,8 @@ func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil. } // Get block hash. - // To keep it backwards compatible it is possible that a message related - // to a sequence number exists in the database but the block hash doesn't. + // To keep it backwards compatible, since it is possible that a message related + // to a sequence number exists in the database, but the block hash doesn't. key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) var blockHash *common.Hash data, err := s.db.Get(key) @@ -1170,7 +1174,7 @@ func (s *TransactionStreamer) checkResult(msgResult *execution.MessageResult, ex } if msgResult.BlockHash != *expectedBlockHash { log.Error( - "block_hash_mismatch", + BlockHashMismatchLogMsg, "expected", expectedBlockHash, "actual", msgResult.BlockHash, ) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index a069a2d5a1..43d55f40c9 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -354,8 +354,8 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { t.Fatal("Unexpected balance:", l2balance) } - if logHandler.WasLogged("block_hash_mismatch") { - t.Fatal("block_hash_mismatch was logged unexpectedly") + if logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) { + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") } } else { _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index ed0398c40e..589a48d3af 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -346,11 +346,11 @@ func testBlockHashComparison(t *testing.T, blockHash *common.Hash, mustMismatch t.Fatal("Unexpected balance:", l2balance) } - mismatched := logHandler.WasLogged("block_hash_mismatch") + mismatched := logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) if mustMismatch && !mismatched { - t.Fatal("Failed to log block_hash_mismatch") + t.Fatal("Failed to log BlockHashMismatchLogMsg") } else if !mustMismatch && mismatched { - t.Fatal("block_hash_mismatch was logged unexpectedly") + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") } } From 4f72ebb168ad60d8868c0de1d82898ff8f33f9a7 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 15 May 2024 23:45:23 -0500 Subject: [PATCH 61/92] Fix signed saturating math functions --- util/arbmath/math.go | 44 ++++++++----- util/arbmath/math_fuzz_test.go | 112 +++++++++++++++++++++++++++++++++ util/arbmath/math_test.go | 109 ++++++++++++++++++++++++++++++++ 3 files changed, 250 insertions(+), 15 deletions(-) create mode 100644 util/arbmath/math_fuzz_test.go diff --git a/util/arbmath/math.go 
b/util/arbmath/math.go index 1c11c6ad58..8f93caa87d 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -74,14 +74,6 @@ func MaxInt[T Number](values ...T) T { return max } -// AbsValue the absolute value of a number -func AbsValue[T Number](value T) T { - if value < 0 { - return -value // never happens for unsigned types - } - return value -} - // Checks if two ints are sufficiently close to one another func Within[T Unsigned](a, b, bound T) bool { min := MinInt(a, b) @@ -267,14 +259,32 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float { return new(big.Float).Mul(multiplicand, UintToBigFloat(multiplier)) } +func MaxIntValue[T Integer]() T { + allBits := ^T(0) + if allBits < 0 { + // This is a signed integer + return T((uint64(1) << (8*unsafe.Sizeof(allBits) - 1)) - 1) + } + return allBits +} + +func MinIntValue[T Integer]() T { + allBits := ^T(0) + if allBits < 0 { + // This is a signed integer + return T(uint64(1) << ((8 * unsafe.Sizeof(allBits)) - 1)) + } + return 0 +} + // SaturatingAdd add two integers without overflow func SaturatingAdd[T Signed](a, b T) T { sum := a + b if b > 0 && sum < a { - sum = ^T(0) >> 1 + sum = MaxIntValue[T]() } if b < 0 && sum > a { - sum = (^T(0) >> 1) + 1 + sum = MinIntValue[T]() } return sum } @@ -290,7 +300,11 @@ func SaturatingUAdd[T Unsigned](a, b T) T { // SaturatingSub subtract an int64 from another without overflow func SaturatingSub(minuend, subtrahend int64) int64 { - return SaturatingAdd(minuend, -subtrahend) + if subtrahend == math.MinInt64 { + // The absolute value of MinInt64 is one greater than MaxInt64 + return SaturatingAdd(SaturatingAdd(minuend, math.MaxInt64), 1) + } + return SaturatingAdd(minuend, SaturatingNeg(subtrahend)) } // SaturatingUSub subtract an integer from another without underflow @@ -315,9 +329,9 @@ func SaturatingMul[T Signed](a, b T) T { product := a * b if b != 0 && product/b != a { if (a > 0 && b > 0) || (a < 0 && b < 0) { - product = ^T(0) >> 1 + product = MaxIntValue[T]() } else { - product = (^T(0) >> 1) + 1 + product = MinIntValue[T]() } } return product @@ -367,8 +381,8 @@ func SaturatingCastToUint(value *big.Int) uint64 { // Negates an int without underflow func SaturatingNeg[T Signed](value T) T { - if value == ^T(0) { - return (^T(0)) >> 1 + if value < 0 && value == MinIntValue[T]() { + return MaxIntValue[T]() } return -value } diff --git a/util/arbmath/math_fuzz_test.go b/util/arbmath/math_fuzz_test.go new file mode 100644 index 0000000000..6e27f2b70a --- /dev/null +++ b/util/arbmath/math_fuzz_test.go @@ -0,0 +1,112 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbmath + +import ( + "math/big" + "testing" +) + +func toBig[T Signed](a T) *big.Int { + return big.NewInt(int64(a)) +} + +func saturatingBigToInt[T Signed](a *big.Int) T { + // MinIntValue and MaxIntValue are already separately tested + if a.Cmp(toBig(MaxIntValue[T]())) > 0 { + return MaxIntValue[T]() + } + if a.Cmp(toBig(MinIntValue[T]())) < 0 { + return MinIntValue[T]() + } + return T(a.Int64()) +} + +func fuzzSaturatingAdd[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a, b T) { + got := SaturatingAdd(a, b) + expected := saturatingBigToInt[T](new(big.Int).Add(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingAdd(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func fuzzSaturatingMul[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a, b T) { + got := SaturatingMul(a, b) + expected := saturatingBigToInt[T](new(big.Int).Mul(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingMul(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func fuzzSaturatingNeg[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a T) { + got := SaturatingNeg(a) + expected := saturatingBigToInt[T](new(big.Int).Neg(toBig(a))) + if got != expected { + t.Errorf("SaturatingNeg(%v) = %v, expected %v", a, got, expected) + } + }) +} + +func FuzzSaturatingAddInt8(f *testing.F) { + fuzzSaturatingAdd[int8](f) +} + +func FuzzSaturatingAddInt16(f *testing.F) { + fuzzSaturatingAdd[int16](f) +} + +func FuzzSaturatingAddInt32(f *testing.F) { + fuzzSaturatingAdd[int32](f) +} + +func FuzzSaturatingAddInt64(f *testing.F) { + fuzzSaturatingAdd[int64](f) +} + +func FuzzSaturatingSub(f *testing.F) { + f.Fuzz(func(t *testing.T, a, b int64) { + got := SaturatingSub(a, b) + expected := saturatingBigToInt[int64](new(big.Int).Sub(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingSub(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func FuzzSaturatingMulInt8(f *testing.F) { + fuzzSaturatingMul[int8](f) +} + +func FuzzSaturatingMulInt16(f *testing.F) { + fuzzSaturatingMul[int16](f) +} + +func FuzzSaturatingMulInt32(f *testing.F) { + fuzzSaturatingMul[int32](f) +} + +func FuzzSaturatingMulInt64(f *testing.F) { + fuzzSaturatingMul[int64](f) +} + +func FuzzSaturatingNegInt8(f *testing.F) { + fuzzSaturatingNeg[int8](f) +} + +func FuzzSaturatingNegInt16(f *testing.F) { + fuzzSaturatingNeg[int16](f) +} + +func FuzzSaturatingNegInt32(f *testing.F) { + fuzzSaturatingNeg[int32](f) +} + +func FuzzSaturatingNegInt64(f *testing.F) { + fuzzSaturatingNeg[int64](f) +} diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go index 2e2f14795a..194d6d7c86 100644 --- a/util/arbmath/math_test.go +++ b/util/arbmath/math_test.go @@ -5,6 +5,7 @@ package arbmath import ( "bytes" + "fmt" "math" "math/rand" "testing" @@ -120,6 +121,114 @@ func TestSlices(t *testing.T) { assert_eq(SliceWithRunoff(data, 7, 8), []uint8{}) } +func testMinMaxValues[T Integer](t *testing.T, min T, max T) { + gotMin := MinIntValue[T]() + if gotMin != min { + Fail(t, "expected min", min, "but got", gotMin) + } + gotMax := MaxIntValue[T]() + if gotMax != max { + Fail(t, "expected max", max, "but got", gotMax) + } +} + +func TestMinMaxValues(t *testing.T) { + testMinMaxValues[uint8](t, 0, math.MaxUint8) + testMinMaxValues[uint16](t, 0, math.MaxUint16) + testMinMaxValues[uint32](t, 0, math.MaxUint32) + testMinMaxValues[uint64](t, 0, math.MaxUint64) + testMinMaxValues[int8](t, math.MinInt8, 
math.MaxInt8) + testMinMaxValues[int16](t, math.MinInt16, math.MaxInt16) + testMinMaxValues[int32](t, math.MinInt32, math.MaxInt32) + testMinMaxValues[int64](t, math.MinInt64, math.MaxInt64) +} + +func TestSaturatingAdd(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {2, 3, 5}, + {-1, -2, -3}, + {math.MaxInt64, 1, math.MaxInt64}, + {math.MaxInt64, math.MaxInt64, math.MaxInt64}, + {math.MinInt64, -1, math.MinInt64}, + {math.MinInt64, math.MinInt64, math.MinInt64}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("%v + %v = %v", tc.a, tc.b, tc.expected), func(t *testing.T) { + sum := SaturatingAdd(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingAdd(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingSub(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {5, 3, 2}, + {-3, -2, -1}, + {math.MinInt64, 1, math.MinInt64}, + {math.MinInt64, -1, math.MinInt64 + 1}, + {math.MinInt64, math.MinInt64, 0}, + {0, math.MinInt64, math.MaxInt64}, + } + + for _, tc := range tests { + t.Run("", func(t *testing.T) { + sum := SaturatingSub(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingSub(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingMul(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {5, 3, 15}, + {-3, -2, 6}, + {math.MaxInt64, 2, math.MaxInt64}, + {math.MinInt64, 2, math.MinInt64}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("%v - %v = %v", tc.a, tc.b, tc.expected), func(t *testing.T) { + sum := SaturatingMul(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingMul(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingNeg(t *testing.T) { + tests := []struct { + value int64 + expected int64 + }{ + {0, 0}, + {5, -5}, + {-5, 5}, + {math.MinInt64, math.MaxInt64}, + {math.MaxInt64, math.MinInt64 + 1}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("-%v = %v", tc.value, tc.expected), func(t *testing.T) { + result := SaturatingNeg(tc.value) + if result != tc.expected { + t.Errorf("SaturatingNeg(%v) = %v: expected %v", tc.value, result, tc.expected) + } + }) + } +} + func Fail(t *testing.T, printables ...interface{}) { t.Helper() testhelpers.FailImpl(t, printables...) 
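Note on the generic bound helpers introduced in the patch above: the expressions built from ^T(0) and unsafe.Sizeof can be opaque at first read. The following standalone sketch (illustrative only — a simplified constraint and hypothetical names, not the exact arbmath definitions) shows how the signed bounds are derived from the type's width and how saturating addition clamps to them:

package main

import (
	"fmt"
	"unsafe"
)

type Signed interface {
	~int8 | ~int16 | ~int32 | ~int64
}

// maxSigned: every bit set except the sign bit.
func maxSigned[T Signed]() T {
	return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1)
}

// minSigned: only the sign bit set (wraps to the most negative value).
func minSigned[T Signed]() T {
	return T(uint64(1) << (8*unsafe.Sizeof(T(0)) - 1))
}

// saturatingAdd clamps to the type's bounds instead of wrapping on overflow.
func saturatingAdd[T Signed](a, b T) T {
	sum := a + b
	if b > 0 && sum < a { // wrapped past the maximum
		return maxSigned[T]()
	}
	if b < 0 && sum > a { // wrapped past the minimum
		return minSigned[T]()
	}
	return sum
}

func main() {
	fmt.Println(maxSigned[int8](), minSigned[int8]()) // 127 -128
	fmt.Println(saturatingAdd[int8](120, 100))        // 127, not a wrapped negative
	fmt.Println(saturatingAdd[int8](-120, -100))      // -128
}

The fuzz tests added above check exactly this clamping behavior against big.Int arithmetic for every signed width.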
From bd880e9c340667c08e9fe9cbb9577b8067198016 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 16 May 2024 10:37:55 -0500 Subject: [PATCH 62/92] Limit min/max functions to Signed --- util/arbmath/math.go | 30 ++++++++++-------------------- util/arbmath/math_fuzz_test.go | 10 +++++----- util/arbmath/math_test.go | 20 ++++++++------------ 3 files changed, 23 insertions(+), 37 deletions(-) diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 8f93caa87d..d7a0d1f523 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -259,32 +259,22 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float { return new(big.Float).Mul(multiplicand, UintToBigFloat(multiplier)) } -func MaxIntValue[T Integer]() T { - allBits := ^T(0) - if allBits < 0 { - // This is a signed integer - return T((uint64(1) << (8*unsafe.Sizeof(allBits) - 1)) - 1) - } - return allBits +func MaxSignedValue[T Signed]() T { + return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1) } -func MinIntValue[T Integer]() T { - allBits := ^T(0) - if allBits < 0 { - // This is a signed integer - return T(uint64(1) << ((8 * unsafe.Sizeof(allBits)) - 1)) - } - return 0 +func MinSignedValue[T Signed]() T { + return T(uint64(1) << ((8 * unsafe.Sizeof(T(0))) - 1)) } // SaturatingAdd add two integers without overflow func SaturatingAdd[T Signed](a, b T) T { sum := a + b if b > 0 && sum < a { - sum = MaxIntValue[T]() + sum = MaxSignedValue[T]() } if b < 0 && sum > a { - sum = MinIntValue[T]() + sum = MinSignedValue[T]() } return sum } @@ -329,9 +319,9 @@ func SaturatingMul[T Signed](a, b T) T { product := a * b if b != 0 && product/b != a { if (a > 0 && b > 0) || (a < 0 && b < 0) { - product = MaxIntValue[T]() + product = MaxSignedValue[T]() } else { - product = MinIntValue[T]() + product = MinSignedValue[T]() } } return product @@ -381,8 +371,8 @@ func SaturatingCastToUint(value *big.Int) uint64 { // Negates an int without underflow func SaturatingNeg[T Signed](value T) T { - if value < 0 && value == MinIntValue[T]() { - return MaxIntValue[T]() + if value < 0 && value == MinSignedValue[T]() { + return MaxSignedValue[T]() } return -value } diff --git a/util/arbmath/math_fuzz_test.go b/util/arbmath/math_fuzz_test.go index 6e27f2b70a..591d699de0 100644 --- a/util/arbmath/math_fuzz_test.go +++ b/util/arbmath/math_fuzz_test.go @@ -13,12 +13,12 @@ func toBig[T Signed](a T) *big.Int { } func saturatingBigToInt[T Signed](a *big.Int) T { - // MinIntValue and MaxIntValue are already separately tested - if a.Cmp(toBig(MaxIntValue[T]())) > 0 { - return MaxIntValue[T]() + // MinSignedValue and MaxSignedValue are already separately tested + if a.Cmp(toBig(MaxSignedValue[T]())) > 0 { + return MaxSignedValue[T]() } - if a.Cmp(toBig(MinIntValue[T]())) < 0 { - return MinIntValue[T]() + if a.Cmp(toBig(MinSignedValue[T]())) < 0 { + return MinSignedValue[T]() } return T(a.Int64()) } diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go index 194d6d7c86..1be60dc58b 100644 --- a/util/arbmath/math_test.go +++ b/util/arbmath/math_test.go @@ -121,26 +121,22 @@ func TestSlices(t *testing.T) { assert_eq(SliceWithRunoff(data, 7, 8), []uint8{}) } -func testMinMaxValues[T Integer](t *testing.T, min T, max T) { - gotMin := MinIntValue[T]() +func testMinMaxSignedValues[T Signed](t *testing.T, min T, max T) { + gotMin := MinSignedValue[T]() if gotMin != min { Fail(t, "expected min", min, "but got", gotMin) } - gotMax := MaxIntValue[T]() + gotMax := MaxSignedValue[T]() if gotMax != max { Fail(t, "expected max", max, "but 
got", gotMax) } } -func TestMinMaxValues(t *testing.T) { - testMinMaxValues[uint8](t, 0, math.MaxUint8) - testMinMaxValues[uint16](t, 0, math.MaxUint16) - testMinMaxValues[uint32](t, 0, math.MaxUint32) - testMinMaxValues[uint64](t, 0, math.MaxUint64) - testMinMaxValues[int8](t, math.MinInt8, math.MaxInt8) - testMinMaxValues[int16](t, math.MinInt16, math.MaxInt16) - testMinMaxValues[int32](t, math.MinInt32, math.MaxInt32) - testMinMaxValues[int64](t, math.MinInt64, math.MaxInt64) +func TestMinMaxSignedValues(t *testing.T) { + testMinMaxSignedValues[int8](t, math.MinInt8, math.MaxInt8) + testMinMaxSignedValues[int16](t, math.MinInt16, math.MaxInt16) + testMinMaxSignedValues[int32](t, math.MinInt32, math.MaxInt32) + testMinMaxSignedValues[int64](t, math.MinInt64, math.MaxInt64) } func TestSaturatingAdd(t *testing.T) { From bf8f40a57cefea50899bb15caa989c6ec0bc40c6 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 18:09:52 +0200 Subject: [PATCH 63/92] Skip tls verification when making requests to secure signer from Dataposter --- arbnode/dataposter/data_poster.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 7bc18a2121..04789d0fda 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -217,6 +217,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { tlsCfg := &tls.Config{ MinVersion: tls.VersionTLS12, + // Dataposter verifies that signed transaction was signed by the account + // that it expects to be signed with. So signer is already authenticated + // on application level and does not need to rely on TLS for authentication. + InsecureSkipVerify: true, // #nosec G402 } if opts.ClientCert != "" && opts.ClientPrivateKey != "" { From 761e98dfe76f381b30fac569ce6f16d3b36e6f3c Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 20:23:00 +0200 Subject: [PATCH 64/92] Expose InsecureSkipVerify as a flag in external signer config --- arbnode/dataposter/data_poster.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 04789d0fda..35826620f8 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -220,7 +220,7 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error // Dataposter verifies that signed transaction was signed by the account // that it expects to be signed with. So signer is already authenticated // on application level and does not need to rely on TLS for authentication. - InsecureSkipVerify: true, // #nosec G402 + InsecureSkipVerify: opts.InsecureSkipVerify, // #nosec G402 } if opts.ClientCert != "" && opts.ClientPrivateKey != "" { @@ -1227,6 +1227,8 @@ type ExternalSignerCfg struct { // (Optional) Client certificate key for mtls. // This is required when client-cert is set. ClientPrivateKey string `koanf:"client-private-key"` + // TLS config option, when enabled skips certificate verification of external signer. 
+ InsecureSkipVerify bool `koanf:"insecure-skip-verify"` } type DangerousConfig struct { @@ -1280,6 +1282,7 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") + f.Bool(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.InsecureSkipVerify, "skip TLS certificate verification") } var DefaultDataPosterConfig = DataPosterConfig{ @@ -1301,7 +1304,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, LegacyStorageEncoding: false, Dangerous: DangerousConfig{ClearDBStorage: false}, - ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: true}, MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, @@ -1334,7 +1337,7 @@ var TestDataPosterConfig = DataPosterConfig{ UseDBStorage: false, UseNoOpStorage: false, LegacyStorageEncoding: false, - ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: true}, MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, From b8503d8c2581d1c065800b01bc8be7740e41a9af Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 20:24:10 +0200 Subject: [PATCH 65/92] Fix flag initialization --- arbnode/dataposter/data_poster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 35826620f8..8137cbac60 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -1282,7 +1282,7 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") - f.Bool(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.InsecureSkipVerify, "skip TLS certificate verification") + f.Bool(prefix+".insecure-skip-verify", DefaultDataPosterConfig.ExternalSigner.InsecureSkipVerify, "skip TLS certificate verification") } var DefaultDataPosterConfig = DataPosterConfig{ From 7b7159cb7de9ee78d318a4ea1f4f74df5781fbf7 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 20:25:59 +0200 Subject: [PATCH 66/92] Keep insecureSkipVerify false by default in prod config --- arbnode/dataposter/data_poster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 8137cbac60..fb35ac3c8d 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -1304,7 +1304,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, LegacyStorageEncoding: 
false, Dangerous: DangerousConfig{ClearDBStorage: false}, - ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: true}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: false}, MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, From f8dcce964279ede50b1795cbc93b6ea0fdf4cb81 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 16 May 2024 13:35:52 -0500 Subject: [PATCH 67/92] Fix lastUpdateTimeOffset -> ArbitrumStartTime --- arbos/programs/data_pricer.go | 8 +++++--- arbos/programs/programs.go | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arbos/programs/data_pricer.go b/arbos/programs/data_pricer.go index b0184d7dc7..ed7c98556d 100644 --- a/arbos/programs/data_pricer.go +++ b/arbos/programs/data_pricer.go @@ -27,12 +27,14 @@ const ( inertiaOffset ) +const ArbitrumStartTime = 1421388000 // the day it all began + const initialDemand = 0 // no demand const InitialHourlyBytes = 1 * (1 << 40) / (365 * 24) // 1Tb total footprint const initialBytesPerSecond = InitialHourlyBytes / (60 * 60) // refill each second -const initialLastUpdateTime = 1421388000 // the day it all began -const initialMinPrice = 82928201 // 5Mb = $1 -const initialInertia = 21360419 // expensive at 1Tb +const initialLastUpdateTime = ArbitrumStartTime +const initialMinPrice = 82928201 // 5Mb = $1 +const initialInertia = 21360419 // expensive at 1Tb func initDataPricer(sto *storage.Storage) { demand := sto.OpenStorageBackedUint32(demandOffset) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index d3113ae98d..9d51172986 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -527,12 +527,12 @@ func (status userStatus) toResult(data []byte, debug bool) ([]byte, string, erro // Hours since Arbitrum began, rounded down. func hoursSinceArbitrum(time uint64) uint24 { - return uint24((time - lastUpdateTimeOffset) / 3600) + return uint24((time - ArbitrumStartTime) / 3600) } // Computes program age in seconds from the hours passed since Arbitrum began. func hoursToAge(time uint64, hours uint24) uint64 { seconds := am.SaturatingUMul(uint64(hours), 3600) - activatedAt := am.SaturatingUAdd(lastUpdateTimeOffset, seconds) + activatedAt := am.SaturatingUAdd(ArbitrumStartTime, seconds) return am.SaturatingUSub(time, activatedAt) } From f301094d5dd0019c1906e9f98a4cabbc720aea46 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 16 May 2024 15:03:25 -0500 Subject: [PATCH 68/92] Use SaturatingUSub for hoursSinceArbitrum --- arbos/programs/programs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 9d51172986..6f73e16b85 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -527,7 +527,7 @@ func (status userStatus) toResult(data []byte, debug bool) ([]byte, string, erro // Hours since Arbitrum began, rounded down. func hoursSinceArbitrum(time uint64) uint24 { - return uint24((time - ArbitrumStartTime) / 3600) + return am.SaturatingUUCast[uint24]((am.SaturatingUSub(time, ArbitrumStartTime)) / 3600) } // Computes program age in seconds from the hours passed since Arbitrum began. 
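The switch to SaturatingUSub above guards against timestamps earlier than ArbitrumStartTime: with plain uint64 arithmetic the subtraction would wrap around rather than clamp to zero. A minimal sketch of the hazard (simplified local helper for illustration, not the exact arbmath implementation):

package main

import "fmt"

const arbitrumStartTime = 1421388000 // mirrors ArbitrumStartTime above

// saturatingUSub returns 0 instead of wrapping when b > a.
func saturatingUSub(a, b uint64) uint64 {
	if b > a {
		return 0
	}
	return a - b
}

func main() {
	early := uint64(1_000_000) // a timestamp before Arbitrum began
	fmt.Println((early - arbitrumStartTime) / 3600)              // wraps to roughly 5.1e15 "hours"
	fmt.Println(saturatingUSub(early, arbitrumStartTime) / 3600) // 0
}

The saturating cast to uint24 in the same line serves the analogous purpose at the other end of the range, clamping the hour count instead of silently truncating it.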
From 804e4fa75860e4dd482f0b1e59844e260d277ba4 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 17 May 2024 11:25:33 -0300 Subject: [PATCH 69/92] fix: CleanCacheSize from hashdb.Config expects a value defined in bytes, and not as in MB as TrieCleanLimit is defined --- cmd/staterecovery/staterecovery.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 6390826a91..58ad06ad14 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -31,7 +31,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon return fmt.Errorf("start block parent is missing, parent block number: %d", current-1) } hashConfig := *hashdb.Defaults - hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit + hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit * 1024 * 1024 trieConfig := &trie.Config{ Preimages: false, HashDB: &hashConfig, From 6884188d20b089f9320b6fc26bad6d049583364f Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 17 May 2024 13:21:12 -0500 Subject: [PATCH 70/92] address PR comments --- blocks_reexecutor/blocks_reexecutor.go | 46 ++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index a03b29fefd..f58e0ce00f 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -25,8 +25,6 @@ type Config struct { EndBlock uint64 `koanf:"end-block"` Room int `koanf:"room"` BlocksPerThread uint64 `koanf:"blocks-per-thread"` - - blocksPerThread uint64 } func (c *Config) Validate() error { @@ -40,11 +38,6 @@ func (c *Config) Validate() error { if c.Room < 0 { return errors.New("room for blocks re-execution should be greater than 0") } - if c.BlocksPerThread != 0 { - c.blocksPerThread = c.BlocksPerThread - } else { - c.blocksPerThread = 10000 - } return nil } @@ -59,7 +52,6 @@ var TestConfig = Config{ Mode: "full", Room: runtime.NumCPU(), BlocksPerThread: 10, - blocksPerThread: 10, } func ConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -73,13 +65,14 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { type BlocksReExecutor struct { stopwaiter.StopWaiter - config *Config - blockchain *core.BlockChain - stateFor arbitrum.StateForHeaderFunction - done chan struct{} - fatalErrChan chan error - startBlock uint64 - currentBlock uint64 + config *Config + blockchain *core.BlockChain + stateFor arbitrum.StateForHeaderFunction + done chan struct{} + fatalErrChan chan error + startBlock uint64 + currentBlock uint64 + blocksPerThread uint64 } func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *BlocksReExecutor { @@ -99,9 +92,13 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block log.Warn("invalid state reexecutor's end block number, resetting to latest", "end", end, "latest", chainEnd) end = chainEnd } + blocksPerThread := uint64(10000) + if c.BlocksPerThread != 0 { + blocksPerThread = c.BlocksPerThread + } if c.Mode == "random" && end != start { // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly - rng := c.blocksPerThread + rng := blocksPerThread if rng > end-start { rng = end - start } @@ -117,16 +114,17 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block if c.BlocksPerThread == 0 { work := (end - start) / uint64(c.Room) if work > 0 { - 
c.blocksPerThread = work + blocksPerThread = work } } return &BlocksReExecutor{ - config: c, - blockchain: blockchain, - currentBlock: end, - startBlock: start, - done: make(chan struct{}, c.Room), - fatalErrChan: fatalErrChan, + config: c, + blockchain: blockchain, + currentBlock: end, + startBlock: start, + blocksPerThread: blocksPerThread, + done: make(chan struct{}, c.Room), + fatalErrChan: fatalErrChan, stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { state, err := blockchain.StateAt(header.Root) return state, arbitrum.NoopStateRelease, err @@ -136,7 +134,7 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block // LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { - start := arbmath.SaturatingUSub(currentBlock, s.config.blocksPerThread) + start := arbmath.SaturatingUSub(currentBlock, s.blocksPerThread) if start < s.startBlock { start = s.startBlock } From b5b12e89049de4c334035d979f7734d67e3d36d3 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 15:11:19 -0600 Subject: [PATCH 71/92] Add support for configurable lru cache resize on nitro init --- arbitrator/stylus/src/cache.rs | 4 ++++ arbitrator/stylus/src/lib.rs | 6 ++++++ arbnode/inbox_test.go | 1 + arbos/programs/native.go | 4 ++++ execution/gethexec/blockchain.go | 18 ++++++++++++++++++ execution/gethexec/executionengine.go | 10 ++++++++++ execution/gethexec/node.go | 3 +++ system_tests/recreatestate_rpc_test.go | 2 +- system_tests/staterecovery_test.go | 2 +- 9 files changed, 48 insertions(+), 2 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 2b83c6152f..6a9e677be5 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -66,6 +66,10 @@ impl InitCache { } } + pub fn set_lru_size(size: u32) { + cache!().lru.resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) + } + /// Retrieves a cached value, updating items as necessary. pub fn get(module_hash: Bytes32, version: u16, debug: bool) -> Option<(Module, Store)> { let mut cache = cache!(); diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 7abfb98bf5..9ccc9829ca 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -212,6 +212,12 @@ pub unsafe extern "C" fn stylus_call( status } +/// resize lru +#[no_mangle] +pub extern "C" fn stylus_cache_lru_resize(size: u32) { + InitCache::set_lru_size(size); +} + /// Caches an activated user program. 
/// /// # Safety diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 5c879743a4..594e0cedb5 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -65,6 +65,7 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* if err != nil { Fail(t, err) } + execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache) execSeq := &execClientWrapper{execEngine, t} inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher) if err != nil { diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 7a6c16d866..17068371b1 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -253,6 +253,10 @@ func init() { } } +func ResizeWasmLruCache(size uint32) { + C.stylus_cache_lru_resize(u32(size)) +} + func (value bytes32) toHash() common.Hash { hash := common.Hash{} for index, b := range value.bytes { diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 2a20c3da26..1d5060ca8a 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -37,6 +37,7 @@ type CachingConfig struct { SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` + StylusLRUCache uint32 `koanf:"stylus-lru-cache"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -51,6 +52,7 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues") f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues") + f.Uint32(prefix+".stylus-lru-cache", DefaultCachingConfig.StylusLRUCache, "initialized stylus programs to keep in LRU cache") } var DefaultCachingConfig = CachingConfig{ @@ -65,6 +67,22 @@ var DefaultCachingConfig = CachingConfig{ SnapshotRestoreGasLimit: 300_000_000_000, MaxNumberOfBlocksToSkipStateSaving: 0, MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 256, +} + +var TestCachingConfig = CachingConfig{ + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 0, + MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 0, } // TODO remove stack from parameters as it is no longer needed here diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 38569f44ab..b3ebe80f37 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -1,6 +1,9 @@ // Copyright 2022-2024, Offchain Labs, Inc. 
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +//go:build !wasm +// +build !wasm + package gethexec /* @@ -28,6 +31,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" @@ -72,6 +76,12 @@ func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) { }, nil } +func (n *ExecutionEngine) Initialize(rustCacheSize uint32) { + if rustCacheSize != 0 { + programs.ResizeWasmLruCache(rustCacheSize) + } +} + func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) { if s.Started() { panic("trying to set recorder after start") diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index ae76b88530..b7fe1c6e14 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -107,6 +107,7 @@ var ConfigDefault = Config{ func ConfigDefaultNonSequencerTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.ParentChainReader = headerreader.TestConfig config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig @@ -119,6 +120,7 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.Sequencer = TestSequencerConfig config.ParentChainReader = headerreader.TestConfig config.ForwardingTarget = "null" @@ -280,6 +282,7 @@ func (n *ExecutionNode) GetL1GasPriceEstimate() (uint64, error) { } func (n *ExecutionNode) Initialize(ctx context.Context) error { + n.ExecEngine.Initialize(n.ConfigFetcher().Caching.StylusLRUCache) n.ArbInterface.Initialize(n) err := n.Backend.Start() if err != nil { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 777ed17961..bf321808de 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -449,7 +449,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { - cacheConfig := gethexec.DefaultCachingConfig + cacheConfig := gethexec.TestCachingConfig cacheConfig.Archive = true cacheConfig.SnapshotCache = 0 // disable snapshots cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 632e748da8..02c2623cfa 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -52,7 +52,7 @@ func TestRectreateMissingStates(t *testing.T) { chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) Require(t, err) defer chainDb.Close() - cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) + cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.TestCachingConfig) bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) Require(t, err) err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig, 1) From ced4a07273a2de581bc57580468b2fc58e8922b5 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:04:11 -0600 Subject: [PATCH 72/92] add tags when creating wasm-wrapped database --- 
cmd/nitro/init.go | 4 ++-- system_tests/common_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index c52c87732c..0b36fcfdaf 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -186,7 +186,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return nil, nil, err } - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1) err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) @@ -243,7 +243,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return nil, nil, err } - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1) if config.Init.ImportFile != "" { initDataReader, err = statetransfer.NewJsonInitDataReader(config.Init.ImportFile) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index f6bfde2108..edc16ffec4 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -777,7 +777,7 @@ func createL2BlockChainWithStackConfig( Require(t, err) wasmData, err := stack.OpenDatabase("wasm", 0, 0, "wasm/", false) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0) arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) @@ -984,7 +984,7 @@ func Create2ndNodeWithConfig( Require(t, err) wasmData, err := l2stack.OpenDatabase("wasm", 0, 0, "wasm/", false) Require(t, err) - l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData) + l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData, 0) l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) Require(t, err) From 0f30f9f4e6cbd05cc76c6710cefbb24929b75eb9 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:05:41 -0600 Subject: [PATCH 73/92] arbitrator: add and use long_term tag --- arbitrator/stylus/src/cache.rs | 63 +++++++++++++++++++-------------- arbitrator/stylus/src/lib.rs | 16 +++++---- arbitrator/stylus/src/native.rs | 9 ++--- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 6a9e677be5..3a15bc5d6a 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -21,7 +21,7 @@ macro_rules! cache { } pub struct InitCache { - arbos: HashMap, + long_term: HashMap, lru: LruCache, } @@ -59,9 +59,14 @@ impl CacheItem { } impl InitCache { + // current implementation only has one tag that stores to the long_term + // future implementations might have more, but 0 is a reserved tag + // that will never modify long_term state + const ARBOS_TAG: u32 = 1; + fn new(size: usize) -> Self { Self { - arbos: HashMap::new(), + long_term: HashMap::new(), lru: LruCache::new(NonZeroUsize::new(size).unwrap()), } } @@ -76,7 +81,7 @@ impl InitCache { let key = CacheKey::new(module_hash, version, debug); // See if the item is in the long term cache - if let Some(item) = cache.arbos.get(&key) { + if let Some(item) = cache.long_term.get(&key) { return Some(item.data()); } @@ -88,18 +93,27 @@ impl InitCache { } /// Inserts an item into the long term cache, cloning from the LRU cache if able. 
+ /// If long_term_tag is 0 will only insert to LRU pub fn insert( module_hash: Bytes32, module: &[u8], version: u16, + long_term_tag: u32, debug: bool, ) -> Result<(Module, Store)> { let key = CacheKey::new(module_hash, version, debug); // if in LRU, add to ArbOS let mut cache = cache!(); + if let Some(item) = cache.long_term.get(&key) { + return Ok(item.data()) + } if let Some(item) = cache.lru.peek(&key).cloned() { - cache.arbos.insert(key, item.clone()); + if long_term_tag == Self::ARBOS_TAG { + cache.long_term.insert(key, item.clone()); + } else { + cache.lru.promote(&key) + } return Ok(item.data()); } drop(cache); @@ -109,37 +123,34 @@ impl InitCache { let item = CacheItem::new(module, engine); let data = item.data(); - cache!().arbos.insert(key, item); + let mut cache = cache!(); + if long_term_tag != Self::ARBOS_TAG { + cache.lru.put(key, item); + } else { + cache.long_term.insert(key, item); + } Ok(data) } - /// Inserts an item into the short-lived LRU cache. - pub fn insert_lru( - module_hash: Bytes32, - module: &[u8], - version: u16, - debug: bool, - ) -> Result<(Module, Store)> { - let engine = CompileConfig::version(version, debug).engine(); - let module = unsafe { Module::deserialize_unchecked(&engine, module)? }; - - let key = CacheKey::new(module_hash, version, debug); - let item = CacheItem::new(module, engine); - cache!().lru.put(key, item.clone()); - Ok(item.data()) - } - /// Evicts an item in the long-term cache. - pub fn evict(module_hash: Bytes32, version: u16, debug: bool) { + pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) { + if long_term_tag != Self::ARBOS_TAG { + return + } let key = CacheKey::new(module_hash, version, debug); - cache!().arbos.remove(&key); + let mut cache = cache!(); + if let Some(item) = cache.long_term.remove(&key) { + cache.lru.put(key, item); + } } - /// Modifies the cache for reorg, dropping the long-term cache. - pub fn reorg(_block: u64) { + pub fn clear_long_term(long_term_tag: u32) { + if long_term_tag != Self::ARBOS_TAG { + return + } let mut cache = cache!(); let cache = &mut *cache; - for (key, item) in cache.arbos.drain() { + for (key, item) in cache.long_term.drain() { cache.lru.put(key, item); // not all will fit, just a heuristic } } diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 9ccc9829ca..6133b6ac34 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -183,6 +183,7 @@ pub unsafe extern "C" fn stylus_call( debug_chain: bool, output: *mut RustBytes, gas: *mut u64, + long_term_tag: u32, ) -> UserOutcomeKind { let module = module.slice(); let calldata = calldata.slice().to_vec(); @@ -193,7 +194,7 @@ pub unsafe extern "C" fn stylus_call( // Safety: module came from compile_user_wasm and we've paid for memory expansion let instance = unsafe { - NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, debug_chain) + NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, long_term_tag, debug_chain) }; let mut instance = match instance { Ok(instance) => instance, @@ -223,28 +224,31 @@ pub extern "C" fn stylus_cache_lru_resize(size: u32) { /// # Safety /// /// `module` must represent a valid module produced from `stylus_activate`. +/// arbos_tag: a tag for arbos cache. 
0 won't affect real caching +/// currently only if tag==1 caching will be affected #[no_mangle] pub unsafe extern "C" fn stylus_cache_module( module: GoSliceData, module_hash: Bytes32, version: u16, + arbos_tag: u32, debug: bool, ) { - if let Err(error) = InitCache::insert(module_hash, module.slice(), version, debug) { + if let Err(error) = InitCache::insert(module_hash, module.slice(), version, arbos_tag, debug) { panic!("tried to cache invalid asm!: {error}"); } } /// Evicts an activated user program from the init cache. #[no_mangle] -pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, debug: bool) { - InitCache::evict(module_hash, version, debug); +pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, arbos_tag: u32, debug: bool) { + InitCache::evict(module_hash, version, arbos_tag, debug); } /// Reorgs the init cache. This will likely never happen. #[no_mangle] -pub extern "C" fn stylus_reorg_vm(block: u64) { - InitCache::reorg(block); +pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) { + InitCache::clear_long_term(arbos_tag); } /// Frees the vector. Does nothing when the vector is null. diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 6d5e4cd2e9..38155818c0 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -113,6 +113,7 @@ impl> NativeInstance { version: u16, evm: E, evm_data: EvmData, + mut long_term_tag: u32, debug: bool, ) -> Result { let compile = CompileConfig::version(version, debug); @@ -122,10 +123,10 @@ impl> NativeInstance { if let Some((module, store)) = InitCache::get(module_hash, version, debug) { return Self::from_module(module, store, env); } - let (module, store) = match env.evm_data.cached { - true => InitCache::insert(module_hash, module, version, debug)?, - false => InitCache::insert_lru(module_hash, module, version, debug)?, - }; + if !env.evm_data.cached { + long_term_tag = 0; + } + let (module, store) = InitCache::insert(module_hash, module, version, long_term_tag, debug)?; Self::from_module(module, store, env) } From 72f8b9da72a8fb6821e06b970ff85573d565f3f2 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:08:59 -0600 Subject: [PATCH 74/92] nitro: use tag for stylus calls --- arbos/programs/native.go | 20 ++++++++++++-------- arbos/programs/programs.go | 7 ++++++- arbos/programs/wasm.go | 1 + arbos/tx_processor.go | 1 + execution/gethexec/executionengine.go | 3 ++- 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 17068371b1..f24dcac64d 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -172,6 +172,7 @@ func callProgram( evmData *EvmData, stylusParams *ProgParams, memoryModel *MemoryModel, + arbos_tag uint32, ) ([]byte, error) { db := interpreter.Evm().StateDB debug := stylusParams.DebugMode @@ -198,6 +199,7 @@ func callProgram( cbool(debug), output, (*u64)(&scope.Contract.Gas), + u32(arbos_tag), )) depth := interpreter.Depth() @@ -228,8 +230,9 @@ func cacheProgram(db vm.StateDB, module common.Hash, program Program, params *St if err != nil { panic("unable to recreate wasm") } - state.CacheWasmRust(asm, module, program.version, debug) - db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: program.version, Debug: debug}) + tag := db.Database().WasmCacheTag() + state.CacheWasmRust(asm, module, program.version, tag, debug) + db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: program.version, Tag: tag, 
Debug: debug}) } } @@ -237,19 +240,20 @@ func cacheProgram(db vm.StateDB, module common.Hash, program Program, params *St // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode, forever bool) { if runMode == core.MessageCommitMode { - state.EvictWasmRust(module, version, debug) + tag := db.Database().WasmCacheTag() + state.EvictWasmRust(module, version, tag, debug) if !forever { - db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Debug: debug}) + db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Tag: tag, Debug: debug}) } } } func init() { - state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, debug bool) { - C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } - state.EvictWasmRust = func(moduleHash common.Hash, version uint16, debug bool) { - C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.EvictWasmRust = func(moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 9d51172986..f27d5834bf 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -166,6 +166,7 @@ func (p Programs) CallProgram( tracingInfo *util.TracingInfo, calldata []byte, reentrant bool, + runmode core.MessageRunMode, ) ([]byte, error) { evm := interpreter.Evm() contract := scope.Contract @@ -237,7 +238,11 @@ func (p Programs) CallProgram( if contract.CodeAddr != nil { address = *contract.CodeAddr } - return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model) + var arbos_tag uint32 + if runmode == core.MessageCommitMode { + arbos_tag = statedb.Database().WasmCacheTag() + } + return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) } func getWasm(statedb vm.StateDB, program common.Address) ([]byte, error) { diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 95f30e83b6..4bc978a2b6 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -143,6 +143,7 @@ func callProgram( evmData *EvmData, params *ProgParams, memoryModel *MemoryModel, + _arbos_tag uint32, ) ([]byte, error) { reqHandler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) gasLeft, retData, err := CallProgramLoop(moduleHash, calldata, scope.Contract.Gas, evmData, params, reqHandler) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index b5fb64f695..65762fd2d1 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -127,6 +127,7 @@ func (p *TxProcessor) ExecuteWASM(scope *vm.ScopeContext, input []byte, interpre tracingInfo, input, reentrant, + p.RunMode(), ) } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index b3ebe80f37..00218c9291 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -147,8 +147,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return nil, nil } + 
tag := s.bc.StateCache().WasmCacheTag() // reorg Rust-side VM state - C.stylus_reorg_vm(C.uint64_t(blockNum)) + C.stylus_reorg_vm(C.uint64_t(blockNum), C.uint32_t(tag)) err := s.bc.ReorgToOldBlock(targetBlock) if err != nil { From cd03bf07ed3d7065d8b5a243ac4562f62370774f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:09:20 -0600 Subject: [PATCH 75/92] geth: udate pin to support arbos tags --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 8048ac4bed..940fbe020e 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8048ac4bed2eda18284e3c022ea5ee4cce771134 +Subproject commit 940fbe020e03707365da09de939058944d9e1f5d From 1ed090dcda3ac03c0c46321cb4a309b59dcb87c8 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 17:23:44 -0600 Subject: [PATCH 76/92] cargo fmt --- arbitrator/stylus/src/cache.rs | 12 +++++++----- arbitrator/stylus/src/lib.rs | 16 ++++++++++++++-- arbitrator/stylus/src/native.rs | 3 ++- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 3a15bc5d6a..06739f2219 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -72,7 +72,9 @@ impl InitCache { } pub fn set_lru_size(size: u32) { - cache!().lru.resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) + cache!() + .lru + .resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) } /// Retrieves a cached value, updating items as necessary. @@ -106,7 +108,7 @@ impl InitCache { // if in LRU, add to ArbOS let mut cache = cache!(); if let Some(item) = cache.long_term.get(&key) { - return Ok(item.data()) + return Ok(item.data()); } if let Some(item) = cache.lru.peek(&key).cloned() { if long_term_tag == Self::ARBOS_TAG { @@ -135,7 +137,7 @@ impl InitCache { /// Evicts an item in the long-term cache. pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) { if long_term_tag != Self::ARBOS_TAG { - return + return; } let key = CacheKey::new(module_hash, version, debug); let mut cache = cache!(); @@ -146,8 +148,8 @@ impl InitCache { pub fn clear_long_term(long_term_tag: u32) { if long_term_tag != Self::ARBOS_TAG { - return - } + return; + } let mut cache = cache!(); let cache = &mut *cache; for (key, item) in cache.long_term.drain() { diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 6133b6ac34..3c53359f8b 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -194,7 +194,14 @@ pub unsafe extern "C" fn stylus_call( // Safety: module came from compile_user_wasm and we've paid for memory expansion let instance = unsafe { - NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, long_term_tag, debug_chain) + NativeInstance::deserialize_cached( + module, + config.version, + evm_api, + evm_data, + long_term_tag, + debug_chain, + ) }; let mut instance = match instance { Ok(instance) => instance, @@ -241,7 +248,12 @@ pub unsafe extern "C" fn stylus_cache_module( /// Evicts an activated user program from the init cache. 
#[no_mangle] -pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, arbos_tag: u32, debug: bool) { +pub extern "C" fn stylus_evict_module( + module_hash: Bytes32, + version: u16, + arbos_tag: u32, + debug: bool, +) { InitCache::evict(module_hash, version, arbos_tag, debug); } diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 38155818c0..2858d59fdc 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -126,7 +126,8 @@ impl> NativeInstance { if !env.evm_data.cached { long_term_tag = 0; } - let (module, store) = InitCache::insert(module_hash, module, version, long_term_tag, debug)?; + let (module, store) = + InitCache::insert(module_hash, module, version, long_term_tag, debug)?; Self::from_module(module, store, env) } From faa405c6799d852a2a9d7cfb38e7688464627d97 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Mon, 20 May 2024 10:04:41 -0300 Subject: [PATCH 77/92] adjust error and log msg to use 'expected blockhashes' instead of 'last batch messages block hashes' when pruning block hashes from the db --- arbnode/message_pruner.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index c31dbc496d..5d18341a27 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -118,10 +118,10 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) if err != nil { - return fmt.Errorf("error deleting last batch messages' block hashes: %w", err) + return fmt.Errorf("error deleting expected block hashes: %w", err) } if len(prunedKeysRange) > 0 { - log.Info("Pruned last batch messages' block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) From 7f8d471028d3093c5110dc429d5f5d9fddfe5878 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Mon, 20 May 2024 10:06:20 -0300 Subject: [PATCH 78/92] fix: uses arbnode.BlockHashMismatchLogMsg instead of block_hash_mismatch --- system_tests/seqfeed_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 589a48d3af..ab30598b60 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -84,8 +84,8 @@ func TestSequencerFeed(t *testing.T) { t.Fatal("Unexpected balance:", l2balance) } - if logHandler.WasLogged("block_hash_mismatch") { - t.Fatal("block_hash_mismatch was logged unexpectedly") + if logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) { + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") } } From 019581e7e733139c331751ae6485ffbd153f8dd5 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 20 May 2024 14:18:26 -0600 Subject: [PATCH 79/92] allowed-wasm-module-roots: accept paths as well --- cmd/nitro/nitro.go | 17 ++++++++++++++++- validator/valnode/valnode.go | 2 +- 2 files changed, 17 
insertions(+), 2 deletions(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 9280c3af02..473df21811 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -6,6 +6,7 @@ package main import ( "context" "crypto/ecdsa" + "encoding/hex" "errors" "fmt" "io" @@ -452,7 +453,21 @@ func mainImpl() int { if len(allowedWasmModuleRoots) > 0 { moduleRootMatched := false for _, root := range allowedWasmModuleRoots { - if common.HexToHash(root) == moduleRoot { + bytes, err := hex.DecodeString(root) + if err == nil { + if common.HexToHash(root) == common.BytesToHash(bytes) { + moduleRootMatched = true + break + } + continue + } + locator, locatorErr := server_common.NewMachineLocator(root) + if err != nil { + log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err) + continue + } + path := locator.GetMachinePath(moduleRoot) + if _, err := os.Stat(path); err == nil { moduleRootMatched = true break } diff --git a/validator/valnode/valnode.go b/validator/valnode/valnode.go index 93a5b37238..972e11189d 100644 --- a/validator/valnode/valnode.go +++ b/validator/valnode/valnode.go @@ -25,7 +25,7 @@ type WasmConfig struct { func WasmConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".root-path", DefaultWasmConfig.RootPath, "path to machine folders, each containing wasm files (machine.wavm.br, replay.wasm)") f.Bool(prefix+".enable-wasmroots-check", DefaultWasmConfig.EnableWasmrootsCheck, "enable check for compatibility of on-chain WASM module root with node") - f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots to check if the on-chain WASM module root belongs to on node startup") + f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots or mahcine base paths to match against on-chain WasmModuleRoot") } var DefaultWasmConfig = WasmConfig{ From 458669ad9a2e003c3c8ec920b24798a378b080c2 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 20 May 2024 17:59:45 -0600 Subject: [PATCH 80/92] dockerfile: sort split-validator support nitro has legacy machines and config to check for these wasmModuleRots nitro-validator has split-validation on nitro-dev is based of validator and has latest as well --- Dockerfile | 37 +++++++++++++++++++------------------ scripts/split-val-entry.sh | 2 +- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5c56b60cc0..f7e26ec084 100644 --- a/Dockerfile +++ b/Dockerfile @@ -203,6 +203,7 @@ COPY ./scripts/download-machine.sh . 
#RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 +RUN ./download-machine.sh consensus-v30-rc.1 0x8805d035d5fdb8bb4450f306d9ab82633e2b6316260529cdcaf1b3702afbd5d5 FROM golang:1.21-bookworm as node-builder WORKDIR /workspace @@ -268,11 +269,15 @@ USER user WORKDIR /home/user/ ENTRYPOINT [ "/usr/local/bin/nitro" ] +FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy + FROM nitro-node-slim as nitro-node USER root COPY --from=prover-export /bin/jit /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/daserver /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/datool /usr/local/bin/ +COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines +RUN rm -rf /workspace/target/legacy-machines/latest RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y \ @@ -282,10 +287,23 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version +ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/workspace/machines"] USER user -FROM nitro-node as nitro-node-dev-base +FROM nitro-node as nitro-node-validator +USER root +COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val +COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y xxd netcat-traditional && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* +COPY scripts/split-val-entry.sh /usr/local/bin +ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] +USER user + +FROM nitro-node-validator as nitro-node-dev USER root # Copy in latest WASM module root RUN rm -f /home/user/target/machines/latest @@ -309,22 +327,5 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ USER user -FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy - -FROM nitro-node-dev-base as nitro-node-dev -USER root - -RUN export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get install -y xxd netcat-traditional && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* -COPY scripts/split-val-entry.sh /usr/local/bin -COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines -RUN rm -rf /workspace/target/legacy-machines/latest -COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val -COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit -ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] -USER user - FROM nitro-node as nitro-node-default # Just to ensure nitro-node-dist is default diff --git a/scripts/split-val-entry.sh b/scripts/split-val-entry.sh index 6f56a8ec46..1f640f9763 100755 --- a/scripts/split-val-entry.sh +++ b/scripts/split-val-entry.sh @@ -16,4 +16,4 @@ for port in 52000 52001; do done done echo launching nitro-node 
-/usr/local/bin/nitro --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" +/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/workspace/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" From b3693be5a21f45d5e9ec63c681068844995d3dbd Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 20 May 2024 18:41:22 -0600 Subject: [PATCH 81/92] log when choosing validator --- staker/block_validator.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/staker/block_validator.go b/staker/block_validator.go index e494b3da10..50ccac0471 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1097,13 +1097,18 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { for _, root := range moduleRoots { if v.redisValidator != nil && validator.SpawnerSupportsModule(v.redisValidator, root) { v.chosenValidator[root] = v.redisValidator + log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", "redis") } else { for _, spawner := range v.execSpawners { if validator.SpawnerSupportsModule(spawner, root) { v.chosenValidator[root] = spawner + log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", spawner.Name()) break } } + if v.chosenValidator[root] == nil { + log.Error("validator not found", "WasmMosuleRoot", root) + } } } return nil From c79b98d6bbb3b36a43e089e2fa622c676ae5c1b5 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 20 May 2024 20:30:54 -0600 Subject: [PATCH 82/92] fix moduleRoots condition --- cmd/nitro/nitro.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 473df21811..815257cf7a 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -462,7 +462,7 @@ func mainImpl() int { continue } locator, locatorErr := server_common.NewMachineLocator(root) - if err != nil { + if locatorErr != nil { log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err) continue } From dc7e874065523970eae4d4f6c1b20f991c2c228b Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 20 May 2024 20:41:22 -0600 Subject: [PATCH 83/92] Dockerfile: use consensus 30-rc.2 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index f7e26ec084..e5718868fa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -203,7 +203,7 @@ COPY ./scripts/download-machine.sh . 
#RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 -RUN ./download-machine.sh consensus-v30-rc.1 0x8805d035d5fdb8bb4450f306d9ab82633e2b6316260529cdcaf1b3702afbd5d5 +RUN ./download-machine.sh consensus-v30-rc.2 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b FROM golang:1.21-bookworm as node-builder WORKDIR /workspace From 71c9da7c8cf98933c8d306cf198fcbc269727917 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 08:45:33 -0600 Subject: [PATCH 84/92] Dockerfile: fix path --- Dockerfile | 2 +- scripts/split-val-entry.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index e5718868fa..58976fc6e1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -287,7 +287,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version -ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/workspace/machines"] +ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/home/user/target/machines"] USER user diff --git a/scripts/split-val-entry.sh b/scripts/split-val-entry.sh index 1f640f9763..8e1be0f6cc 100755 --- a/scripts/split-val-entry.sh +++ b/scripts/split-val-entry.sh @@ -16,4 +16,4 @@ for port in 52000 52001; do done done echo launching nitro-node -/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/workspace/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" +/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/home/user/target/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" From 153ffa76ca5226b2068d01acef643fee3f0e0fa9 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 21 May 2024 13:32:07 -0300 Subject: [PATCH 85/92] add apt-get update to wasm-libs-builder --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5c56b60cc0..19a0b46ebd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -41,7 +41,8 @@ RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base as wasm-libs-builder # clang / lld used by soft-float wasm -RUN apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt +RUN apt-get update && \ + apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt # pinned rust 1.75.0 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.75.0 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ From de58296c1a41da7cf9b4fce82ab5687b4925bd47 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 12:28:32 -0600 Subject: [PATCH 86/92] geth: update --- 
go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 940fbe020e..b8d4ced531 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 940fbe020e03707365da09de939058944d9e1f5d +Subproject commit b8d4ced5316c987d095ef1fc3ecb5e8ae0df094d From e8685b359cd82771ec9b5c30900c32e4a142834a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 16:52:49 -0600 Subject: [PATCH 87/92] fix typo --- go-ethereum | 2 +- staker/block_validator.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go-ethereum b/go-ethereum index 8048ac4bed..b8d4ced531 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8048ac4bed2eda18284e3c022ea5ee4cce771134 +Subproject commit b8d4ced5316c987d095ef1fc3ecb5e8ae0df094d diff --git a/staker/block_validator.go b/staker/block_validator.go index 50ccac0471..027ee78248 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1097,17 +1097,17 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { for _, root := range moduleRoots { if v.redisValidator != nil && validator.SpawnerSupportsModule(v.redisValidator, root) { v.chosenValidator[root] = v.redisValidator - log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", "redis") + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis") } else { for _, spawner := range v.execSpawners { if validator.SpawnerSupportsModule(spawner, root) { v.chosenValidator[root] = spawner - log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", spawner.Name()) + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name()) break } } if v.chosenValidator[root] == nil { - log.Error("validator not found", "WasmMosuleRoot", root) + log.Error("validator not found", "WasmModuleRoot", root) } } } From aaf4d1c8ce1baa12d14b3becaf51510fb687d654 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:03:29 -0500 Subject: [PATCH 88/92] Fix off-by-one in data poster nonce check --- arbnode/dataposter/data_poster.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index fb35ac3c8d..34ca9e1483 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -857,24 +857,23 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed - var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { - latestBlockNumber, err = p.client.BlockNumber(ctx) + latestBlockNumber, err := p.client.BlockNumber(ctx) if err != nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantTxCount, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx 
with nonce %d: %w", newTx.FullTx.Nonce(), err) } - if precedingTx.FullTx.Nonce() > reorgResistantNonce { - log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) + if newTx.FullTx.Nonce() > reorgResistantTxCount { + log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) return nil } } else { - log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) + log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) } } } From 345e828b430efff7b66d401abe21759cc0af3abc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:41:53 -0500 Subject: [PATCH 89/92] Always log when sending previously unsent tx --- arbnode/dataposter/data_poster.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 34ca9e1483..399bc19dbd 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -857,13 +857,14 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed + var latestBlockNumber, prevBlockNumber, reorgResistantTxCount uint64 if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { - latestBlockNumber, err := p.client.BlockNumber(ctx) + latestBlockNumber, err = p.client.BlockNumber(ctx) if err != nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantTxCount, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantTxCount, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } @@ -872,9 +873,8 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) return nil } - } else { - log.Info("DataPoster will send 
previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) } + log.Debug("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) } } From 8737d5ccca5c252797af89906f1c5840df93d6ee Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:45:39 -0500 Subject: [PATCH 90/92] Improve previouslySent check --- arbnode/dataposter/data_poster.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 399bc19dbd..5aaef959d8 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -851,7 +851,8 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti // different type with a lower nonce. // If we decide not to send this tx yet, just leave it queued and with Sent set to false. // The resending/repricing loop in DataPoster.Start will keep trying. - if !newTx.Sent && newTx.FullTx.Nonce() > 0 { + previouslySent := newTx.Sent || (prevTx != nil && prevTx.Sent) // if we've previously sent this nonce + if !previouslySent && newTx.FullTx.Nonce() > 0 { precedingTx, err := p.queue.Get(ctx, arbmath.SaturatingUSub(newTx.FullTx.Nonce(), 1)) if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) From 0ce93785e406c3375cb1931297b4e9580e4faf4f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 22 May 2024 08:47:57 -0600 Subject: [PATCH 91/92] block_validator: fail but dont segfault if no validator --- staker/block_validator.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/staker/block_validator.go b/staker/block_validator.go index 027ee78248..5a511920f2 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -791,8 +791,9 @@ validationsLoop: } for _, moduleRoot := range wasmRoots { if v.chosenValidator[moduleRoot] == nil { - v.possiblyFatal(fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot)) - continue + notFoundErr := fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot) + v.possiblyFatal(notFoundErr) + return nil, notFoundErr } if v.chosenValidator[moduleRoot].Room() == 0 { log.Trace("advanceValidations: no more room", "moduleRoot", moduleRoot) @@ -1107,7 +1108,7 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { } } if v.chosenValidator[root] == nil { - log.Error("validator not found", "WasmModuleRoot", root) + return fmt.Errorf("cannot validate WasmModuleRoot %v", root) } } } From 8504c5c0ba8303fdf18ce8efc0f94b1e81b47f00 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 23 May 2024 08:54:02 -0700 Subject: [PATCH 92/92] Update blocks_reexecutor/blocks_reexecutor.go Co-authored-by: Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com> --- blocks_reexecutor/blocks_reexecutor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index f58e0ce00f..1e4a06fe90 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -35,7 +35,7 @@ func (c *Config) Validate() error { if c.EndBlock < 
c.StartBlock { return errors.New("invalid block range for blocks re-execution") } - if c.Room < 0 { + if c.Room <= 0 { return errors.New("room for blocks re-execution should be greater than 0") } return nil
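
For reference, the allowed-wasm-module-roots handling introduced in PATCH 79 (with the locator-error condition corrected in PATCH 82 and the container paths fixed in PATCH 84) treats each configured entry either as a hex module-root hash or as a machine base path. Below is a minimal standalone sketch of that matching rule, assuming a <base>/<module-root-hex> directory layout in place of server_common.MachineLocator; the package name, the Allowed helper, and the 0x-prefix trimming are illustrative, not Nitro APIs.

```go
// Package wasmroots sketches the matching rule behind
// --validation.wasm.allowed-wasm-module-roots: each entry is either a hex
// module-root hash or a base path holding machine folders.
package wasmroots

import (
	"encoding/hex"
	"os"
	"path/filepath"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

// Allowed reports whether the on-chain module root is covered by one of the
// configured entries. Hex entries are compared directly; anything else is
// treated as a directory expected to contain a machine folder named after
// the module root (a stand-in for MachineLocator.GetMachinePath).
func Allowed(onChainRoot common.Hash, entries []string) bool {
	for _, entry := range entries {
		if _, err := hex.DecodeString(strings.TrimPrefix(entry, "0x")); err == nil {
			// The entry parses as hex: treat it as a module-root hash.
			if common.HexToHash(entry) == onChainRoot {
				return true
			}
			continue
		}
		// Not hex: treat the entry as a machine base path and accept the root
		// if a machine folder for it exists on disk.
		if _, err := os.Stat(filepath.Join(entry, onChainRoot.Hex())); err == nil {
			return true
		}
	}
	return false
}
```

With entries shaped like this, the images built in PATCHES 80 and 84 can pass both /home/user/nitro-legacy/machines and /home/user/target/machines on the command line and still accept legacy module roots alongside the current one.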
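
The validator-selection changes spread across PATCHES 81, 87 and 91 amount to: log which spawner is picked for each WASM module root, and make a root with no capable spawner a hard error instead of a nil map entry that advanceValidations could hit later. Here is a compact sketch under assumed types; the Spawner interface, SupportsModule, and Choose are stand-ins for Nitro's validator spawner types and validator.SpawnerSupportsModule, not the real signatures.

```go
// Package chooser sketches how the block validator assigns a validation
// spawner to each WASM module root.
package chooser

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// Spawner is a stand-in interface; only what this sketch needs is modeled.
type Spawner interface {
	Name() string
	SupportsModule(root common.Hash) bool
}

// Choose prefers the redis spawner when it supports a root, otherwise the
// first local spawner that does. A root no spawner supports is returned as
// an error rather than left as a nil map entry.
func Choose(moduleRoots []common.Hash, redis Spawner, locals []Spawner) (map[common.Hash]Spawner, error) {
	chosen := make(map[common.Hash]Spawner)
	for _, root := range moduleRoots {
		if redis != nil && redis.SupportsModule(root) {
			chosen[root] = redis
			log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis")
			continue
		}
		for _, spawner := range locals {
			if spawner.SupportsModule(root) {
				chosen[root] = spawner
				log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name())
				break
			}
		}
		if chosen[root] == nil {
			return nil, fmt.Errorf("cannot validate WasmModuleRoot %v", root)
		}
	}
	return chosen, nil
}
```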
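
PATCHES 88-90 rework when the data poster holds back a queued transaction so it never opens a nonce gap in the mempool: the sender's transaction count is sampled one block behind the head, and the new tx (not the preceding one, which was the off-by-one) is only sent if its nonce does not exceed that count. Below is a reduced standalone sketch of that guard against a plain go-ethereum client; ShouldHold and its parameters are illustrative, and the surrounding conditions on the preceding queued tx (same type, previously sent) are omitted.

```go
// Package nonceguard sketches the data poster's mempool nonce-gap check.
package nonceguard

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// ShouldHold reports whether a queued transaction with nonce txNonce should
// stay queued for now. The sender's transaction count is read one block
// behind the head ("reorg resistant tx count"); if the queued nonce exceeds
// it, broadcasting now could create a nonce gap, so the caller leaves the tx
// queued and lets the resend loop retry later.
func ShouldHold(ctx context.Context, client *ethclient.Client, sender common.Address, txNonce uint64) (bool, error) {
	latest, err := client.BlockNumber(ctx)
	if err != nil {
		return false, fmt.Errorf("getting latest block number: %w", err)
	}
	prevBlock := latest
	if prevBlock > 0 {
		prevBlock-- // saturating decrement, like arbmath.SaturatingUSub(latest, 1)
	}
	reorgResistantTxCount, err := client.NonceAt(ctx, sender, new(big.Int).SetUint64(prevBlock))
	if err != nil {
		return false, fmt.Errorf("getting tx count at block %d: %w", prevBlock, err)
	}
	// The off-by-one fix: compare the nonce of the tx about to be sent, not
	// the preceding tx's nonce, against the confirmed tx count.
	return txNonce > reorgResistantTxCount, nil
}
```

A caller would only consult such a guard when the preceding queued tx was never broadcast or differs in type, mirroring the precedingTx checks in the patch; otherwise the tx is sent immediately.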