diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index b2713bdfde..3ec3327392 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -64,7 +64,7 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d97b5bfd5..cc5cd68a90 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,7 +39,7 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' @@ -174,7 +174,7 @@ jobs: run: | packages=`go list ./...` stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") - + - name: Archive detailed run log uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index acaa97895d..1cde8f06b9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' diff --git a/Dockerfile b/Dockerfile index 3eff2dc787..50c5faf8d6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ RUN apt-get update && \ FROM scratch as brotli-library-export COPY --from=brotli-library-builder /workspace/install/ / -FROM node:16-bookworm-slim as contracts-builder +FROM node:18-bookworm-slim as contracts-builder RUN apt-get update && \ apt-get install -y git python3 make g++ curl RUN curl -L https://foundry.paradigm.xyz | bash && . ~/.bashrc && ~/.foundry/bin/foundryup diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index f6c3e9fe8f..aa5537476c 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -616,7 +616,10 @@ impl<'a> WasmBinary<'a> { cached_init = cached_init.saturating_add(data_len.saturating_mul(75244) / 100_000); cached_init = cached_init.saturating_add(footprint as u64 * 5); - let mut init = cached_init; + let mut init: u64 = 0; + if compile.version == 1 { + init = cached_init; // in version 1 cached cost is part of init cost + } init = init.saturating_add(funcs.saturating_mul(8252) / 1000); init = init.saturating_add(type_len.saturating_mul(1059) / 1000); init = init.saturating_add(wasm_len.saturating_mul(1286) / 10_000); diff --git a/arbitrator/prover/src/programs/config.rs b/arbitrator/prover/src/programs/config.rs index 0b5ce17475..1a37294b04 100644 --- a/arbitrator/prover/src/programs/config.rs +++ b/arbitrator/prover/src/programs/config.rs @@ -162,8 +162,7 @@ impl CompileConfig { match version { 0 => {} - 1 => { - // TODO: settle on reasonable values for the v1 release + 1 | 2 => { config.bounds.heap_bound = Pages(128); // 8 mb config.bounds.max_frame_size = 10 * 1024; config.bounds.max_frame_contention = 4096; diff --git a/arbitrator/stylus/tests/return-size.wat b/arbitrator/stylus/tests/return-size.wat new file mode 100644 index 0000000000..52a2bc8ece --- /dev/null +++ b/arbitrator/stylus/tests/return-size.wat @@ -0,0 +1,71 @@ +;; Copyright 2024, Offchain Labs, Inc. 
+;; For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +(module + (import "vm_hooks" "pay_for_memory_grow" (func (param i32))) + (import "vm_hooks" "read_args" (func $read_args (param i32))) + (import "vm_hooks" "write_result" (func $write_result (param i32 i32))) + (func (export "user_entrypoint") (param $args_len i32) (result i32) + (local $size i32) + + ;; read input + i32.const 0 + call $read_args + + ;; read the target size from the last 4 bytes of the input big endian + ;; byte 1 + local.get $args_len + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + + ;; byte 2 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 8 + i32.shl + i32.or + + ;; byte 3 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 16 + i32.shl + i32.or + + ;; byte 4 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 32 + i32.shl + i32.or + + local.tee $size + + ;; grow memory enough to handle the output + ;; we start with one page allocated, so no need to round up + i32.const 65536 + i32.div_u + memory.grow + drop + + ;; set return data + i32.const 0 + local.get $size + call $write_result + + ;; return success + i32.const 0 + ) + (memory (export "memory") 1) +) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 0ac5d1380d..9ff3dd3aa5 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -74,8 +74,8 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) } return &ArbosState{ arbosVersion, - 30, - 30, + 31, + 31, backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), @@ -318,6 +318,12 @@ func (state *ArbosState) UpgradeArbosVersion( case 30: programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) + case 31: + params, err := state.Programs().Params() + ensure(err) + ensure(params.UpgradeToVersion(2)) + ensure(params.Save()) + default: return fmt.Errorf( "the chain is upgrading to unsupported ArbOS version %v, %w", diff --git a/arbos/programs/api.go b/arbos/programs/api.go index 787f127ea4..65a58a47c2 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -228,6 +228,9 @@ func newApiClosures( return addr, res, cost, nil } emitLog := func(topics []common.Hash, data []byte) error { + if tracingInfo != nil { + tracingInfo.RecordEmitLog(topics, data) + } if readOnly { return vm.ErrWriteProtection } diff --git a/arbos/programs/native.go b/arbos/programs/native.go index ffb27cb6c0..f8e2696aad 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -222,10 +222,10 @@ func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out // Caches a program in Rust. We write a record so that we can undo on revert. // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. 
-func cacheProgram(db vm.StateDB, module common.Hash, program Program, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { if runMode == core.MessageCommitMode { // address is only used for logging - asm, err := getLocalAsm(db, module, common.Address{}, code, codeHash, params.PageLimit, time, debug, program) + asm, err := getLocalAsm(db, module, addressForLogging, code, codeHash, params.PageLimit, time, debug, program) if err != nil { panic("unable to recreate wasm") } diff --git a/arbos/programs/params.go b/arbos/programs/params.go index 6138e36033..a0b8acd95c 100644 --- a/arbos/programs/params.go +++ b/arbos/programs/params.go @@ -5,6 +5,7 @@ package programs import ( "errors" + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -29,6 +30,8 @@ const initialExpiryDays = 365 // deactivate after 1 year. const initialKeepaliveDays = 31 // wait a month before allowing reactivation. const initialRecentCacheSize = 32 // cache the 32 most recent programs. +const v2MinInitGas = 69 // charge 69 * 128 = 8832 gas (minCachedGas will also be charged in v2). + const MinCachedGasUnits = 32 /// 32 gas for each unit const MinInitGasUnits = 128 // 128 gas for each unit const CostScalarPercent = 2 // 2% for each unit @@ -137,6 +140,18 @@ func (p *StylusParams) Save() error { return nil } +func (p *StylusParams) UpgradeToVersion(version uint16) error { + if version != 2 { + return fmt.Errorf("dest version not supported for upgrade") + } + if p.Version != 1 { + return fmt.Errorf("existing version not supported for upgrade") + } + p.Version = 2 + p.MinInitGas = v2MinInitGas + return nil +} + func initStylusParams(sto *storage.Storage) { params := &StylusParams{ backingStorage: sto, diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index bfe48ec876..12102bac84 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/addressSet" "github.com/offchainlabs/nitro/arbos/storage" @@ -154,7 +155,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c // replace the cached asm if cached { code := statedb.GetCode(address) - cacheProgram(statedb, info.moduleHash, programData, code, codeHash, params, debugMode, time, runMode) + cacheProgram(statedb, info.moduleHash, programData, address, code, codeHash, params, debugMode, time, runMode) } return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) @@ -163,6 +164,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, + arbosVersion uint64, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, @@ -172,6 +174,7 @@ func (p Programs) CallProgram( evm := interpreter.Evm() contract := scope.Contract codeHash := contract.CodeHash + startingGas := contract.Gas debugMode := evm.ChainConfig().DebugMode() params, err := p.Params() @@ -200,9 +203,10 @@ func (p 
Programs) CallProgram( // pay for program init cached := program.cached || statedb.GetRecentWasms().Insert(codeHash, params.BlockCacheSize) - if cached { + if cached || program.version > 1 { // in version 1 cached cost is part of init cost callCost = am.SaturatingUAdd(callCost, program.cachedGas(params)) - } else { + } + if !cached { callCost = am.SaturatingUAdd(callCost, program.initGas(params)) } if err := contract.BurnGas(callCost); err != nil { @@ -243,7 +247,26 @@ func (p Programs) CallProgram( if runmode == core.MessageCommitMode { arbos_tag = statedb.Database().WasmCacheTag() } - return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) + ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) + if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { + // Ensure that return data costs as least as much as it would in the EVM. + evmCost := evmMemoryCost(uint64(len(ret))) + if startingGas < evmCost { + contract.Gas = 0 + return nil, vm.ErrOutOfGas + } + maxGasToReturn := startingGas - evmCost + contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) + } + return ret, err +} + +func evmMemoryCost(size uint64) uint64 { + // It would take 100GB to overflow this calculation, so no need to worry about that + words := (size + 31) / 32 + linearCost := words * gethParams.MemoryGas + squareCost := (words * words) / gethParams.QuadCoeffDiv + return linearCost + squareCost } func getWasm(statedb vm.StateDB, program common.Address) ([]byte, error) { @@ -362,10 +385,13 @@ func (p Programs) ProgramCached(codeHash common.Hash) (bool, error) { } // Sets whether a program is cached. Errors if trying to cache an expired program. +// `address` must be present if setting cache to true as of ArbOS 31, +// and if `address` is present it must have the specified codeHash. 
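For context on the return-data change in CallProgram above: a minimal sketch (not the consensus code) of the gas floor that evmMemoryCost enforces, using go-ethereum's standard constants MemoryGas = 3 and QuadCoeffDiv = 512; the helper name returnDataFloor is illustrative only.

// Sketch: the smallest amount of gas a Stylus call must retain for a return
// payload of `size` bytes, mirroring EVM memory-expansion pricing.
func returnDataFloor(size uint64) uint64 {
	const memoryGas = 3      // params.MemoryGas in go-ethereum
	const quadCoeffDiv = 512 // params.QuadCoeffDiv in go-ethereum
	words := (size + 31) / 32
	return words*memoryGas + (words*words)/quadCoeffDiv
}

For the 1 MiB return used by TestReturnDataCost this works out to 32768 words, i.e. 32768*3 + 32768*32768/512 = 98304 + 2097152 = 2195456 gas, which dominates the gas the equivalent EVM contract pays for the same payload in that test.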
func (p Programs) SetProgramCached( emitEvent func() error, db vm.StateDB, codeHash common.Hash, + address common.Address, cache bool, time uint64, params *StylusParams, @@ -378,8 +404,8 @@ func (p Programs) SetProgramCached( } expired := program.ageSeconds > am.DaysToSeconds(params.ExpiryDays) - if program.version == 0 && cache { - return ProgramNeedsUpgradeError(0, params.Version) + if program.version != params.Version && cache { + return ProgramNeedsUpgradeError(program.version, params.Version) } if expired && cache { return ProgramExpiredError(program.ageSeconds) @@ -405,7 +431,7 @@ func (p Programs) SetProgramCached( if err != nil { return err } - cacheProgram(db, moduleHash, program, code, codeHash, params, debug, time, runMode) + cacheProgram(db, moduleHash, program, address, code, codeHash, params, debug, time, runMode) } else { evictProgram(db, moduleHash, program.version, debug, runMode, expired) } @@ -437,7 +463,12 @@ func (p Programs) ProgramTimeLeft(codeHash common.Hash, time uint64, params *Sty func (p Programs) ProgramInitGas(codeHash common.Hash, time uint64, params *StylusParams) (uint64, uint64, error) { program, err := p.getActiveProgram(codeHash, time, params) - return program.initGas(params), program.cachedGas(params), err + cachedGas := program.cachedGas(params) + initGas := program.initGas(params) + if params.Version > 1 { + initGas += cachedGas + } + return initGas, cachedGas, err } func (p Programs) ProgramMemoryFootprint(codeHash common.Hash, time uint64, params *StylusParams) (uint16, error) { diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 0301a7e847..f7191dca8f 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -95,7 +95,7 @@ func activateProgram( } // stub any non-consensus, Rust-side caching updates -func cacheProgram(db vm.StateDB, module common.Hash, program Program, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { } func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode, forever bool) { } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index d3ca790ce8..b08c7c5d30 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -123,6 +123,7 @@ func (p *TxProcessor) ExecuteWASM(scope *vm.ScopeContext, input []byte, interpre return p.state.Programs().CallProgram( scope, p.evm.StateDB, + p.state.ArbOSVersion(), interpreter, tracingInfo, input, diff --git a/arbos/util/tracing.go b/arbos/util/tracing.go index 49b82d6d64..f0f101bc20 100644 --- a/arbos/util/tracing.go +++ b/arbos/util/tracing.go @@ -4,6 +4,7 @@ package util import ( + "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -47,6 +48,26 @@ func NewTracingInfo(evm *vm.EVM, from, to common.Address, scenario TracingScenar } } +func (info *TracingInfo) RecordEmitLog(topics []common.Hash, data []byte) { + size := uint64(len(data)) + var args []uint256.Int + args = append(args, *uint256.NewInt(0)) // offset: byte offset in the memory in bytes + args = append(args, *uint256.NewInt(size)) // size: byte size to copy (length of data) + for _, topic := range topics { + args = append(args, HashToUint256(topic)) // topic: 32-byte value. 
Max topics count is 4 + } + memory := vm.NewMemory() + memory.Resize(size) + memory.Set(0, size, data) + scope := &vm.ScopeContext{ + Memory: memory, + Stack: TracingStackFromArgs(args...), + Contract: info.Contract, + } + logType := fmt.Sprintf("LOG%d", len(topics)) + info.Tracer.CaptureState(0, vm.StringToOp(logType), 0, 0, scope, []byte{}, info.Depth, nil) +} + func (info *TracingInfo) RecordStorageGet(key common.Hash) { tracer := info.Tracer if info.Scenario == TracingDuringEVM { diff --git a/cmd/conf/init.go b/cmd/conf/init.go index 7c0db0b057..4bea00f9f2 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -16,6 +16,7 @@ type InitConfig struct { Url string `koanf:"url"` Latest string `koanf:"latest"` LatestBase string `koanf:"latest-base"` + ValidateChecksum bool `koanf:"validate-checksum"` DownloadPath string `koanf:"download-path"` DownloadPoll time.Duration `koanf:"download-poll"` DevInit bool `koanf:"dev-init"` @@ -39,6 +40,7 @@ var InitConfigDefault = InitConfig{ Url: "", Latest: "", LatestBase: "https://snapshot.arbitrum.foundation/", + ValidateChecksum: true, DownloadPath: "/tmp/", DownloadPoll: time.Minute, DevInit: false, @@ -62,6 +64,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".url", InitConfigDefault.Url, "url to download initialization data - will poll if download fails") f.String(prefix+".latest", InitConfigDefault.Latest, "if set, searches for the latest snapshot of the given kind "+acceptedSnapshotKindsStr) f.String(prefix+".latest-base", InitConfigDefault.LatestBase, "base url used when searching for the latest") + f.Bool(prefix+".validate-checksum", InitConfigDefault.ValidateChecksum, "if true: validate the checksum after downloading the snapshot") f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 3b97b45a3e..97678a7d23 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -77,18 +77,28 @@ func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, err return initFile, nil } log.Info("Downloading initial database", "url", initConfig.Url) - path, err := downloadFile(ctx, initConfig, initConfig.Url) - if errors.Is(err, notFoundError) { - return downloadInitInParts(ctx, initConfig) + if !initConfig.ValidateChecksum { + file, err := downloadFile(ctx, initConfig, initConfig.Url, nil) + if err != nil && errors.Is(err, notFoundError) { + return downloadInitInParts(ctx, initConfig) + } + return file, err } - return path, err -} - -func downloadFile(ctx context.Context, initConfig *conf.InitConfig, url string) (string, error) { - checksum, err := fetchChecksum(ctx, url+".sha256") + checksum, err := fetchChecksum(ctx, initConfig.Url+".sha256") if err != nil { + if errors.Is(err, notFoundError) { + return downloadInitInParts(ctx, initConfig) + } return "", fmt.Errorf("error fetching checksum: %w", err) } + file, err := downloadFile(ctx, initConfig, initConfig.Url, checksum) + if err != nil && errors.Is(err, notFoundError) { + return "", fmt.Errorf("file not found but checksum exists") + } + return file, err +} + +func downloadFile(ctx context.Context, initConfig *conf.InitConfig, url string, checksum []byte) (string, error) { grabclient := grab.NewClient() printTicker := 
time.NewTicker(time.Second) defer printTicker.Stop() @@ -99,7 +109,10 @@ func downloadFile(ctx context.Context, initConfig *conf.InitConfig, url string) if err != nil { panic(err) } - req.SetChecksum(sha256.New(), checksum, false) + if checksum != nil { + const deleteOnError = true + req.SetChecksum(sha256.New(), checksum, deleteOnError) + } resp := grabclient.Do(req.WithContext(ctx)) firstPrintTime := time.Now().Add(time.Second * 2) updateLoop: @@ -125,7 +138,7 @@ func downloadFile(ctx context.Context, initConfig *conf.InitConfig, url string) case <-resp.Done: if err := resp.Err(); err != nil { if resp.HTTPResponse.StatusCode == http.StatusNotFound { - return "", fmt.Errorf("file not found but checksum exists") + return "", notFoundError } fmt.Printf("\n attempt %d failed: %v\n", attempt, err) break updateLoop @@ -193,39 +206,66 @@ func downloadInitInParts(ctx context.Context, initConfig *conf.InitConfig) (stri if err != nil || !fileInfo.IsDir() { return "", fmt.Errorf("download path must be a directory: %v", initConfig.DownloadPath) } - part := 0 - parts := []string{} + archiveUrl, err := url.Parse(initConfig.Url) + if err != nil { + return "", fmt.Errorf("failed to parse init url \"%s\": %w", initConfig.Url, err) + } + + // Get parts from manifest file + manifest, err := httpGet(ctx, archiveUrl.String()+".manifest.txt") + if err != nil { + return "", fmt.Errorf("failed to get manifest file: %w", err) + } + partNames := []string{} + checksums := [][]byte{} + lines := strings.Split(strings.TrimSpace(string(manifest)), "\n") + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) != 2 { + return "", fmt.Errorf("manifest file in wrong format") + } + checksum, err := hex.DecodeString(fields[0]) + if err != nil { + return "", fmt.Errorf("failed decoding checksum in manifest file: %w", err) + } + checksums = append(checksums, checksum) + partNames = append(partNames, fields[1]) + } + + partFiles := []string{} defer func() { // remove all temporary files. - for _, part := range parts { + for _, part := range partFiles { err := os.Remove(part) if err != nil { log.Warn("Failed to remove temporary file", "file", part) } } }() - for { - url := fmt.Sprintf("%s.part%d", initConfig.Url, part) - log.Info("Downloading database part", "url", url) - partFile, err := downloadFile(ctx, initConfig, url) - if errors.Is(err, notFoundError) { - log.Info("Part not found; concatenating archive into single file", "numParts", len(parts)) - break - } else if err != nil { - return "", err + + // Download parts + for i, partName := range partNames { + log.Info("Downloading database part", "part", partName) + partUrl := archiveUrl.JoinPath("..", partName).String() + var checksum []byte + if initConfig.ValidateChecksum { + checksum = checksums[i] } - parts = append(parts, partFile) - part++ + partFile, err := downloadFile(ctx, initConfig, partUrl, checksum) + if err != nil { + return "", fmt.Errorf("error downloading part \"%s\": %w", partName, err) + } + partFiles = append(partFiles, partFile) } - return joinArchive(parts) + archivePath := path.Join(initConfig.DownloadPath, path.Base(archiveUrl.Path)) + return joinArchive(partFiles, archivePath) } // joinArchive joins the archive parts into a single file and return its path. 
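As an aside on the manifest handling added above: the downloader expects <archive>.manifest.txt to hold one "<sha256-hex> <part-name>" pair per line (whitespace-separated, as parsed with strings.Fields). A minimal sketch of producing such a body in Go, assuming crypto/sha256, fmt, and strings are imported; the helper name and parameters are illustrative only.

// Sketch: build a manifest body in the format downloadInitInParts parses,
// one "<sha256-hex> <part-name>" pair per line.
func buildManifest(partNames []string, partData [][]byte) string {
	var b strings.Builder
	for i, name := range partNames {
		sum := sha256.Sum256(partData[i])
		fmt.Fprintf(&b, "%x %s\n", sum, name)
	}
	return b.String()
}

This mirrors how the tests in cmd/nitro/init_test.go construct their manifest fixtures.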
-func joinArchive(parts []string) (string, error) { +func joinArchive(parts []string, archivePath string) (string, error) { if len(parts) == 0 { return "", fmt.Errorf("no database parts found") } - archivePath := strings.TrimSuffix(parts[0], ".part0") archive, err := os.Create(archivePath) if err != nil { return "", fmt.Errorf("failed to create archive: %w", err) @@ -332,6 +372,30 @@ func dirExists(path string) bool { return info.IsDir() } +func checkEmptyDatabaseDir(dir string, force bool) error { + entries, err := os.ReadDir(dir) + if err != nil { + return fmt.Errorf("failed to open database dir %s: %w", dir, err) + } + unexpectedFiles := []string{} + allowedFiles := map[string]bool{ + "LOCK": true, "classic-msg": true, "l2chaindata": true, + } + for _, entry := range entries { + if !allowedFiles[entry.Name()] { + unexpectedFiles = append(unexpectedFiles, entry.Name()) + } + } + if len(unexpectedFiles) > 0 { + if force { + return fmt.Errorf("trying to overwrite old database directory '%s' (delete the database directory and try again)", dir) + } + firstThreeFilenames := strings.Join(unexpectedFiles[:min(len(unexpectedFiles), 3)], ", ") + return fmt.Errorf("found %d unexpected files in database directory, including: %s", len(unexpectedFiles), firstThreeFilenames) + } + return nil +} + var pebbleNotExistErrorRegex = regexp.MustCompile("pebble: database .* does not exist") func isPebbleNotExistError(err error) bool { @@ -429,23 +493,8 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return nil, nil, fmt.Errorf(errorFmt, stack.InstanceDir(), grandParentDir) } - // Check if database directory is empty - entries, err := os.ReadDir(stack.InstanceDir()) - if err != nil { - return nil, nil, fmt.Errorf("failed to open database dir %s: %w", stack.InstanceDir(), err) - } - unexpectedFiles := []string{} - for _, entry := range entries { - if entry.Name() != "LOCK" { - unexpectedFiles = append(unexpectedFiles, entry.Name()) - } - } - if len(unexpectedFiles) > 0 { - if config.Init.Force { - return nil, nil, fmt.Errorf("trying to overwrite old database directory '%s' (delete the database directory and try again)", stack.InstanceDir()) - } - firstThreeFilenames := strings.Join(unexpectedFiles[:min(len(unexpectedFiles), 3)], ", ") - return nil, nil, fmt.Errorf("found %d unexpected files in database directory, including: %s", len(unexpectedFiles), firstThreeFilenames) + if err := checkEmptyDatabaseDir(stack.InstanceDir(), config.Init.Force); err != nil { + return nil, nil, err } if err := setLatestSnapshotUrl(ctx, &config.Init, config.Chain.Name); err != nil { diff --git a/cmd/nitro/init_test.go b/cmd/nitro/init_test.go index b0f5011ecd..6c363972e9 100644 --- a/cmd/nitro/init_test.go +++ b/cmd/nitro/init_test.go @@ -13,7 +13,9 @@ import ( "net" "net/http" "os" + "path" "path/filepath" + "strings" "testing" "time" @@ -22,13 +24,47 @@ import ( "github.com/offchainlabs/nitro/util/testhelpers" ) -func TestDownloadInit(t *testing.T) { - const ( - archiveName = "random_data.tar.gz" - dataSize = 1024 * 1024 - filePerm = 0600 - ) +const ( + archiveName = "random_data.tar.gz" + numParts = 3 + partSize = 1024 * 1024 + dataSize = numParts * partSize + filePerm = 0600 + dirPerm = 0700 +) +func TestDownloadInitWithoutChecksum(t *testing.T) { + // Create archive with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + + // Write archive file + archiveFile := fmt.Sprintf("%s/%s", serverDir, archiveName) + err := os.WriteFile(archiveFile, data, filePerm) + 
Require(t, err, "failed to write archive") + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + initConfig.ValidateChecksum = false + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // Check archive contents + receivedData, err := os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } +} + +func TestDownloadInitWithChecksum(t *testing.T) { // Create archive with random data serverDir := t.TempDir() data := testhelpers.RandomSlice(dataSize) @@ -65,32 +101,69 @@ func TestDownloadInit(t *testing.T) { } } -func TestDownloadInitInParts(t *testing.T) { - const ( - archiveName = "random_data.tar.gz" - numParts = 3 - partSize = 1024 * 1024 - dataSize = numParts * partSize - filePerm = 0600 - ) +func TestDownloadInitInPartsWithoutChecksum(t *testing.T) { + // Create parts with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + manifest := bytes.NewBuffer(nil) + for i := 0; i < numParts; i++ { + partData := data[partSize*i : partSize*(i+1)] + partName := fmt.Sprintf("%s.part%d", archiveName, i) + fmt.Fprintf(manifest, "%s %s\n", strings.Repeat("0", 64), partName) + err := os.WriteFile(path.Join(serverDir, partName), partData, filePerm) + Require(t, err, "failed to write part") + } + manifestFile := fmt.Sprintf("%s/%s.manifest.txt", serverDir, archiveName) + err := os.WriteFile(manifestFile, manifest.Bytes(), filePerm) + Require(t, err, "failed to write manifest file") + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + initConfig.ValidateChecksum = false + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // check database contents + receivedData, err := os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } + + // Check if the function deleted the temporary files + entries, err := os.ReadDir(initConfig.DownloadPath) + Require(t, err, "failed to read temp dir") + if len(entries) != 1 { + t.Error("download function did not delete temp files") + } +} + +func TestDownloadInitInPartsWithChecksum(t *testing.T) { // Create parts with random data serverDir := t.TempDir() data := testhelpers.RandomSlice(dataSize) + manifest := bytes.NewBuffer(nil) for i := 0; i < numParts; i++ { // Create part and checksum partData := data[partSize*i : partSize*(i+1)] + partName := fmt.Sprintf("%s.part%d", archiveName, i) checksumBytes := sha256.Sum256(partData) checksum := hex.EncodeToString(checksumBytes[:]) + fmt.Fprintf(manifest, "%s %s\n", checksum, partName) // Write part file - partFile := fmt.Sprintf("%s/%s.part%d", serverDir, archiveName, i) - err := os.WriteFile(partFile, partData, filePerm) + err := os.WriteFile(path.Join(serverDir, partName), partData, filePerm) Require(t, err, "failed to write part") - // 
Write checksum file - checksumFile := partFile + ".sha256" - err = os.WriteFile(checksumFile, []byte(checksum), filePerm) - Require(t, err, "failed to write checksum") } + manifestFile := fmt.Sprintf("%s/%s.manifest.txt", serverDir, archiveName) + err := os.WriteFile(manifestFile, manifest.Bytes(), filePerm) + Require(t, err, "failed to write manifest file") // Start HTTP server ctx, cancel := context.WithCancel(context.Background()) @@ -124,8 +197,6 @@ func TestSetLatestSnapshotUrl(t *testing.T) { chain = "arb1" snapshotKind = "archive" latestFile = "latest-" + snapshotKind + ".txt" - dirPerm = 0700 - filePerm = 0600 ) testCases := []struct { @@ -240,3 +311,53 @@ func TestIsNotExistError(t *testing.T) { testIsNotExistError(t, "leveldb", isLeveldbNotExistError) }) } + +func TestEmptyDatabaseDir(t *testing.T) { + testCases := []struct { + name string + files []string + force bool + wantErr string + }{ + { + name: "succeed with empty dir", + }, + { + name: "succeed with expected files", + files: []string{"LOCK", "classic-msg", "l2chaindata"}, + }, + { + name: "fail with unexpected files", + files: []string{"LOCK", "a", "b", "c", "d"}, + wantErr: "found 4 unexpected files in database directory, including: a, b, c", + }, + { + name: "fail with unexpected files when forcing", + files: []string{"LOCK", "a", "b", "c", "d"}, + force: true, + wantErr: "trying to overwrite old database directory", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + for _, file := range tc.files { + const filePerm = 0600 + err := os.WriteFile(path.Join(dir, file), []byte{1, 2, 3}, filePerm) + Require(t, err) + } + err := checkEmptyDatabaseDir(dir, tc.force) + if tc.wantErr == "" { + if err != nil { + t.Errorf("expected nil error, got %q", err) + } + } else { + if err == nil { + t.Error("expected error, got nil") + } else if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("expected %q, got %q", tc.wantErr, err) + } + } + }) + } +} diff --git a/contracts b/contracts index 7a41cd59cd..61204dd455 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7a41cd59cdf2eb01cf31c2351b8d1ff6fbf52178 +Subproject commit 61204dd455966cb678192427a07aa9795ff91c14 diff --git a/go-ethereum b/go-ethereum index b85c24798e..e35bf9cdd3 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b85c24798efc5a826cd813ac899a1ab168db31c2 +Subproject commit e35bf9cdd3d02034ac1be34a479d101f12012ba6 diff --git a/precompiles/ArbWasmCache.go b/precompiles/ArbWasmCache.go index 36b4e1ad31..3cada9dd70 100644 --- a/precompiles/ArbWasmCache.go +++ b/precompiles/ArbWasmCache.go @@ -3,6 +3,8 @@ package precompiles +import "github.com/ethereum/go-ethereum/common" + type ArbWasmCache struct { Address addr // 0x72 @@ -20,14 +22,23 @@ func (con ArbWasmCache) AllCacheManagers(c ctx, _ mech) ([]addr, error) { return c.State.Programs().CacheManagers().AllMembers(65536) } -// Caches all programs with the given codehash. Caller must be a cache manager or chain owner. +// Deprecated: replaced with CacheProgram. func (con ArbWasmCache) CacheCodehash(c ctx, evm mech, codehash hash) error { - return con.setProgramCached(c, evm, codehash, true) + return con.setProgramCached(c, evm, common.Address{}, codehash, true) +} + +// Caches all programs with a codehash equal to the given address. Caller must be a cache manager or chain owner. 
+func (con ArbWasmCache) CacheProgram(c ctx, evm mech, address addr) error { + codehash, err := c.GetCodeHash(address) + if err != nil { + return err + } + return con.setProgramCached(c, evm, address, codehash, true) } // Evicts all programs with the given codehash. Caller must be a cache manager or chain owner. func (con ArbWasmCache) EvictCodehash(c ctx, evm mech, codehash hash) error { - return con.setProgramCached(c, evm, codehash, false) + return con.setProgramCached(c, evm, common.Address{}, codehash, false) } // Gets whether a program is cached. Note that the program may be expired. @@ -36,7 +47,7 @@ func (con ArbWasmCache) CodehashIsCached(c ctx, evm mech, codehash hash) (bool, } // Caches all programs with the given codehash. -func (con ArbWasmCache) setProgramCached(c ctx, evm mech, codehash hash, cached bool) error { +func (con ArbWasmCache) setProgramCached(c ctx, evm mech, address addr, codehash hash, cached bool) error { if !con.hasAccess(c) { return c.BurnOut() } @@ -51,7 +62,7 @@ func (con ArbWasmCache) setProgramCached(c ctx, evm mech, codehash hash, cached return con.UpdateProgramCache(c, evm, c.caller, codehash, cached) } return programs.SetProgramCached( - emitEvent, evm.StateDB, codehash, cached, evm.Context.Time, params, txRunMode, debugMode, + emitEvent, evm.StateDB, codehash, address, cached, evm.Context.Time, params, txRunMode, debugMode, ) } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index c39f2bcb6d..9a6d8885ad 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -72,11 +72,12 @@ type Precompile struct { } type PrecompileMethod struct { - name string - template abi.Method - purity purity - handler reflect.Method - arbosVersion uint64 + name string + template abi.Method + purity purity + handler reflect.Method + arbosVersion uint64 + maxArbosVersion uint64 } type PrecompileEvent struct { @@ -226,6 +227,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr purity, handler, 0, + 0, } methods[id] = &method methodsByName[name] = &method @@ -575,6 +577,8 @@ func Precompiles() map[addr]ArbosPrecompile { for _, method := range ArbWasmCache.methods { method.arbosVersion = ArbWasmCache.arbosVersion } + ArbWasmCache.methodsByName["CacheCodehash"].maxArbosVersion = params.ArbosVersion_Stylus + ArbWasmCache.methodsByName["CacheProgram"].arbosVersion = params.ArbosVersion_StylusFixes ArbRetryableImpl := &ArbRetryableTx{Address: types.ArbRetryableTxAddress} ArbRetryable := insert(MakePrecompile(pgen.ArbRetryableTxMetaData, ArbRetryableImpl)) @@ -680,7 +684,7 @@ func (p *Precompile) Call( } id := *(*[4]byte)(input) method, ok := p.methods[id] - if !ok || arbosVersion < method.arbosVersion { + if !ok || arbosVersion < method.arbosVersion || (method.maxArbosVersion > 0 && arbosVersion > method.maxArbosVersion) { // method does not exist or hasn't yet been activated return nil, 0, vm.ErrExecutionReverted } diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index 86047038dc..ecce77088a 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -194,6 +194,7 @@ func TestPrecompilesPerArbosVersion(t *testing.T) { 11: 4, 20: 8, 30: 38, + 31: 1, } precompiles := Precompiles() diff --git a/system_tests/program_test.go b/system_tests/program_test.go index d8d9e05aa1..b05589a1bf 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -8,6 +8,7 @@ import ( "context" "encoding/binary" "fmt" + "math" "math/big" "os" "path/filepath" @@ -18,6 
+19,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -26,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbos/util" @@ -224,6 +227,102 @@ func testActivateTwice(t *testing.T, jit bool) { validateBlocks(t, 7, jit, builder) } +func TestStylusUpgrade(t *testing.T) { + t.Parallel() + testStylusUpgrade(t, true) +} + +func testStylusUpgrade(t *testing.T, jit bool) { + builder, auth, cleanup := setupProgramTest(t, false, func(b *NodeBuilder) { b.WithArbOSVersion(params.ArbosVersion_Stylus) }) + defer cleanup() + + ctx := builder.ctx + + l2info := builder.L2Info + l2client := builder.L2.Client + + ensure := func(tx *types.Transaction, err error) *types.Receipt { + t.Helper() + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + return receipt + } + + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, l2client) + Require(t, err) + ensure(arbOwner.SetInkPrice(&auth, 1)) + + wasm, _ := readWasmFile(t, rustFile("keccak")) + keccakAddr := deployContract(t, ctx, auth, l2client, wasm) + + colors.PrintBlue("keccak program deployed to ", keccakAddr) + + preimage := []byte("hello, you fool") + + keccakArgs := []byte{0x01} // keccak the preimage once + keccakArgs = append(keccakArgs, preimage...) + + checkFailWith := func(errMessage string) uint64 { + msg := ethereum.CallMsg{ + To: &keccakAddr, + Data: keccakArgs, + } + _, err = l2client.CallContract(ctx, msg, nil) + if err == nil || !strings.Contains(err.Error(), errMessage) { + Fatal(t, "call should have failed with "+errMessage, " got: "+err.Error()) + } + + // execute onchain for proving's sake + tx := l2info.PrepareTxTo("Owner", &keccakAddr, 1e9, nil, keccakArgs) + Require(t, l2client.SendTransaction(ctx, tx)) + return EnsureTxFailed(t, ctx, l2client, tx).BlockNumber.Uint64() + } + + checkSucceeds := func() uint64 { + msg := ethereum.CallMsg{ + To: &keccakAddr, + Data: keccakArgs, + } + _, err = l2client.CallContract(ctx, msg, nil) + if err != nil { + Fatal(t, err) + } + + // execute onchain for proving's sake + tx := l2info.PrepareTxTo("Owner", &keccakAddr, 1e9, nil, keccakArgs) + Require(t, l2client.SendTransaction(ctx, tx)) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + if err != nil { + Fatal(t, err) + } + return receipt.BlockNumber.Uint64() + } + + // Calling the contract pre-activation should fail. 
+ blockFail1 := checkFailWith("ProgramNotActivated") + + activateWasm(t, ctx, auth, l2client, keccakAddr, "keccak") + + blockSuccess1 := checkSucceeds() + + tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 31, 0) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + // generate traffic to perform the upgrade + TransferBalance(t, "Owner", "Owner", big.NewInt(1), builder.L2Info, builder.L2.Client, ctx) + + blockFail2 := checkFailWith("ProgramNeedsUpgrade") + + activateWasm(t, ctx, auth, l2client, keccakAddr, "keccak") + + blockSuccess2 := checkSucceeds() + + validateBlockRange(t, []uint64{blockFail1, blockSuccess1, blockFail2, blockSuccess2}, jit, builder) +} + func TestProgramErrors(t *testing.T) { t.Parallel() errorTest(t, true) @@ -641,10 +740,15 @@ func testReturnData(t *testing.T, jit bool) { func TestProgramLogs(t *testing.T) { t.Parallel() - testLogs(t, true) + testLogs(t, true, false) } -func testLogs(t *testing.T, jit bool) { +func TestProgramLogsWithTracing(t *testing.T) { + t.Parallel() + testLogs(t, true, true) +} + +func testLogs(t *testing.T, jit, tracing bool) { builder, auth, cleanup := setupProgramTest(t, jit) ctx := builder.ctx l2info := builder.L2Info @@ -653,6 +757,27 @@ func testLogs(t *testing.T, jit bool) { logAddr := deployWasm(t, ctx, auth, l2client, rustFile("log")) multiAddr := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + type traceLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` + } + traceTx := func(tx *types.Transaction) []traceLog { + type traceLogs struct { + Logs []traceLog `json:"logs"` + } + var trace traceLogs + traceConfig := map[string]interface{}{ + "tracer": "callTracer", + "tracerConfig": map[string]interface{}{ + "withLog": true, + }, + } + rpc := l2client.Client() + err := rpc.CallContext(ctx, &trace, "debug_traceTransaction", tx.Hash(), traceConfig) + Require(t, err) + return trace.Logs + } ensure := func(tx *types.Transaction, err error) *types.Receipt { t.Helper() Require(t, err) @@ -679,6 +804,20 @@ func testLogs(t *testing.T, jit bool) { topics[j] = testhelpers.RandomHash() } data := randBytes(0, 48) + verifyLogTopicsAndData := func(logData []byte, logTopics []common.Hash) { + if !bytes.Equal(logData, data) { + Fatal(t, "data mismatch", logData, data) + } + if len(logTopics) != len(topics) { + Fatal(t, "topics mismatch", len(logTopics), len(topics)) + } + for j := 0; j < i; j++ { + if logTopics[j] != topics[j] { + Fatal(t, "topic mismatch", logTopics, topics) + } + } + } + args := encode(topics, data) tx := l2info.PrepareTxTo("Owner", &logAddr, 1e9, nil, args) receipt := ensure(tx, l2client.SendTransaction(ctx, tx)) @@ -687,16 +826,14 @@ func testLogs(t *testing.T, jit bool) { Fatal(t, "wrong number of logs", len(receipt.Logs)) } log := receipt.Logs[0] - if !bytes.Equal(log.Data, data) { - Fatal(t, "data mismatch", log.Data, data) - } - if len(log.Topics) != len(topics) { - Fatal(t, "topics mismatch", len(log.Topics), len(topics)) - } - for j := 0; j < i; j++ { - if log.Topics[j] != topics[j] { - Fatal(t, "topic mismatch", log.Topics, topics) + verifyLogTopicsAndData(log.Data, log.Topics) + if tracing { + logs := traceTx(tx) + if len(logs) != 1 { + Fatal(t, "wrong number of logs in trace", len(logs)) } + log := logs[0] + verifyLogTopicsAndData(log.Data, log.Topics) } } @@ -1251,7 +1388,7 @@ func TestProgramCacheManager(t *testing.T) { // check ownership assert(arbOwner.IsChainOwner(nil, ownerAuth.From)) 
ensure(arbWasmCache.EvictCodehash(&ownerAuth, codehash)) - ensure(arbWasmCache.CacheCodehash(&ownerAuth, codehash)) + ensure(arbWasmCache.CacheProgram(&ownerAuth, program)) // de-authorize manager ensure(arbOwner.RemoveWasmCacheManager(&ownerAuth, manager)) @@ -1261,13 +1398,82 @@ func TestProgramCacheManager(t *testing.T) { assert(len(all) == 0, err) } -func setupProgramTest(t *testing.T, jit bool) ( +func testReturnDataCost(t *testing.T, arbosVersion uint64) { + builder, auth, cleanup := setupProgramTest(t, false, func(b *NodeBuilder) { b.WithArbOSVersion(arbosVersion) }) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // use a consistent ink price + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, l2client) + Require(t, err) + tx, err := arbOwner.SetInkPrice(&auth, 10000) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + Require(t, err) + + returnSize := big.NewInt(1024 * 1024) // 1MiB + returnSizeBytes := arbmath.U256Bytes(returnSize) + + testCall := func(to common.Address) uint64 { + msg := ethereum.CallMsg{ + To: &to, + Data: returnSizeBytes, + SkipL1Charging: true, + } + ret, err := l2client.CallContract(ctx, msg, nil) + Require(t, err) + + if !arbmath.BigEquals(big.NewInt(int64(len(ret))), returnSize) { + Fatal(t, "unexpected return length", len(ret), "expected", returnSize) + } + + gas, err := l2client.EstimateGas(ctx, msg) + Require(t, err) + + return gas + } + + stylusReturnSizeAddr := deployWasm(t, ctx, auth, l2client, watFile("return-size")) + + stylusGas := testCall(stylusReturnSizeAddr) + + // PUSH32 [returnSizeBytes] + evmBytecode := append([]byte{0x7F}, returnSizeBytes...) + // PUSH0 RETURN + evmBytecode = append(evmBytecode, 0x5F, 0xF3) + evmReturnSizeAddr := deployContract(t, ctx, auth, l2client, evmBytecode) + + evmGas := testCall(evmReturnSizeAddr) + + colors.PrintGrey(fmt.Sprintf("arbosVersion=%v stylusGas=%v evmGas=%v", arbosVersion, stylusGas, evmGas)) + // a bit of gas difference is expected due to EVM PUSH32 and PUSH0 cost (in practice this is 5 gas) + similarGas := math.Abs(float64(stylusGas)-float64(evmGas)) <= 100 + if arbosVersion >= params.ArbosVersion_StylusFixes { + if !similarGas { + Fatal(t, "unexpected gas difference for return data: stylus", stylusGas, ", evm", evmGas) + } + } else if similarGas { + Fatal(t, "gas unexpectedly similar for return data: stylus", stylusGas, ", evm", evmGas) + } +} + +func TestReturnDataCost(t *testing.T) { + testReturnDataCost(t, params.ArbosVersion_Stylus) + testReturnDataCost(t, params.ArbosVersion_StylusFixes) +} + +func setupProgramTest(t *testing.T, jit bool, builderOpts ...func(*NodeBuilder)) ( *NodeBuilder, bind.TransactOpts, func(), ) { ctx, cancel := context.WithCancel(context.Background()) builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + for _, opt := range builderOpts { + opt(builder) + } + builder.nodeConfig.BlockValidator.Enable = false builder.nodeConfig.Staker.Enable = true builder.nodeConfig.BatchPoster.Enable = true diff --git a/system_tests/stylus_test.go b/system_tests/stylus_test.go index 97f3041196..331bca2e9c 100644 --- a/system_tests/stylus_test.go +++ b/system_tests/stylus_test.go @@ -41,7 +41,7 @@ func TestProgramArbitratorReturnData(t *testing.T) { } func TestProgramArbitratorLogs(t *testing.T) { - testLogs(t, false) + testLogs(t, false, false) } func TestProgramArbitratorCreate(t *testing.T) { @@ -108,3 +108,7 @@ func TestProgramLongCall(t *testing.T) { func TestProgramLongArbitratorCall(t *testing.T) { 
testProgramResursiveCalls(t, fullRecurseTest(), false) } + +func TestProgramArbitratorStylusUpgrade(t *testing.T) { + testStylusUpgrade(t, false) +}
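Returning to the program-call pricing change in arbos/programs/programs.go: a minimal sketch (not the consensus code, which uses saturating arithmetic) of how the init and cached gas components combine depending on the program's Stylus version; the function and parameter names here are illustrative only.

// Sketch: under Stylus version 1 a cached program pays only the cached cost and
// a cold program only the init cost; from version 2 on the cached cost is always
// charged, and a cold program pays the init cost on top of it. Params v2 also
// sets MinInitGas to 69 units (69 * 128 = 8832 gas), per params.go above.
func programCallGas(stylusVersion uint16, cached bool, initGas, cachedGas uint64) uint64 {
	var cost uint64
	if cached || stylusVersion > 1 {
		cost += cachedGas
	}
	if !cached {
		cost += initGas
	}
	return cost
}

This is also why ProgramInitGas now reports initGas += cachedGas once the params version is above 1.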