diff --git a/.dockerignore b/.dockerignore index 05cec369d9..763aeda1be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -13,6 +13,9 @@ solgen/go **/node_modules target/**/* +!target/machines +!target/machines/* +!target/machines/**/* brotli/buildfiles/**/* # these are used by environment outside the docker: diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index e11f5511a1..8c491a421c 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -3,6 +3,7 @@ run-name: Arbitrator CI triggered from @${{ github.actor }} of ${{ github.head_r on: workflow_dispatch: + merge_group: pull_request: paths: - 'arbitrator/**' @@ -24,7 +25,7 @@ jobs: runs-on: ubuntu-8 steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 with: submodules: recursive @@ -36,35 +37,30 @@ jobs: sudo ln -s /usr/bin/wasm-ld-14 /usr/local/bin/wasm-ld - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: "stable" - override: true components: 'llvm-tools-preview, rustfmt, clippy' + targets: 'wasm32-wasi, wasm32-unknown-unknown' - name: Install grcov - uses: actions-rs/install@v0.1 + uses: jaxxstorm/action-install-gh-release@v1.10.0 with: - crate: grcov - version: latest - use-tool-cache: true - - - name: Install rust wasm targets - run: rustup target add wasm32-wasi wasm32-unknown-unknown + repo: mozilla/grcov + tag: v0.8.18 + extension: "\\.bz2" + cache: enable - name: Cache Rust intermediate build products uses: actions/cache@v3 @@ -119,7 +115,7 @@ jobs: - name: Setup emsdk if: steps.cache-cbrotli.outputs.cache-hit != 'true' - uses: mymindstorm/setup-emsdk@v11 + uses: mymindstorm/setup-emsdk@v12 with: # Make sure to set a version number! 
version: 3.1.6 @@ -147,22 +143,13 @@ jobs: echo RUSTDOCFLAGS="-Cpanic=abort" >> $GITHUB_ENV - name: Clippy check - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all --manifest-path arbitrator/Cargo.toml -- -D warnings + run: cargo clippy --all --manifest-path arbitrator/Cargo.toml -- -D warnings - name: Run rust tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --all --manifest-path arbitrator/Cargo.toml + run: cargo test --all --manifest-path arbitrator/Cargo.toml - name: Rustfmt - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all --manifest-path arbitrator/Cargo.toml -- --check + run: cargo fmt --all --manifest-path arbitrator/Cargo.toml -- --check - name: Make proofs from test cases run: make -j test-gen-proofs diff --git a/.github/workflows/arbitrator-skip-ci.yml b/.github/workflows/arbitrator-skip-ci.yml index 6dfd962ee6..75fb47d0a5 100644 --- a/.github/workflows/arbitrator-skip-ci.yml +++ b/.github/workflows/arbitrator-skip-ci.yml @@ -2,6 +2,7 @@ name: Arbitrator skip CI run-name: Arbitrator skip CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: + merge_group: pull_request: paths-ignore: - 'arbitrator/**' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1a591b720..f2c4fac84c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,7 @@ run-name: Go tests CI triggered from @${{ github.actor }} of ${{ github.head_ref on: workflow_dispatch: + merge_group: pull_request: push: branches: @@ -28,7 +29,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true @@ -36,14 +37,14 @@ jobs: run: sudo apt update && sudo apt install -y wabt gotestsum - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x @@ -52,27 +53,10 @@ jobs: sudo apt-get update && sudo apt-get install -y lld-14 sudo ln -s /usr/bin/wasm-ld-14 /usr/local/bin/wasm-ld - - name: Install rust wasm32-unknown-unknown - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: "stable" - target: wasm32-unknown-unknown - - - name: Install rust wasm32-wasi - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: "stable" - target: wasm32-wasi - - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: "stable" - override: true + targets: 'wasm32-unknown-unknown, wasm32-wasi' - name: Cache Build Products uses: actions/cache@v3 @@ -124,16 +108,19 @@ jobs: run: make -j build-node-deps - name: Lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: version: latest - skip-go-installation: true skip-pkg-cache: true + - name: Custom Lint + run: | + go run ./linter/koanf ./... + go run ./linter/pointercheck ./... 
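[Note] The new Custom Lint step runs two analyzers that live in this repo under `linter/`. Their implementation is not part of this diff; as a sketch, a vet-style checker built on `golang.org/x/tools/go/analysis` (an assumption about how they are structured — the rule below is a hypothetical stand-in, not the real `pointercheck` logic) can be as small as:

```go
// Minimal single-analyzer linter, runnable as `go run ./linter/pointercheck ./...`.
package main

import (
	"go/ast"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/singlechecker"
)

var analyzer = &analysis.Analyzer{
	Name: "pointercheck",
	Doc:  "reports == and != comparisons between pointer values",
	Run:  run,
}

func run(pass *analysis.Pass) (interface{}, error) {
	for _, file := range pass.Files {
		ast.Inspect(file, func(n ast.Node) bool {
			expr, ok := n.(*ast.BinaryExpr)
			if !ok || (expr.Op != token.EQL && expr.Op != token.NEQ) {
				return true // keep walking the syntax tree
			}
			// Flag comparisons where both operands have pointer type.
			if isPointer(pass, expr.X) && isPointer(pass, expr.Y) {
				pass.Reportf(expr.Pos(), "comparison of two pointers with %s", expr.Op)
			}
			return true
		})
	}
	return nil, nil
}

func isPointer(pass *analysis.Pass, e ast.Expr) bool {
	_, ok := pass.TypesInfo.TypeOf(e).(*types.Pointer)
	return ok
}

func main() { singlechecker.Main(analyzer) }
```

The `singlechecker` driver supplies package loading, flags, and exit codes, which is what lets the workflow (and the `.make/lint` rule later in this diff) invoke the linters with a bare `go run ./linter/... ./...`.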
- name: Set environment variables run: | - mkdir -p target/tmp - echo "TMPDIR=$(pwd)/target/tmp" >> "$GITHUB_ENV" + mkdir -p target/tmp/deadbeefbee + echo "TMPDIR=$(pwd)/target/tmp/deadbeefbee" >> "$GITHUB_ENV" echo "GOMEMLIMIT=6GiB" >> "$GITHUB_ENV" echo "GOGC=80" >> "$GITHUB_ENV" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 92411d17e2..8fb9d80c21 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -14,6 +14,8 @@ name: "CodeQL" on: push: branches: [ "master" ] + merge_group: + branches: [ "master" ] pull_request: # The branches below must be a subset of the branches above branches: [ "master" ] @@ -23,13 +25,13 @@ on: jobs: analyze: name: Analyze + if: github.repository == 'OffchainLabs/nitro' # don't run in any forks without "Advanced Security" enabled runs-on: ubuntu-8 permissions: actions: read contents: read security-events: write env: - CODEQL_EXTRACTOR_GO_BUILD_TRACING: 'on' WABT_VERSION: 1.0.32 strategy: @@ -41,7 +43,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true @@ -59,24 +61,19 @@ jobs: config-file: ./.github/codeql/codeql-config.yml - name: Setup nodejs - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: '16' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: 1.20.x - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust - with: - profile: minimal - toolchain: "stable" - override: true + uses: dtolnay/rust-toolchain@stable - name: Cache Rust Build Products uses: actions/cache@v3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cf5fdd5ca9..30ad88d91a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,6 +3,7 @@ run-name: Docker build CI triggered from @${{ github.actor }} of ${{ github.head on: workflow_dispatch: + merge_group: pull_request: push: branches: @@ -22,12 +23,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: recursive - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 with: driver-opts: network=host @@ -39,7 +40,7 @@ jobs: restore-keys: ${{ runner.os }}-buildx- - name: Build nitro-node docker - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: target: nitro-node push: true @@ -49,7 +50,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max - name: Build nitro-node-dev docker - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: target: nitro-node-dev push: true @@ -58,6 +59,17 @@ jobs: cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + - name: Start background nitro-testnode + shell: bash + run: | + cd nitro-testnode + ./test-node.bash --init --dev & + + - name: Wait for rpc to come up + shell: bash + run: | + ${{ github.workspace }}/.github/workflows/waitForNitro.sh + - name: Print WAVM module root id: module-root run: | # Unfortunately, `docker cp` seems to always result in a "permission denied" # We work around this by piping a tarball through stdout docker run --rm --entrypoint tar localhost:5000/nitro-node-dev:latest -cf - target/machines/latest | tar xf - module_root="$(cat "target/machines/latest/module-root.txt")" - echo "::set-output name=module-root::$module_root" + echo "module-root=$module_root" >> 
"$GITHUB_OUTPUT" echo -e "\x1b[1;34mWAVM module root:\x1b[0m $module_root" - name: Upload WAVM machine as artifact diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh new file mode 100755 index 0000000000..cf3f6484fc --- /dev/null +++ b/.github/workflows/waitForNitro.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# poll the nitro endpoint until we get a 0 return code; if 30 minutes pass without success, exit 1 +timeout_time=$(($(date +%s) + 1800)) + +while (( $(date +%s) <= timeout_time )); do + if curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547'; then + exit 0 + else + sleep 20 + fi +done + +exit 1 \ No newline at end of file diff --git a/.gitignore b/.gitignore index f0eb5c2ec3..8a628e29c4 100644 --- a/.gitignore +++ b/.gitignore @@ -19,5 +19,6 @@ solgen/go/ target/ yarn-error.log local/ -testdata system_tests/test-data/* +system_tests/testdata/* +arbos/testdata/* diff --git a/Dockerfile b/Dockerfile index 42daad5e4c..b05cb3d4d9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ apt-get install -y git python3 make g++ WORKDIR /workspace COPY contracts/package.json contracts/yarn.lock contracts/ -RUN cd contracts && yarn install --ignore-optional +RUN cd contracts && yarn install COPY contracts contracts/ COPY Makefile . RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity @@ -44,6 +44,7 @@ RUN apt-get install -y clang=1:11.0-51+nmu5 lld=1:11.0-51+nmu5 # pinned rust 1.68.2 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.68.2 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ +COPY arbitrator/arbutil arbitrator/arbutil COPY arbitrator/wasm-libraries arbitrator/wasm-libraries COPY --from=brotli-wasm-export / target/ RUN . ~/.cargo/env && NITRO_BUILD_IGNORE_TIMESTAMPS=1 RUSTFLAGS='-C symbol-mangling-version=v0' make build-wasm-libs @@ -53,7 +54,7 @@ COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base as wasm-bin-builder # pinned go version -RUN curl -L https://golang.org/dl/go1.19.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - +RUN curl -L https://golang.org/dl/go1.20.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress COPY ./arbos ./arbos @@ -76,6 +77,7 @@ COPY ./fastcache ./fastcache COPY ./go-ethereum ./go-ethereum COPY --from=brotli-wasm-export / target/ COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/ +COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/ COPY --from=contracts-builder workspace/.make/ .make/ RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin @@ -160,8 +162,9 @@ COPY ./scripts/download-machine.sh . 
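[Note] The `waitForNitro.sh` script added above gates the Docker workflow on the test node's RPC coming up: it retries an `eth_chainId` call every 20 seconds for up to 30 minutes. The same probe, written in Go for illustration (a sketch mirroring the script's endpoint, payload, and timing; nothing here is part of the patch):

```go
// Poll the local nitro RPC endpoint until it answers an eth_chainId request,
// giving up after 30 minutes, like .github/workflows/waitForNitro.sh does.
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	payload := []byte(`{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}`)
	deadline := time.Now().Add(30 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := http.Post("http://localhost:8547", "application/json", bytes.NewReader(payload))
		if err == nil {
			resp.Body.Close()
			return // the node answered; exit 0
		}
		time.Sleep(20 * time.Second)
	}
	fmt.Fprintln(os.Stderr, "timed out waiting for the nitro RPC to come up")
	os.Exit(1)
}
```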
#RUN ./download-machine.sh consensus-v9 0xd1842bfbe047322b3f3b3635b5fe62eb611557784d17ac1d2b1ce9c170af6544 RUN ./download-machine.sh consensus-v10 0x6b94a7fc388fd8ef3def759297828dc311761e88d8179c7ee8d3887dc554f3c3 RUN ./download-machine.sh consensus-v10.1 0xda4e3ad5e7feacb817c21c8d0220da7650fe9051ece68a3f0b1c5d38bbb27b21 +RUN ./download-machine.sh consensus-v10.2 0x0754e09320c381566cc0449904c377a52bd34a6b9404432e80afd573b67f7b17 -FROM golang:1.19-bullseye as node-builder +FROM golang:1.20-bullseye as node-builder WORKDIR /workspace ARG version="" ARG datetime="" @@ -178,6 +181,7 @@ COPY fastcache/go.mod fastcache/go.sum fastcache/ RUN go mod download COPY . ./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ +COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/ COPY --from=contracts-builder workspace/.make/ .make/ COPY --from=prover-header-export / target/ COPY --from=brotli-library-export / target/ @@ -201,6 +205,7 @@ WORKDIR /home/user COPY --from=node-builder /workspace/target/bin/nitro /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/ +COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/ COPY --from=machine-versions /workspace/machines /home/user/target/machines USER root RUN export DEBIAN_FRONTEND=noninteractive && \ @@ -215,7 +220,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ chown -R user:user /home/user && \ chmod -R 555 /home/user/target/machines && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user @@ -234,7 +239,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ node-ws vim-tiny python3 \ dnsutils && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user @@ -258,7 +263,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ adduser user sudo && \ echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers && \ apt-get clean && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version USER user diff --git a/Makefile b/Makefile index 4b6928151f..4221100961 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ # have to update an existing file. So - for docker, convert all dependencies # to order-only dependencies (timestamps ignored). 
# WARNING: when using this trick, you cannot use the $< automatic variable + ifeq ($(origin NITRO_BUILD_IGNORE_TIMESTAMPS),undefined) DEP_PREDICATE:= ORDER_ONLY_PREDICATE:=| @@ -87,7 +88,7 @@ push: lint test-go .make/fmt all: build build-replay-env test-gen-proofs @touch .make/all -build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val) +build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager) @printf $(done) build-node-deps: $(go_source) build-prover-header build-prover-lib build-jit .make/solgen .make/cbrotli-lib @@ -184,6 +185,9 @@ $(output_root)/bin/seq-coordinator-invalidate: $(DEP_PREDICATE) build-node-deps $(output_root)/bin/nitro-val: $(DEP_PREDICATE) build-node-deps go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/nitro-val" +$(output_root)/bin/seq-coordinator-manager: $(DEP_PREDICATE) build-node-deps + go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/seq-coordinator-manager" + # recompile wasm, but don't change timestamp unless files differ $(replay_wasm): $(DEP_PREDICATE) $(go_source) .make/solgen mkdir -p `dirname $(replay_wasm)` @@ -260,17 +264,17 @@ $(output_root)/machines/latest/soft-float.wasm: $(DEP_PREDICATE) \ --export wavm__f32_demote_f64 \ --export wavm__f64_promote_f32 -$(output_root)/machines/latest/go_stub.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/go-stub/src/*/*) +$(output_root)/machines/latest/go_stub.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/go-stub/src/*) mkdir -p $(output_root)/machines/latest cargo build --manifest-path arbitrator/wasm-libraries/Cargo.toml --release --target wasm32-wasi --package go-stub install arbitrator/wasm-libraries/target/wasm32-wasi/release/go_stub.wasm $@ -$(output_root)/machines/latest/host_io.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/host-io/src/*/*) +$(output_root)/machines/latest/host_io.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/host-io/src/*) mkdir -p $(output_root)/machines/latest cargo build --manifest-path arbitrator/wasm-libraries/Cargo.toml --release --target wasm32-wasi --package host-io install arbitrator/wasm-libraries/target/wasm32-wasi/release/host_io.wasm $@ -$(output_root)/machines/latest/brotli.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/brotli/src/*/*) .make/cbrotli-wasm +$(output_root)/machines/latest/brotli.wasm: $(DEP_PREDICATE) $(wildcard arbitrator/wasm-libraries/brotli/src/*) .make/cbrotli-wasm mkdir -p $(output_root)/machines/latest cargo build --manifest-path arbitrator/wasm-libraries/Cargo.toml --release --target wasm32-wasi --package brotli install arbitrator/wasm-libraries/target/wasm32-wasi/release/brotli.wasm $@ @@ -291,7 +295,7 @@ contracts/test/prover/proofs/rust-%.json: $(arbitrator_cases)/rust/target/wasm32 $(arbitrator_prover_bin) $< $(arbitrator_wasm_lib_flags_nogo) -o $@ -b --allow-hostapi --require-success --inbox-add-stub-headers --inbox $(arbitrator_cases)/rust/data/msg0.bin --inbox $(arbitrator_cases)/rust/data/msg1.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg0.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg1.bin --preimages $(arbitrator_cases)/rust/data/preimages.bin contracts/test/prover/proofs/go.json: $(arbitrator_cases)/go/main $(arbitrator_prover_bin) $(arbitrator_wasm_libs) - $(arbitrator_prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -i 5000000 --require-success + $(arbitrator_prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -i 5000000 
--require-success --preimages $(arbitrator_cases)/rust/data/preimages.bin # avoid testing read-inboxmsg-10 in onestepproofs. It's used for go challenge testing. contracts/test/prover/proofs/read-inboxmsg-10.json: @@ -303,6 +307,8 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make + go run ./linter/koanf ./... + go run ./linter/pointercheck ./... golangci-lint run --fix yarn --cwd contracts solhint @touch $@ @@ -345,10 +351,9 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro test -f target/lib-wasm/libbrotlidec-static.a || ./scripts/build-brotli.sh -w -d @touch $@ -.make/wasm-lib: $(DEP_PREDICATE) $(ORDER_ONLY_PREDICATE) .make - test -f arbitrator/wasm-libraries/soft-float/bindings32.o || ./scripts/build-brotli.sh -f -d -t . - test -f arbitrator/wasm-libraries/soft-float/bindings64.o || ./scripts/build-brotli.sh -f -d -t . - test -f arbitrator/wasm-libraries/soft-float/SoftFloat/build/Wasm-Clang/softfloat.a || ./scripts/build-brotli.sh -f -d -t . +.make/wasm-lib: $(DEP_PREDICATE) arbitrator/wasm-libraries/soft-float/SoftFloat/build/Wasm-Clang/softfloat.a $(ORDER_ONLY_PREDICATE) .make + test -f arbitrator/wasm-libraries/soft-float/bindings32.o || ./scripts/build-brotli.sh -f -d -t .. + test -f arbitrator/wasm-libraries/soft-float/bindings64.o || ./scripts/build-brotli.sh -f -d -t .. @touch $@ .make: diff --git a/README.md b/README.md index 2cfef3de6a..67a182ec30 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- Logo + Logo

Arbitrum Nitro

@@ -14,7 +14,7 @@ ## About Arbitrum Nitro -Logo +Logo Nitro is the latest iteration of the Arbitrum technology. It is a fully integrated, complete layer 2 optimistic rollup system, including fraud proofs, the sequencer, the token bridges, diff --git a/arbcompress/compress_common.go b/arbcompress/compress_common.go index 6b66fe302b..990fd2e2be 100644 --- a/arbcompress/compress_common.go +++ b/arbcompress/compress_common.go @@ -3,7 +3,6 @@ package arbcompress -const LEVEL_FAST = 0 const LEVEL_WELL = 11 const WINDOW_SIZE = 22 // BROTLI_DEFAULT_WINDOW @@ -11,6 +10,6 @@ func compressedBufferSizeFor(length int) int { return length + (length>>10)*8 + 64 // actual limit is: length + (length >> 14) * 4 + 6 } -func CompressFast(input []byte) ([]byte, error) { - return compressLevel(input, LEVEL_FAST) +func CompressLevel(input []byte, level int) ([]byte, error) { + return compressLevel(input, level) } diff --git a/arbcompress/compress_test.go b/arbcompress/compress_test.go index fffcda8923..21629d9663 100644 --- a/arbcompress/compress_test.go +++ b/arbcompress/compress_test.go @@ -27,7 +27,7 @@ func testCompressDecompress(t *testing.T, data []byte) { } testDecompress(t, compressedWell, data) - compressedFast, err := CompressFast(data) + compressedFast, err := CompressLevel(data, 0) if err != nil { t.Fatal(err) } diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock index 41522a82bb..472c9e4b57 100644 --- a/arbitrator/Cargo.lock +++ b/arbitrator/Cargo.lock @@ -55,6 +55,12 @@ dependencies = [ [[package]] name = "arbutil" version = "0.1.0" +dependencies = [ + "digest 0.10.7", + "num_enum", + "sha2", + "sha3 0.10.8", +] [[package]] name = "arrayvec" @@ -119,6 +125,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "block-padding" version = "0.2.1" @@ -224,6 +239,15 @@ dependencies = [ "windows-sys 0.33.0", ] +[[package]] +name = "cpufeatures" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +dependencies = [ + "libc", +] + [[package]] name = "cranelift-bforest" version = "0.86.1" @@ -334,6 +358,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + [[package]] name = "darling" version = "0.13.4" @@ -378,6 +412,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common", +] + [[package]] name = "either" version = "1.6.1" @@ -425,6 +469,12 @@ dependencies = [ "syn 1.0.76", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "eyre" version = "0.6.5" @@ -484,7 +534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ "fallible-iterator", - "indexmap", + "indexmap 1.8.1", "stable_deref_trait", ] @@ -503,6 +553,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + [[package]] name = "heck" version = "0.3.3" @@ -555,6 +611,16 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "inkwell" version = "0.1.0-beta.4" @@ -617,7 +683,7 @@ dependencies = [ "parking_lot 0.12.1", "rand", "rand_pcg", - "sha3", + "sha3 0.9.1", "structopt", "thiserror", "wasmer", @@ -636,9 +702,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] [[package]] name = "lazy_static" @@ -852,6 +921,27 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.18", +] + [[package]] name = "object" version = "0.28.4" @@ -970,6 +1060,16 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -1010,7 +1110,7 @@ dependencies = [ "arbutil", "bincode", "brotli2", - "digest", + "digest 0.9.0", "eyre", "fnv", "hex", @@ -1024,7 +1124,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "sha3", + "sha3 0.9.1", "smallvec", "static_assertions", "structopt", @@ -1177,7 +1277,7 @@ checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" dependencies = [ "bytecheck", "hashbrown 0.12.3", - "indexmap", + "indexmap 1.8.1", "ptr_meta", "rend", "rkyv_derive", @@ -1323,18 +1423,39 @@ dependencies = [ "syn 1.0.76", ] +[[package]] +name = "sha2" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha3" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer", - "digest", + "block-buffer 
0.9.0", + "digest 0.9.0", "keccak", "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + [[package]] name = "slice-group-by" version = "0.3.0" @@ -1455,6 +1576,23 @@ dependencies = [ "syn 1.0.76", ] +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" + +[[package]] +name = "toml_edit" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +dependencies = [ + "indexmap 2.0.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tracing" version = "0.1.34" @@ -1635,7 +1773,7 @@ checksum = "740f96c9e5d49f0056d716977657f3f7f8eea9923b41f46d1046946707aa038f" dependencies = [ "bytes", "cfg-if", - "indexmap", + "indexmap 1.8.1", "js-sys", "more-asserts", "serde", @@ -1740,7 +1878,7 @@ checksum = "3bc6cd7a2d2d3bd901ff491f131188c1030694350685279e16e1233b9922846b" dependencies = [ "enum-iterator", "enumset", - "indexmap", + "indexmap 1.8.1", "more-asserts", "rkyv", "target-lexicon", @@ -1758,7 +1896,7 @@ dependencies = [ "cfg-if", "corosensei", "enum-iterator", - "indexmap", + "indexmap 1.8.1", "lazy_static", "libc", "mach", @@ -1783,7 +1921,7 @@ version = "0.84.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77dc97c22bb5ce49a47b745bed8812d30206eff5ef3af31424f2c1820c0974b2" dependencies = [ - "indexmap", + "indexmap 1.8.1", ] [[package]] @@ -1914,3 +2052,12 @@ name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "winnow" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" +dependencies = [ + "memchr", +] diff --git a/arbitrator/arbutil/Cargo.toml b/arbitrator/arbutil/Cargo.toml index e8b0e86d30..cab0b22983 100644 --- a/arbitrator/arbutil/Cargo.toml +++ b/arbitrator/arbutil/Cargo.toml @@ -4,3 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] +digest = "0.10.7" +num_enum = "0.7.0" +sha2 = "0.10.7" +sha3 = "0.10.8" diff --git a/arbitrator/arbutil/src/lib.rs b/arbitrator/arbutil/src/lib.rs index 44d608682a..aa748b84e8 100644 --- a/arbitrator/arbutil/src/lib.rs +++ b/arbitrator/arbutil/src/lib.rs @@ -3,5 +3,7 @@ pub mod color; pub mod format; +mod types; pub use color::{Color, DebugColor}; +pub use types::PreimageType; diff --git a/arbitrator/arbutil/src/types.rs b/arbitrator/arbutil/src/types.rs new file mode 100644 index 0000000000..165a90d8c4 --- /dev/null +++ b/arbitrator/arbutil/src/types.rs @@ -0,0 +1,25 @@ +// Copyright 2022-2023, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use digest::Digest; +use num_enum::{IntoPrimitive, TryFromPrimitive}; + +// These values must be kept in sync with `arbutil/preimage_type.go`, +// and the if statement in `contracts/src/osp/OneStepProverHostIo.sol` (search for "UNKNOWN_PREIMAGE_TYPE"). 
+#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TryFromPrimitive, IntoPrimitive, +)] +#[repr(u8)] +pub enum PreimageType { + Keccak256, + Sha2_256, +} + +impl PreimageType { + pub fn hash(&self, preimage: &[u8]) -> [u8; 32] { + match self { + Self::Keccak256 => sha3::Keccak256::digest(preimage).into(), + Self::Sha2_256 => sha2::Sha256::digest(preimage).into(), + } + } +} diff --git a/arbitrator/jit/src/gostack.rs b/arbitrator/jit/src/gostack.rs index 80fccf179c..bf7ac47675 100644 --- a/arbitrator/jit/src/gostack.rs +++ b/arbitrator/jit/src/gostack.rs @@ -64,6 +64,10 @@ impl GoStack { Self { start, memory } } + pub fn shift_start(&mut self, offset: u32) { + self.start += offset; + } + fn view(&self) -> &MemoryView { self.memory.view() } diff --git a/arbitrator/jit/src/machine.rs b/arbitrator/jit/src/machine.rs index 2cc97dd9c2..ed22e12ef9 100644 --- a/arbitrator/jit/src/machine.rs +++ b/arbitrator/jit/src/machine.rs @@ -6,7 +6,7 @@ use crate::{ wavmio, wavmio::Bytes32, Opts, }; -use arbutil::Color; +use arbutil::{Color, PreimageType}; use eyre::{bail, Result, WrapErr}; use sha3::{Digest, Keccak256}; use thiserror::Error; @@ -108,7 +108,13 @@ pub fn create(opts: &Opts, env: WasmEnv) -> (Instance, FunctionEnv, Sto "github.com/offchainlabs/nitro/wavmio.setGlobalStateU64" => func!(wavmio::set_global_state_u64), "github.com/offchainlabs/nitro/wavmio.readInboxMessage" => func!(wavmio::read_inbox_message), "github.com/offchainlabs/nitro/wavmio.readDelayedInboxMessage" => func!(wavmio::read_delayed_inbox_message), - "github.com/offchainlabs/nitro/wavmio.resolvePreImage" => func!(wavmio::resolve_preimage), + "github.com/offchainlabs/nitro/wavmio.resolvePreImage" => { + #[allow(deprecated)] // we're just keeping this around until we no longer need to validate old replay binaries + { + func!(wavmio::resolve_keccak_preimage) + } + }, + "github.com/offchainlabs/nitro/wavmio.resolveTypedPreimage" => func!(wavmio::resolve_typed_preimage), "github.com/offchainlabs/nitro/arbcompress.brotliCompress" => func!(arbcompress::brotli_compress), "github.com/offchainlabs/nitro/arbcompress.brotliDecompress" => func!(arbcompress::brotli_decompress), @@ -178,7 +184,7 @@ impl From for Escape { pub type WasmEnvMut<'a> = FunctionEnvMut<'a, WasmEnv>; pub type Inbox = BTreeMap<u64, Vec<u8>>; -pub type Oracle = BTreeMap<[u8; 32], Vec<u8>>; +pub type Preimages = BTreeMap<PreimageType, BTreeMap<[u8; 32], Vec<u8>>>; #[derive(Default)] pub struct WasmEnv { @@ -193,7 +199,7 @@ pub struct WasmEnv { /// An ordered list of the 32-byte globals pub large_globals: [Bytes32; 2], /// An oracle allowing the prover to reverse keccak256 - pub preimages: Oracle, + pub preimages: Preimages, /// The sequencer inbox's messages pub sequencer_messages: Inbox, /// The delayed inbox's messages @@ -242,11 +248,12 @@ impl WasmEnv { file.read_exact(&mut buf)?; preimages.push(buf); } + let keccak_preimages = env.preimages.entry(PreimageType::Keccak256).or_default(); for preimage in preimages { let mut hasher = Keccak256::new(); hasher.update(&preimage); let hash = hasher.finalize().into(); - env.preimages.insert(hash, preimage); + keccak_preimages.insert(hash, preimage); } } @@ -311,8 +318,6 @@ pub struct ProcessEnv { pub debug: bool, /// Mechanism for asking for preimages and returning results pub socket: Option<(BufWriter<TcpStream>, BufReader<TcpStream>)>, - /// The last preimage received over the socket - pub last_preimage: Option<([u8; 32], Vec<u8>)>, /// A timestamp that helps with printing at various moments pub timestamp: Instant, /// Whether the machine has reached the first wavmio instruction @@ -325,7 
+330,6 @@ impl Default for ProcessEnv { forks: false, debug: false, socket: None, - last_preimage: None, timestamp: Instant::now(), reached_wavmio: false, } diff --git a/arbitrator/jit/src/runtime.rs b/arbitrator/jit/src/runtime.rs index 4d83fbbe6d..d547a06553 100644 --- a/arbitrator/jit/src/runtime.rs +++ b/arbitrator/jit/src/runtime.rs @@ -39,20 +39,20 @@ pub fn wasm_write(mut env: WasmEnvMut, sp: u32) { } pub fn nanotime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time); } pub fn walltime(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u32(1, (env.go_state.time % 1_000_000_000) as u32); } pub fn walltime1(mut env: WasmEnvMut, sp: u32) { - let (sp, mut env) = GoStack::new(sp, &mut env); + let (sp, env) = GoStack::new(sp, &mut env); env.go_state.time += env.go_state.time_interval; sp.write_u64(0, env.go_state.time / 1_000_000_000); sp.write_u64(1, env.go_state.time % 1_000_000_000); diff --git a/arbitrator/jit/src/socket.rs b/arbitrator/jit/src/socket.rs index c346916414..3941763a0d 100644 --- a/arbitrator/jit/src/socket.rs +++ b/arbitrator/jit/src/socket.rs @@ -11,7 +11,6 @@ use crate::wavmio::Bytes32; pub const SUCCESS: u8 = 0x0; pub const FAILURE: u8 = 0x1; -pub const PREIMAGE: u8 = 0x2; pub const ANOTHER: u8 = 0x3; pub const READY: u8 = 0x4; diff --git a/arbitrator/jit/src/syscall.rs b/arbitrator/jit/src/syscall.rs index 4cd0363b49..c81641a7f8 100644 --- a/arbitrator/jit/src/syscall.rs +++ b/arbitrator/jit/src/syscall.rs @@ -306,10 +306,10 @@ pub fn js_value_index(mut env: WasmEnvMut, sp: u32) { pub fn js_value_call(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let Some(resume) = env.data().exports.resume.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "resume".red())) + return Escape::failure(format!("wasmer failed to bind {}", "resume".red())); }; let Some(get_stack_pointer) = env.data().exports.get_stack_pointer.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())) + return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())); }; let sp = GoStack::simple(sp, &env); let data = env.data_mut(); diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs index 44edbc450a..a398cb22f5 100644 --- a/arbitrator/jit/src/wavmio.rs +++ b/arbitrator/jit/src/wavmio.rs @@ -7,10 +7,10 @@ use crate::{ socket, }; -use arbutil::Color; +use arbutil::{Color, PreimageType}; use std::{ io, - io::{BufReader, BufWriter, ErrorKind, Write}, + io::{BufReader, BufWriter, ErrorKind}, net::TcpStream, time::Instant, }; @@ -141,11 +141,25 @@ fn inbox_message_impl(sp: &GoStack, inbox: &Inbox, name: &str) -> MaybeEscape { Ok(()) } -pub fn resolve_preimage(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { +#[deprecated] // we're just keeping this around until we no longer need to validate old replay binaries +pub fn resolve_keccak_preimage(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let (sp, env) = GoStack::new(sp, &mut env); + resolve_preimage_impl(env, sp, 0, "wavmio.ResolvePreImage") +} - let name = "wavmio.resolvePreImage"; +pub fn resolve_typed_preimage(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { + let (mut sp, env) = GoStack::new(sp, &mut env); + let preimage_type = 
sp.read_u8(0); + sp.shift_start(8); // to account for the preimage type being the first slot + resolve_preimage_impl(env, sp, preimage_type, "wavmio.ResolveTypedPreimage") +} +pub fn resolve_preimage_impl( + env: &mut WasmEnv, + sp: GoStack, + preimage_type: u8, + name: &str, +) -> MaybeEscape { let hash_ptr = sp.read_u64(0); let hash_len = sp.read_u64(1); let offset = sp.read_u64(3); @@ -157,6 +171,12 @@ pub fn resolve_preimage(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { return Ok(()); } + let Ok(preimage_type) = preimage_type.try_into() else { + eprintln!("Go trying to resolve pre image with unknown type {preimage_type}"); + sp.write_u64(7, 0); + return Ok(()); + }; + macro_rules! error { ($text:expr $(,$args:expr)*) => {{ let text = format!($text $(,$args)*); @@ -168,40 +188,10 @@ pub fn resolve_preimage(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let hash: &[u8; 32] = &hash.try_into().unwrap(); let hash_hex = hex::encode(hash); - let mut preimage = None; - let temporary; // makes the borrow checker happy - - // see if we've cached the preimage - if let Some((key, cached)) = &env.process.last_preimage { - if key == hash { - preimage = Some(cached); - } - } - - // see if this is a known preimage - if preimage.is_none() { - preimage = env.preimages.get(hash); - } - - // see if Go has the preimage - if preimage.is_none() { - if let Some((writer, reader)) = &mut env.process.socket { - socket::write_u8(writer, socket::PREIMAGE)?; - socket::write_bytes32(writer, hash)?; - writer.flush()?; - - if socket::read_u8(reader)? == socket::SUCCESS { - temporary = socket::read_bytes(reader)?; - env.process.last_preimage = Some((*hash, temporary.clone())); - preimage = Some(&temporary); - } - } - } - - let preimage = match preimage { - Some(preimage) => preimage, - None => error!("Missing requested preimage for hash {hash_hex} in {name}"), + let Some(preimage) = env.preimages.get(&preimage_type).and_then(|m| m.get(hash)) else { + error!("Missing requested preimage for preimage type {preimage_type:?} hash {hash_hex} in {name}"); }; + let offset = match u32::try_from(offset) { Ok(offset) => offset as usize, Err(_) => error!("bad offset {offset} in {name}"), @@ -291,11 +281,17 @@ fn ready_hostio(env: &mut WasmEnv) -> MaybeEscape { env.delayed_messages.insert(position, message); } - let preimage_count = socket::read_u64(stream)?; - for _ in 0..preimage_count { - let hash = socket::read_bytes32(stream)?; - let preimage = socket::read_bytes(stream)?; - env.preimages.insert(hash, preimage); + let preimage_types = socket::read_u64(stream)?; + for _ in 0..preimage_types { + let preimage_ty = PreimageType::try_from(socket::read_u8(stream)?) + .map_err(|e| Escape::Failure(e.to_string()))?; + let map = env.preimages.entry(preimage_ty).or_default(); + let preimage_count = socket::read_u64(stream)?; + for _ in 0..preimage_count { + let hash = socket::read_bytes32(stream)?; + let preimage = socket::read_bytes(stream)?; + map.insert(hash, preimage); + } } if socket::read_u8(stream)? 
!= socket::READY { diff --git a/arbitrator/prover/src/host.rs b/arbitrator/prover/src/host.rs index f3826c0670..c66052ad54 100644 --- a/arbitrator/prover/src/host.rs +++ b/arbitrator/prover/src/host.rs @@ -10,7 +10,7 @@ use crate::{ value::{ArbValueType, FunctionType}, wavm::{wasm_to_wavm, Instruction, Opcode}, }; -use arbutil::Color; +use arbutil::{Color, PreimageType}; use eyre::{bail, ErrReport, Result}; use lazy_static::lazy_static; use std::{collections::HashMap, str::FromStr}; @@ -49,7 +49,8 @@ pub enum Hostio { WavmSetGlobalStateBytes32, WavmGetGlobalStateU64, WavmSetGlobalStateU64, - WavmReadPreImage, + WavmReadKeccakPreimage, + WavmReadSha256Preimage, WavmReadInboxMessage, WavmReadDelayedInboxMessage, WavmHaltAndSetFinished, @@ -71,7 +72,8 @@ impl FromStr for Hostio { ("env", "wavm_set_globalstate_bytes32") => WavmSetGlobalStateBytes32, ("env", "wavm_get_globalstate_u64") => WavmGetGlobalStateU64, ("env", "wavm_set_globalstate_u64") => WavmSetGlobalStateU64, - ("env", "wavm_read_pre_image") => WavmReadPreImage, + ("env", "wavm_read_keccak_256_preimage") => WavmReadKeccakPreimage, + ("env", "wavm_read_sha2_256_preimage") => WavmReadSha256Preimage, ("env", "wavm_read_inbox_message") => WavmReadInboxMessage, ("env", "wavm_read_delayed_inbox_message") => WavmReadDelayedInboxMessage, ("env", "wavm_halt_and_set_finished") => WavmHaltAndSetFinished, @@ -107,7 +109,8 @@ impl Hostio { WavmSetGlobalStateBytes32 => func!([I32, I32]), WavmGetGlobalStateU64 => func!([I32], [I64]), WavmSetGlobalStateU64 => func!([I32, I64]), - WavmReadPreImage => func!([I32, I32], [I32]), + WavmReadKeccakPreimage => func!([I32, I32], [I32]), + WavmReadSha256Preimage => func!([I32, I32], [I32]), WavmReadInboxMessage => func!([I64, I32, I32], [I32]), WavmReadDelayedInboxMessage => func!([I64, I32, I32], [I32]), WavmHaltAndSetFinished => func!(), @@ -167,10 +170,15 @@ impl Hostio { opcode!(LocalGet, 1); opcode!(SetGlobalStateU64); } - WavmReadPreImage => { + WavmReadKeccakPreimage => { opcode!(LocalGet, 0); opcode!(LocalGet, 1); - opcode!(ReadPreImage); + opcode!(ReadPreImage, PreimageType::Keccak256); + } + WavmReadSha256Preimage => { + opcode!(LocalGet, 0); + opcode!(LocalGet, 1); + opcode!(ReadPreImage, PreimageType::Sha2_256); } WavmReadInboxMessage => { opcode!(LocalGet, 0); diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 97845c0aa7..e4ea7a06c5 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -15,6 +15,7 @@ pub mod value; pub mod wavm; use crate::machine::{argument_data_to_inbox, Machine}; +use arbutil::PreimageType; use eyre::Result; use machine::{get_empty_preimage_resolver, GlobalState, MachineStatus, PreimageResolver}; use sha3::{Digest, Keccak256}; @@ -292,11 +293,11 @@ pub struct ResolvedPreimage { #[no_mangle] pub unsafe extern "C" fn arbitrator_set_preimage_resolver( mach: *mut Machine, - resolver: unsafe extern "C" fn(u64, *const u8) -> ResolvedPreimage, + resolver: unsafe extern "C" fn(u64, u8, *const u8) -> ResolvedPreimage, ) { - (*mach).set_preimage_resolver( - Arc::new(move |context: u64, hash: Bytes32| -> Option<CBytes> { - let res = resolver(context, hash.as_ptr()); + (*mach).set_preimage_resolver(Arc::new( + move |context: u64, ty: PreimageType, hash: Bytes32| -> Option<CBytes> { + let res = resolver(context, ty.into(), hash.as_ptr()); if res.len < 0 { return None; } @@ -310,8 +311,8 @@ pub unsafe extern "C" fn arbitrator_set_preimage_resolver( ); } Some(data) - }) as PreimageResolver, - ); + }, + ) as PreimageResolver); } #[no_mangle] diff --git 
a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index fff9c0f3d8..0849312f3d 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -14,7 +14,7 @@ use crate::{ IBinOpType, IRelOpType, IUnOpType, Instruction, Opcode, }, }; -use arbutil::Color; +use arbutil::{Color, PreimageType}; use digest::Digest; use eyre::{bail, ensure, eyre, Result, WrapErr}; use fnv::FnvHashMap as HashMap; @@ -651,7 +651,7 @@ pub struct MachineState<'a> { initial_hash: Bytes32, } -pub type PreimageResolver = Arc<dyn Fn(u64, Bytes32) -> Option<CBytes>>; +pub type PreimageResolver = Arc<dyn Fn(u64, PreimageType, Bytes32) -> Option<CBytes> + Send + Sync>; /// Wraps a preimage resolver to provide an easier API /// and cache the last preimage retrieved. @@ -675,7 +675,7 @@ impl PreimageResolverWrapper { } } - pub fn get(&mut self, context: u64, hash: Bytes32) -> Option<&[u8]> { + pub fn get(&mut self, context: u64, ty: PreimageType, hash: Bytes32) -> Option<&[u8]> { // TODO: this is unnecessarily complicated by the rust borrow checker. // This will probably be simplifiable when Polonius is shipped. if matches!(&self.last_resolved, Some(r) if r.0 != hash) { @@ -684,19 +684,19 @@ impl PreimageResolverWrapper { match &mut self.last_resolved { Some(resolved) => Some(&resolved.1), x => { - let data = (self.resolver)(context, hash)?; + let data = (self.resolver)(context, ty, hash)?; Some(&x.insert((hash, data)).1) } } } - pub fn get_const(&self, context: u64, hash: Bytes32) -> Option<CBytes> { + pub fn get_const(&self, context: u64, ty: PreimageType, hash: Bytes32) -> Option<CBytes> { if let Some(resolved) = &self.last_resolved { if resolved.0 == hash { return Some(resolved.1.clone()); } } - (self.resolver)(context, hash) + (self.resolver)(context, ty, hash) } } @@ -848,7 +848,7 @@ where } pub fn get_empty_preimage_resolver() -> PreimageResolver { - Arc::new(|_, _| None) as _ + Arc::new(|_, _, _| None) as _ } impl Machine { @@ -1864,8 +1864,11 @@ impl Machine { Opcode::ReadPreImage => { let offset = self.value_stack.pop().unwrap().assume_u32(); let ptr = self.value_stack.pop().unwrap().assume_u32(); + let preimage_ty = PreimageType::try_from(u8::try_from(inst.argument_data)?)?; if let Some(hash) = module.memory.load_32_byte_aligned(ptr.into()) { - if let Some(preimage) = self.preimage_resolver.get(self.context, hash) { + if let Some(preimage) = + self.preimage_resolver.get(self.context, preimage_ty, hash) + { let offset = usize::try_from(offset).unwrap(); let len = std::cmp::min(32, preimage.len().saturating_sub(offset)); let read = preimage.get(offset..(offset + len)).unwrap_or_default(); @@ -2279,10 +2282,19 @@ impl Machine { data.extend(mem_merkle.prove(idx).unwrap_or_default()); if next_inst.opcode == Opcode::ReadPreImage { let hash = Bytes32(prev_data); - let preimage = match self.preimage_resolver.get_const(self.context, hash) { - Some(b) => b, - None => panic!("Missing requested preimage for hash {}", hash), - }; + let preimage_ty = PreimageType::try_from( + u8::try_from(next_inst.argument_data) + .expect("ReadPreImage argument data is out of range for a u8"), + ) + .expect("Invalid preimage type in ReadPreImage argument data"); + let preimage = + match self + .preimage_resolver + .get_const(self.context, preimage_ty, hash) + { + Some(b) => b, + None => panic!("Missing requested preimage for hash {}", hash), + }; data.push(0); // preimage proof type data.extend(preimage); } else if next_inst.opcode == Opcode::ReadInboxMessage { diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 9afa6a7f55..2c72d0b577 100644 --- 
a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -use arbutil::{format, Color, DebugColor}; +use arbutil::{format, Color, DebugColor, PreimageType}; use eyre::{Context, Result}; use fnv::{FnvHashMap as HashMap, FnvHashSet as HashSet}; use prover::{ @@ -9,9 +9,8 @@ use prover::{ utils::{Bytes32, CBytes}, wavm::Opcode, }; -use sha3::{Digest, Keccak256}; -use std::io::BufWriter; use std::sync::Arc; +use std::{convert::TryInto, io::BufWriter}; use std::{ fs::File, io::{BufReader, ErrorKind, Read, Write}, }; @@ -79,24 +78,6 @@ struct Opts { max_steps: Option<u64>, } -fn parse_size_delim(path: &Path) -> Result<Vec<Vec<u8>>> { - let mut file = BufReader::new(File::open(path)?); - let mut contents = Vec::new(); - loop { - let mut size_buf = [0u8; 8]; - match file.read_exact(&mut size_buf) { - Ok(()) => {} - Err(e) if e.kind() == ErrorKind::UnexpectedEof => break, - Err(e) => return Err(e.into()), - } - let size = u64::from_le_bytes(size_buf) as usize; - let mut buf = vec![0u8; size]; - file.read_exact(&mut buf)?; - contents.push(buf); - } - Ok(contents) -} - fn file_with_stub_header(path: &Path, headerlength: usize) -> Result<Vec<u8>> { let mut msg = vec![0u8; headerlength]; File::open(path).unwrap().read_to_end(&mut msg)?; @@ -160,19 +141,34 @@ fn main() -> Result<()> { delayed_position += 1; } - let mut preimages: HashMap<Bytes32, CBytes> = HashMap::default(); + let mut preimages: HashMap<PreimageType, HashMap<Bytes32, CBytes>> = HashMap::default(); if let Some(path) = opts.preimages { - preimages = parse_size_delim(&path)? - .into_iter() - .map(|b| { - let mut hasher = Keccak256::new(); - hasher.update(&b); - (hasher.finalize().into(), CBytes::from(b.as_slice())) - }) - .collect(); + let mut file = BufReader::new(File::open(path)?); + loop { + let mut ty_buf = [0u8; 1]; + match file.read_exact(&mut ty_buf) { + Ok(()) => {} + Err(e) if e.kind() == ErrorKind::UnexpectedEof => break, + Err(e) => return Err(e.into()), + } + let preimage_ty: PreimageType = ty_buf[0].try_into()?; + + let mut size_buf = [0u8; 8]; + file.read_exact(&mut size_buf)?; + let size = u64::from_le_bytes(size_buf) as usize; + let mut buf = vec![0u8; size]; + file.read_exact(&mut buf)?; + + let hash = preimage_ty.hash(&buf); + preimages + .entry(preimage_ty) + .or_default() + .insert(hash.into(), buf.as_slice().into()); + } } let preimage_resolver = - Arc::new(move |_, hash| preimages.get(&hash).cloned()) as PreimageResolver; + Arc::new(move |_, ty, hash| preimages.get(&ty).and_then(|m| m.get(&hash)).cloned()) + as PreimageResolver; let last_block_hash = decode_hex_arg(&opts.last_block_hash, "--last-block-hash")?; let last_send_root = decode_hex_arg(&opts.last_send_root, "--last-send-root")?; @@ -387,9 +383,7 @@ fn main() -> Result<()> { while let Some((module, func, profile)) = func_stack.pop() { sum.total_cycles += profile.total_cycles; sum.count += profile.count; - let entry = func_profile - .entry((module, func)) - .or_insert_with(SimpleProfile::default); + let entry = func_profile.entry((module, func)).or_default(); entry.count += sum.count; entry.total_cycles += sum.total_cycles; entry.local_cycles += profile.local_cycles; diff --git a/arbitrator/prover/src/utils.rs b/arbitrator/prover/src/utils.rs index 6c11e9af05..e86ea96768 100644 --- a/arbitrator/prover/src/utils.rs +++ b/arbitrator/prover/src/utils.rs @@ -158,6 +158,13 @@ impl From<&[u8]> for CBytes { } } +// There are no thread-safety concerns for CBytes. 
+// This type is basically a Box<[u8]> (which is Send + Sync) with libc as an allocator. +// Any data races between threads are prevented by Rust borrowing rules, +// and the data isn't thread-local so there's no concern moving it between threads. +unsafe impl Send for CBytes {} +unsafe impl Sync for CBytes {} + #[derive(Serialize, Deserialize)] #[serde(remote = "Type")] enum RemoteType { diff --git a/arbitrator/prover/test-cases/go/main.go b/arbitrator/prover/test-cases/go/main.go index a5a1028fb0..afed870fea 100644 --- a/arbitrator/prover/test-cases/go/main.go +++ b/arbitrator/prover/test-cases/go/main.go @@ -5,14 +5,18 @@ package main import ( "bytes" + "encoding/hex" "fmt" "os" "runtime" "time" + "github.com/ethereum/go-ethereum/common" merkletree "github.com/wealdtech/go-merkletree" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/wavmio" ) // MerkleSample is an example using the Merkle tree to generate and verify proofs. @@ -42,7 +46,7 @@ func MerkleSample(data [][]byte, toproove int) (bool, error) { } func testCompression(data []byte) { - compressed, err := arbcompress.CompressFast(data) + compressed, err := arbcompress.CompressLevel(data, 0) if err != nil { panic(err) } @@ -85,7 +89,7 @@ func main() { verified, err = MerkleSample(data, -1) if err != nil { if verified { - panic("succeded to verify proof invalid") + panic("succeeded to verify proof invalid") } } @@ -95,4 +99,19 @@ func main() { testCompression([]byte("This is a test string la la la la la la la la la la")) println("test compression passed!\n") + + checkPreimage := func(ty arbutil.PreimageType, hash common.Hash) { + preimage, err := wavmio.ResolveTypedPreimage(ty, hash) + if err != nil { + panic(fmt.Sprintf("failed to resolve preimage of type %v: %v", ty, err)) + } + if !bytes.Equal(preimage, []byte("hello world")) { + panic(fmt.Sprintf("got wrong preimage of type %v: %v", ty, hex.EncodeToString(preimage))) + } + } + + checkPreimage(arbutil.Keccak256PreimageType, common.HexToHash("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad")) + checkPreimage(arbutil.Sha2_256PreimageType, common.HexToHash("b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")) + + println("verified preimage resolution!\n") } diff --git a/arbitrator/prover/test-cases/rust/data/preimages.bin b/arbitrator/prover/test-cases/rust/data/preimages.bin index c832466d61..34ef5a4292 100644 Binary files a/arbitrator/prover/test-cases/rust/data/preimages.bin and b/arbitrator/prover/test-cases/rust/data/preimages.bin differ diff --git a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs index 3112ba5624..19ddabddca 100644 --- a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs +++ b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs @@ -1,7 +1,8 @@ use hex_literal::hex; extern "C" { - pub fn wavm_read_pre_image(ptr: *mut u8, offset: usize) -> usize; + pub fn wavm_read_keccak_256_preimage(ptr: *mut u8, offset: usize) -> usize; + pub fn wavm_read_sha2_256_preimage(ptr: *mut u8, offset: usize) -> usize; pub fn wavm_read_inbox_message(msg_num: u64, ptr: *mut u8, offset: usize) -> usize; pub fn wavm_read_delayed_inbox_message(seq_num: u64, ptr: *mut u8, offset: usize) -> usize; pub fn wavm_halt_and_set_finished(); @@ -38,11 +39,17 @@ fn main() { for j in 0..32 { assert_eq!(bytebuffer.0[j], (j as u8) + 1); } - let hash = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"); - bytebuffer = 
Bytes32(hash); - println!("preimage"); + let keccak_hash = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"); + bytebuffer = Bytes32(keccak_hash); + println!("keccak preimage"); let expected_preimage = b"hello world"; - let len = wavm_read_pre_image(bytebuffer.0.as_mut_ptr(), 0); + let len = wavm_read_keccak_256_preimage(bytebuffer.0.as_mut_ptr(), 0); + assert_eq!(len, expected_preimage.len()); + assert_eq!(&bytebuffer.0[..len], expected_preimage); + println!("sha2 preimage"); + let sha2_hash = hex!("b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"); + bytebuffer = Bytes32(sha2_hash); + let len = wavm_read_sha2_256_preimage(bytebuffer.0.as_mut_ptr(), 0); assert_eq!(len, expected_preimage.len()); assert_eq!(&bytebuffer.0[..len], expected_preimage); } diff --git a/arbitrator/wasm-libraries/Cargo.lock b/arbitrator/wasm-libraries/Cargo.lock index 04ac828ec8..3b545fcc41 100644 --- a/arbitrator/wasm-libraries/Cargo.lock +++ b/arbitrator/wasm-libraries/Cargo.lock @@ -2,6 +2,25 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "arbutil" +version = "0.1.0" +dependencies = [ + "digest", + "num_enum", + "sha2", + "sha3", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "brotli" version = "0.1.0" @@ -9,12 +28,63 @@ dependencies = [ "go-abi", ] +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cpufeatures" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "go-abi" version = "0.1.0" @@ -29,13 +99,106 @@ dependencies = [ "rand_pcg", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + [[package]] name = "host-io" version = "0.1.0" dependencies = [ + "arbutil", "go-abi", ] +[[package]] +name = "indexmap" +version = 
"2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.147" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "num_enum" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +dependencies = [ + "proc-macro2", +] + [[package]] name = "rand" version = "0.8.4" @@ -60,6 +223,82 @@ dependencies = [ "rand_core", ] +[[package]] +name = "sha2" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "syn" +version = "2.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" + +[[package]] +name = "toml_edit" +version = "0.19.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" 
+dependencies = [
+ "indexmap",
+ "toml_datetime",
+ "winnow",
+]
+
+[[package]]
+name = "typenum"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
 [[package]]
 name = "wasi-stub"
 version = "0.1.0"
+
+[[package]]
+name = "winnow"
+version = "0.5.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83817bbecf72c73bad717ee86820ebf286203d2e04c3951f3cd538869c897364"
+dependencies = [
+ "memchr",
+]
diff --git a/arbitrator/wasm-libraries/go-stub/src/lib.rs b/arbitrator/wasm-libraries/go-stub/src/lib.rs
index 8be1ff48e6..df77893fcb 100644
--- a/arbitrator/wasm-libraries/go-stub/src/lib.rs
+++ b/arbitrator/wasm-libraries/go-stub/src/lib.rs
@@ -587,7 +587,10 @@ pub unsafe extern "C" fn wavm__go_after_run() {
     while let Some(info) = state.times.pop() {
         while state.pending_ids.contains(&info.id) {
             TIME = std::cmp::max(TIME, info.time);
-            drop(state);
+            // Important: the current reference to state must not be used after this resume call,
+            // because the resume call may invalidate the reference.
+            // That's why, immediately after the resume call, we replace the reference
+            // with a fresh one fetched from TIMEOUT_STATE.
             wavm_guest_call__resume();
             state = TIMEOUT_STATE.get_or_insert_with(Default::default);
         }
diff --git a/arbitrator/wasm-libraries/host-io/Cargo.toml b/arbitrator/wasm-libraries/host-io/Cargo.toml
index 8d31f4148a..48f498f910 100644
--- a/arbitrator/wasm-libraries/host-io/Cargo.toml
+++ b/arbitrator/wasm-libraries/host-io/Cargo.toml
@@ -9,3 +9,4 @@ crate-type = ["cdylib"]
 [dependencies]
 go-abi = { path = "../go-abi" }
+arbutil = { path = "../../arbutil" }
diff --git a/arbitrator/wasm-libraries/host-io/src/lib.rs b/arbitrator/wasm-libraries/host-io/src/lib.rs
index 65a4f90130..e8f59994ac 100644
--- a/arbitrator/wasm-libraries/host-io/src/lib.rs
+++ b/arbitrator/wasm-libraries/host-io/src/lib.rs
@@ -1,11 +1,14 @@
+use arbutil::PreimageType;
 use go_abi::*;
+use std::convert::TryInto;

 extern "C" {
     pub fn wavm_get_globalstate_bytes32(idx: u32, ptr: *mut u8);
     pub fn wavm_set_globalstate_bytes32(idx: u32, ptr: *const u8);
     pub fn wavm_get_globalstate_u64(idx: u32) -> u64;
     pub fn wavm_set_globalstate_u64(idx: u32, val: u64);
-    pub fn wavm_read_pre_image(ptr: *mut u8, offset: usize) -> usize;
+    pub fn wavm_read_keccak_256_preimage(ptr: *mut u8, offset: usize) -> usize;
+    pub fn wavm_read_sha2_256_preimage(ptr: *mut u8, offset: usize) -> usize;
     pub fn wavm_read_inbox_message(msg_num: u64, ptr: *mut u8, offset: usize) -> usize;
     pub fn wavm_read_delayed_inbox_message(seq_num: u64, ptr: *mut u8, offset: usize) -> usize;
 }
@@ -117,26 +120,39 @@ pub unsafe extern "C" fn go__github_com_offchainlabs_nitro_wavmio_readDelayedInb
 }

 #[no_mangle]
-pub unsafe extern "C" fn go__github_com_offchainlabs_nitro_wavmio_resolvePreImage(sp: GoStack) {
-    let hash_ptr = sp.read_u64(0);
-    let hash_len = sp.read_u64(1);
-    let offset = sp.read_u64(3);
-    let out_ptr = sp.read_u64(4);
-    let out_len = sp.read_u64(5);
+pub unsafe extern "C" fn 
go__github_com_offchainlabs_nitro_wavmio_resolveTypedPreimage(sp: GoStack) { + let preimage_type = sp.read_u8(0); + let hash_ptr = sp.read_u64(1); + let hash_len = sp.read_u64(2); + let offset = sp.read_u64(4); + let out_ptr = sp.read_u64(5); + let out_len = sp.read_u64(6); if hash_len != 32 || out_len != 32 { eprintln!( - "Go attempting to resolve pre image with hash len {} and out len {}", + "Go attempting to resolve preimage with hash len {} and out len {}", hash_len, out_len, ); - sp.write_u64(7, 0); + sp.write_u64(8, 0); return; } + let Ok(preimage_type) = preimage_type.try_into() else { + eprintln!( + "Go trying to resolve preimage with unknown type {}", + preimage_type + ); + sp.write_u64(8, 0); + return; + }; let mut our_buf = MemoryLeaf([0u8; 32]); our_buf.0.copy_from_slice(&read_slice(hash_ptr, hash_len)); let our_ptr = our_buf.0.as_mut_ptr(); assert_eq!(our_ptr as usize % 32, 0); - let read = wavm_read_pre_image(our_ptr, offset as usize); + let preimage_reader = match preimage_type { + PreimageType::Keccak256 => wavm_read_keccak_256_preimage, + PreimageType::Sha2_256 => wavm_read_sha2_256_preimage, + }; + let read = preimage_reader(our_ptr, offset as usize); assert!(read <= 32); write_slice(&our_buf.0[..read], out_ptr); - sp.write_u64(7, read as u64); + sp.write_u64(8, read as u64); } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 3e5e6a738f..e1e1f0b29e 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -9,22 +9,29 @@ import ( "encoding/hex" "errors" "fmt" + "math" "math/big" + "strings" + "sync/atomic" "time" "github.com/andybalholm/brotli" - flag "github.com/spf13/pflag" + "github.com/spf13/pflag" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" @@ -41,6 +48,7 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" ) type batchPosterPosition struct { @@ -51,41 +59,71 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster[batchPosterPosition] - redisLock *SimpleRedisLock - firstAccErr time.Time // first time a continuous missing accumulator occurred - backlog uint64 // An estimate of the number of unposted batches + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config 
BatchPosterConfigFetcher
+	seqInbox        *bridgegen.SequencerInbox
+	bridge          *bridgegen.Bridge
+	syncMonitor     *SyncMonitor
+	seqInboxABI     *abi.ABI
+	seqInboxAddr    common.Address
+	bridgeAddr      common.Address
+	gasRefunderAddr common.Address
+	building        *buildingBatch
+	daWriter        das.DataAvailabilityServiceWriter
+	dataPoster      *dataposter.DataPoster
+	redisLock       *redislock.Simple
+	firstEphemeralError time.Time // the time the current streak of errors suspected to be ephemeral began
+	// An estimate of the number of batches we want to post but haven't yet.
+	// This doesn't include batches which we don't want to post yet due to the L1 bounds.
+	backlog         uint64
+	lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds
+
+	batchReverted        atomic.Bool // indicates whether a data poster batch was reverted
+	nextRevertCheckBlock int64       // the next parent chain block to check for reverted batches
+
+	accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList
 }

+type l1BlockBound int
+
+// This enum starts at 1 to avoid the empty initialization of 0 being valid
+const (
+	// Default is Safe if the L1 reader has finality data enabled, otherwise Latest
+	l1BlockBoundDefault l1BlockBound = iota + 1
+	l1BlockBoundSafe
+	l1BlockBoundFinalized
+	l1BlockBoundLatest
+	l1BlockBoundIgnore
+)
+
 type BatchPosterConfig struct {
-	Enable                             bool          `koanf:"enable"`
-	DisableDasFallbackStoreDataOnChain bool          `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"`
-	MaxBatchSize                       int           `koanf:"max-size" reload:"hot"`
-	MaxBatchPostDelay                  time.Duration `koanf:"max-delay" reload:"hot"`
-	WaitForMaxBatchPostDelay           bool          `koanf:"wait-for-max-delay" reload:"hot"`
-	BatchPollDelay                     time.Duration `koanf:"poll-delay" reload:"hot"`
-	PostingErrorDelay                  time.Duration `koanf:"error-delay" reload:"hot"`
-	CompressionLevel                   int           `koanf:"compression-level" reload:"hot"`
-	DASRetentionPeriod                 time.Duration `koanf:"das-retention-period" reload:"hot"`
-	GasRefunderAddress                 string        `koanf:"gas-refunder-address" reload:"hot"`
-	DataPoster                         dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"`
-	RedisUrl                           string        `koanf:"redis-url"`
-	RedisLock                          SimpleRedisLockConfig `koanf:"redis-lock" reload:"hot"`
-	ExtraBatchGas                      uint64        `koanf:"extra-batch-gas" reload:"hot"`
-	L1Wallet                           genericconf.WalletConfig `koanf:"parent-chain-wallet"`
-
-	gasRefunder common.Address
+	Enable                             bool `koanf:"enable"`
+	DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"`
+	// Max batch size.
+	MaxSize int `koanf:"max-size" reload:"hot"`
+	// Max batch post delay.
+	MaxDelay time.Duration `koanf:"max-delay" reload:"hot"`
+	// Wait for max BatchPost delay.
+	WaitForMaxDelay bool `koanf:"wait-for-max-delay" reload:"hot"`
+	// Batch post polling interval.
+	PollInterval time.Duration `koanf:"poll-interval" reload:"hot"`
+	// Batch posting error delay.
+ ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` + CompressionLevel int `koanf:"compression-level" reload:"hot"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` + GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` + L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` + + gasRefunder common.Address + l1BlockBound l1BlockBound } func (c *BatchPosterConfig) Validate() error { @@ -93,125 +131,349 @@ func (c *BatchPosterConfig) Validate() error { return fmt.Errorf("invalid gas refunder address \"%v\"", c.GasRefunderAddress) } c.gasRefunder = common.HexToAddress(c.GasRefunderAddress) - if c.MaxBatchSize <= 40 { + if c.MaxSize <= 40 { return errors.New("MaxBatchSize too small") } + if c.L1BlockBound == "" { + c.l1BlockBound = l1BlockBoundDefault + } else if c.L1BlockBound == "safe" { + c.l1BlockBound = l1BlockBoundSafe + } else if c.L1BlockBound == "finalized" { + c.l1BlockBound = l1BlockBoundFinalized + } else if c.L1BlockBound == "latest" { + c.l1BlockBound = l1BlockBoundLatest + } else if c.L1BlockBound == "ignore" { + c.l1BlockBound = l1BlockBoundIgnore + } else { + return fmt.Errorf("invalid L1 block bound tag \"%v\" (see --help for options)", c.L1BlockBound) + } return nil } type BatchPosterConfigFetcher func() *BatchPosterConfig -func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) { +func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") - f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxBatchSize, "maximum batch size") - f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxBatchPostDelay, "maximum batch posting delay") - f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxBatchPostDelay, "wait for the max batch delay, even if the batch is full") - f.Duration(prefix+".poll-delay", DefaultBatchPosterConfig.BatchPollDelay, "how long to delay after successfully posting batch") - f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.PostingErrorDelay, "how long to delay after error posting batch") + f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") + f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") + f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") + f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.ErrorDelay, "how long to delay after error posting batch") f.Int(prefix+".compression-level", DefaultBatchPosterConfig.CompressionLevel, "batch compression level") f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the 
period for which DASes are requested to retain the stored batches.")
 	f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)")
 	f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches")
 	f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in")
-	RedisLockConfigAddOptions(prefix+".redis-lock", f)
-	dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f)
-	genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.L1Wallet.Pathname)
+	f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)")
+	f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay")
+	redislock.AddConfigOptions(prefix+".redis-lock", f)
+	dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig)
+	genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname)
 }

 var DefaultBatchPosterConfig = BatchPosterConfig{
 	Enable:                             false,
 	DisableDasFallbackStoreDataOnChain: false,
-	MaxBatchSize:                       100000,
-	BatchPollDelay:                     time.Second * 10,
-	PostingErrorDelay:                  time.Second * 10,
-	MaxBatchPostDelay:                  time.Hour,
-	WaitForMaxBatchPostDelay:           false,
-	CompressionLevel:                   brotli.BestCompression,
-	DASRetentionPeriod:                 time.Hour * 24 * 15,
-	GasRefunderAddress:                 "",
-	ExtraBatchGas:                      50_000,
-	DataPoster:                         dataposter.DefaultDataPosterConfig,
-	L1Wallet:                           DefaultBatchPosterL1WalletConfig,
+	// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
+	MaxSize:            100000,
+	PollInterval:       time.Second * 10,
+	ErrorDelay:         time.Second * 10,
+	MaxDelay:           time.Hour,
+	WaitForMaxDelay:    false,
+	CompressionLevel:   brotli.BestCompression,
+	DASRetentionPeriod: time.Hour * 24 * 15,
+	GasRefunderAddress: "",
+	ExtraBatchGas:      50_000,
+	DataPoster:         dataposter.DefaultDataPosterConfig,
+	ParentChainWallet:  DefaultBatchPosterL1WalletConfig,
+	L1BlockBound:       "",
+	L1BlockBoundBypass: time.Hour,
+	RedisLock:          redislock.DefaultCfg,
 }

 var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
 	Pathname:      "batch-poster-wallet",
-	PasswordImpl:  genericconf.WalletConfigDefault.PasswordImpl,
+	Password:      genericconf.WalletConfigDefault.Password,
 	PrivateKey:    genericconf.WalletConfigDefault.PrivateKey,
 	Account:       genericconf.WalletConfigDefault.Account,
 	OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey,
 }

 var TestBatchPosterConfig = BatchPosterConfig{
-	Enable:                   true,
-	MaxBatchSize:             100000,
-	BatchPollDelay:           time.Millisecond * 10,
-	PostingErrorDelay:        time.Millisecond * 10,
-	MaxBatchPostDelay:        0,
-	WaitForMaxBatchPostDelay: false,
-	CompressionLevel:         2,
-	DASRetentionPeriod:       time.Hour * 24 * 15,
-	GasRefunderAddress:       "",
-	ExtraBatchGas:            10_000,
-	DataPoster:               dataposter.TestDataPosterConfig,
-	L1Wallet:                 DefaultBatchPosterL1WalletConfig,
-}
-
-func NewBatchPoster(l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, 
transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { - seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client()) + Enable: true, + MaxSize: 100000, + PollInterval: time.Millisecond * 10, + ErrorDelay: time.Millisecond * 10, + MaxDelay: 0, + WaitForMaxDelay: false, + CompressionLevel: 2, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 10_000, + DataPoster: dataposter.TestDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, +} + +type BatchPosterOpts struct { + DataPosterDB ethdb.Database + L1Reader *headerreader.HeaderReader + Inbox *InboxTracker + Streamer *TransactionStreamer + SyncMonitor *SyncMonitor + Config BatchPosterConfigFetcher + DeployInfo *chaininfo.RollupAddresses + TransactOpts *bind.TransactOpts + DAWriter das.DataAvailabilityServiceWriter +} + +func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { + seqInbox, err := bridgegen.NewSequencerInbox(opts.DeployInfo.SequencerInbox, opts.L1Reader.Client()) if err != nil { return nil, err } - bridge, err := bridgegen.NewBridge(deployInfo.Bridge, l1Reader.Client()) + bridge, err := bridgegen.NewBridge(opts.DeployInfo.Bridge, opts.L1Reader.Client()) if err != nil { return nil, err } - if err = config().Validate(); err != nil { + if err = opts.Config().Validate(); err != nil { return nil, err } seqInboxABI, err := bridgegen.SequencerInboxMetaData.GetAbi() if err != nil { return nil, err } - redisClient, err := redisutil.RedisClientFromURL(config().RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(opts.Config().RedisUrl) if err != nil { return nil, err } - redisLockConfigFetcher := func() *SimpleRedisLockConfig { - return &config().RedisLock + redisLockConfigFetcher := func() *redislock.SimpleCfg { + simpleRedisLockConfig := opts.Config().RedisLock + simpleRedisLockConfig.Key = batchPosterSimpleRedisLockKey + return &simpleRedisLockConfig } - redisLock, err := NewSimpleRedisLock(redisClient, redisLockConfigFetcher, func() bool { return syncMonitor.Synced() }) + redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return opts.SyncMonitor.Synced() }) if err != nil { return nil, err } b := &BatchPoster{ - l1Reader: l1Reader, - inbox: inbox, - streamer: streamer, - syncMonitor: syncMonitor, - config: config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: deployInfo.SequencerInbox, - daWriter: daWriter, - redisLock: redisLock, + l1Reader: opts.L1Reader, + inbox: opts.Inbox, + streamer: opts.Streamer, + syncMonitor: opts.SyncMonitor, + config: opts.Config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: opts.DeployInfo.SequencerInbox, + gasRefunderAddr: opts.Config().gasRefunder, + bridgeAddr: opts.DeployInfo.Bridge, + daWriter: opts.DAWriter, + redisLock: redisLock, + accessList: func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { + return AccessList(&AccessListOpts{ + SequencerInboxAddr: opts.DeployInfo.SequencerInbox, + DataPosterAddr: opts.TransactOpts.From, + BridgeAddr: opts.DeployInfo.Bridge, + GasRefunderAddr: opts.Config().gasRefunder, + SequencerInboxAccs: SequencerInboxAccs, + AfterDelayedMessagesRead: AfterDelayedMessagesRead, + }) + }, } dataPosterConfigFetcher := func() *dataposter.DataPosterConfig { - return &config().DataPoster - } - b.dataPoster, err = 
dataposter.NewDataPoster(l1Reader, transactOpts, redisClient, redisLock, dataPosterConfigFetcher, b.getBatchPosterPosition)
+		return &(opts.Config().DataPoster)
+	}
+	b.dataPoster, err = dataposter.NewDataPoster(ctx,
+		&dataposter.DataPosterOpts{
+			Database:          opts.DataPosterDB,
+			HeaderReader:      opts.L1Reader,
+			Auth:              opts.TransactOpts,
+			RedisClient:       redisClient,
+			RedisLock:         redisLock,
+			Config:            dataPosterConfigFetcher,
+			MetadataRetriever: b.getBatchPosterPosition,
+			RedisKey:          "data-poster.queue",
+		})
 	if err != nil {
 		return nil, err
 	}
 	return b, nil
 }

-func (b *BatchPoster) getBatchPosterPosition(ctx context.Context, blockNum *big.Int) (batchPosterPosition, error) {
+type AccessListOpts struct {
+	SequencerInboxAddr       common.Address
+	BridgeAddr               common.Address
+	DataPosterAddr           common.Address
+	GasRefunderAddr          common.Address
+	SequencerInboxAccs       int
+	AfterDelayedMessagesRead int
+}
+
+// AccessList returns the access list (contracts, storage slots) for the batch poster.
+func AccessList(opts *AccessListOpts) types.AccessList {
+	l := types.AccessList{
+		types.AccessTuple{
+			Address: opts.SequencerInboxAddr,
+			StorageKeys: []common.Hash{
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds
+				// ADMIN_SLOT from OpenZeppelin, keccak-256 hash of
+				// "eip1967.proxy.admin" subtracted by 1.
+				common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"),
+				// IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash
+				// of "eip1967.proxy.implementation" subtracted by 1.
+				common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"),
+				// isBatchPoster[batchPosterAddr]; for mainnet it's: "0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660".
+				common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{3})),
+			},
+		},
+		types.AccessTuple{
+			Address: opts.BridgeAddr,
+			StorageKeys: []common.Hash{
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // delayedInboxAccs.length
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // sequencerInboxAccs.length
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000009"), // sequencerInbox
+				common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // sequencerReportedSubMessageCount
+				// ADMIN_SLOT from OpenZeppelin, keccak-256 hash of
+				// "eip1967.proxy.admin" subtracted by 1.
+				common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"),
+				// IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash
+				// of "eip1967.proxy.implementation" subtracted by 1.
+				common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"),
+				// These below may change when the transaction is actually executed:
+				// - delayedInboxAccs[delayedInboxAccs.length - 1]
+				// - delayedInboxAccs.push(...);
+			},
+		},
+	}
+
+	for _, v := range []struct{ slotIdx, val int }{
+		{7, opts.SequencerInboxAccs - 1},       // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1))
+		{7, opts.SequencerInboxAccs},           // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length))
+		{6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1))
+	} {
+		sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes())
+		l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb))
+	}
+
+	if (opts.GasRefunderAddr != common.Address{}) {
+		l = append(l, types.AccessTuple{
+			Address: opts.GasRefunderAddr,
+			StorageKeys: []common.Hash{
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // CommonParameters.{maxRefundeeBalance, extraGasMargin, calldataCost, maxGasTip}
+				common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // CommonParameters.{maxGasCost, maxSingleGasUsage}
+				// allowedContracts[msg.sender]; for mainnet it's: "0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41".
+				common.Hash(arbutil.PaddedKeccak256(opts.SequencerInboxAddr.Bytes(), []byte{1})),
+				// allowedRefundees[refundee]; for mainnet it's: "0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62".
+				common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{2})),
+			},
+		})
+	}
+	return l
+}
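The mapping keys above follow Solidity's storage layout: mapping[key] declared at slot index n lives at keccak256(pad32(key) ++ pad32(n)), which is the computation arbutil.PaddedKeccak256 performs. A minimal self-contained sketch of the same calculation (the poster address below is hypothetical, chosen only for illustration):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// mappingSlot computes keccak256(pad32(key) ++ pad32(slot)), the storage slot
// of mapping[key] for a Solidity mapping declared at the given slot index.
func mappingSlot(key common.Address, slot byte) common.Hash {
	return common.BytesToHash(crypto.Keccak256(
		common.LeftPadBytes(key.Bytes(), 32),
		common.LeftPadBytes([]byte{slot}, 32),
	))
}

func main() {
	// Hypothetical batch poster address, for illustration only.
	poster := common.HexToAddress("0x1111111111111111111111111111111111111111")
	fmt.Println(mappingSlot(poster, 3)) // isBatchPoster[poster], declared at slot 3
}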
+
+// checkReverts scans blocks from b.nextRevertCheckBlock through `to` for
+// reverted batch poster transactions.
+// It returns true if batch posting needs to halt, which is the case when a batch reverts,
+// unless the data poster is configured with noop storage, which can tolerate reverts.
+func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) {
+	if b.nextRevertCheckBlock > to {
+		return false, fmt.Errorf("wrong range, from: %d > to: %d", b.nextRevertCheckBlock, to)
+	}
+	for ; b.nextRevertCheckBlock <= to; b.nextRevertCheckBlock++ {
+		number := big.NewInt(b.nextRevertCheckBlock)
+		block, err := b.l1Reader.Client().BlockByNumber(ctx, number)
+		if err != nil {
+			return false, fmt.Errorf("getting block: %v by number: %w", number, err)
+		}
+		for idx, tx := range block.Transactions() {
+			from, err := b.l1Reader.Client().TransactionSender(ctx, tx, block.Hash(), uint(idx))
+			if err != nil {
+				return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err)
+			}
+			if from == b.dataPoster.Sender() {
+				r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash())
+				if err != nil {
+					return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err)
+				}
+				if r.Status == types.ReceiptStatusFailed {
+					shouldHalt := !b.config().DataPoster.UseNoOpStorage
+					logLevel := log.Warn
+					if shouldHalt {
+						logLevel = log.Error
+					}
+					logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash)
+					return shouldHalt, nil
+				}
+			}
+		}
+	}
+	return false, nil
+}
+
+// pollForReverts runs as a goroutine: it listens to L1 block headers and checks
+// whether any transaction made by the batch poster was reverted.
+func (b *BatchPoster) pollForReverts(ctx context.Context) {
+	headerCh, unsubscribe := b.l1Reader.Subscribe(false)
+	defer unsubscribe()
+
+	for {
+		// Poll until:
+		// - the L1 headers reader channel is closed, or
+		// - the polling context is done, or
+		// - we see a transaction from the data poster in a block that was reverted.
+		select {
+		case h, ok := <-headerCh:
+			if !ok {
+				log.Info("L1 headers channel checking for batch poster reverts has been closed")
+				return
+			}
+			blockNum := h.Number.Int64()
+			// If this is the first block header we've seen, start checking from this block.
+			// We may see the same block number again if there is an L1 reorg; in that
+			// case we check the block again.
+			if b.nextRevertCheckBlock == 0 || b.nextRevertCheckBlock > blockNum {
+				b.nextRevertCheckBlock = blockNum
+			}
+			if blockNum-b.nextRevertCheckBlock > 100 {
+				log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", b.nextRevertCheckBlock, "current", blockNum)
+				b.nextRevertCheckBlock = blockNum
+				continue
+			}
+
+			reverted, err := b.checkReverts(ctx, blockNum)
+			if err != nil {
+				logLevel := log.Warn
+				if strings.Contains(err.Error(), "not found") {
+					// Just parent chain node inconsistency:
+					// one node sent us a block, but another didn't have it.
+					// We'll try to check this block again next loop.
+					logLevel = log.Debug
+				}
+				logLevel("Error checking batch reverts", "err", err)
+				continue
+			}
+			if reverted {
+				b.batchReverted.Store(true)
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (b *BatchPoster) getBatchPosterPosition(ctx context.Context, blockNum *big.Int) ([]byte, error) {
 	bigInboxBatchCount, err := b.seqInbox.BatchCount(&bind.CallOpts{Context: ctx, BlockNumber: blockNum})
 	if err != nil {
-		return batchPosterPosition{}, fmt.Errorf("error getting latest batch count: %w", err)
+		return nil, fmt.Errorf("error getting latest batch count: %w", err)
 	}
 	inboxBatchCount := bigInboxBatchCount.Uint64()
 	var prevBatchMeta BatchMetadata
@@ -219,14 +481,14 @@ func (b *BatchPoster) getBatchPosterPosition(ctx context.Context, blockNum *big.
var err error prevBatchMeta, err = b.inbox.GetBatchMetadata(inboxBatchCount - 1) if err != nil { - return batchPosterPosition{}, fmt.Errorf("error getting latest batch metadata: %w", err) + return nil, fmt.Errorf("error getting latest batch metadata: %w", err) } } - return batchPosterPosition{ + return rlp.EncodeToBytes(batchPosterPosition{ MessageCount: prevBatchMeta.MessageCount, DelayedMessageCount: prevBatchMeta.DelayedMessageCount, NextSeqNum: inboxBatchCount, - }, nil + }) } var errBatchAlreadyClosed = errors.New("batch segments already closed") @@ -255,8 +517,8 @@ type buildingBatch struct { } func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxBatchSize*2)) - if config.MaxBatchSize <= 40 { + compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) + if config.MaxSize <= 40 { panic("MaxBatchSize too small") } compressionLevel := config.CompressionLevel @@ -282,7 +544,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxBatchSize - 40, // TODO + sizeLimit: config.MaxSize - 40, // TODO recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: firstDelayed, @@ -531,7 +793,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, return 0, err } gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ - From: b.dataPoster.From(), + From: b.dataPoster.Sender(), To: &b.seqInboxAddr, Data: data, }) @@ -553,11 +815,20 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, return gas + config.ExtraBatchGas, nil } +const ethPosBlockTime = 12 * time.Second + func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) { - nonce, batchPosition, err := b.dataPoster.GetNextNonceAndMeta(ctx) + if b.batchReverted.Load() { + return false, fmt.Errorf("batch was reverted, not posting any more batches") + } + nonce, batchPositionBytes, err := b.dataPoster.GetNextNonceAndMeta(ctx) if err != nil { return false, err } + var batchPosition batchPosterPosition + if err := rlp.DecodeBytes(batchPositionBytes, &batchPosition); err != nil { + return false, fmt.Errorf("decoding batch position: %w", err) + } dbBatchCount, err := b.inbox.GetBatchCount() if err != nil { @@ -589,7 +860,69 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) config := b.config() - forcePostBatch := time.Since(firstMsgTime) >= config.MaxBatchPostDelay + forcePostBatch := time.Since(firstMsgTime) >= config.MaxDelay + + var l1BoundMaxBlockNumber uint64 = math.MaxUint64 + var l1BoundMaxTimestamp uint64 = math.MaxUint64 + var l1BoundMinBlockNumber uint64 + var l1BoundMinTimestamp uint64 + hasL1Bound := config.l1BlockBound != l1BlockBoundIgnore + if hasL1Bound { + var l1Bound *types.Header + var err error + if config.l1BlockBound == l1BlockBoundLatest { + l1Bound, err = b.l1Reader.LastHeader(ctx) + } else if config.l1BlockBound == l1BlockBoundSafe || config.l1BlockBound == l1BlockBoundDefault { + l1Bound, err = b.l1Reader.LatestSafeBlockHeader(ctx) + if errors.Is(err, headerreader.ErrBlockNumberNotSupported) && config.l1BlockBound == l1BlockBoundDefault { + // If getting the latest safe block is unsupported, and 
the L1BlockBound configuration is the default, + // fall back to using the latest block instead of the safe block. + l1Bound, err = b.l1Reader.LastHeader(ctx) + } + } else { + if config.l1BlockBound != l1BlockBoundFinalized { + log.Error( + "unknown L1 block bound config value; falling back on using finalized", + "l1BlockBoundString", config.L1BlockBound, + "l1BlockBoundEnum", config.l1BlockBound, + ) + } + l1Bound, err = b.l1Reader.LatestFinalizedBlockHeader(ctx) + } + if err != nil { + return false, fmt.Errorf("error getting L1 bound block: %w", err) + } + + maxTimeVariation, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ + Context: ctx, + BlockNumber: l1Bound.Number, + }) + if err != nil { + // This might happen if the latest finalized block is old enough that our L1 node no longer has its state + log.Warn("error getting max time variation on L1 bound block; falling back on latest block", "err", err) + maxTimeVariation, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) + if err != nil { + return false, fmt.Errorf("error getting max time variation: %w", err) + } + } + + l1BoundBlockNumber := arbutil.ParentHeaderToL1BlockNumber(l1Bound) + l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariation.FutureBlocks)) + l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariation.FutureSeconds)) + + if config.L1BlockBoundBypass > 0 { + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + latestBlockNumber := arbutil.ParentHeaderToL1BlockNumber(latestHeader) + blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) + timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) + + l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks)) + l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelaySeconds)) + } + } for b.building.msgCount < msgCount { msg, err := b.streamer.GetMessage(b.building.msgCount) @@ -597,6 +930,28 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) log.Error("error getting message from streamer", "error", err) break } + if msg.Message.Header.BlockNumber < l1BoundMinBlockNumber || msg.Message.Header.Timestamp < l1BoundMinTimestamp { + log.Error( + "disabling L1 bound as batch posting message is close to the maximum delay", + "blockNumber", msg.Message.Header.BlockNumber, + "l1BoundMinBlockNumber", l1BoundMinBlockNumber, + "timestamp", msg.Message.Header.Timestamp, + "l1BoundMinTimestamp", l1BoundMinTimestamp, + ) + l1BoundMaxBlockNumber = math.MaxUint64 + l1BoundMaxTimestamp = math.MaxUint64 + } + if msg.Message.Header.BlockNumber > l1BoundMaxBlockNumber || msg.Message.Header.Timestamp > l1BoundMaxTimestamp { + b.lastHitL1Bounds = time.Now() + log.Info( + "not posting more messages because block number or timestamp exceed L1 bounds", + "blockNumber", msg.Message.Header.BlockNumber, + "l1BoundMaxBlockNumber", l1BoundMaxBlockNumber, + "timestamp", msg.Message.Header.Timestamp, + "l1BoundMaxTimestamp", l1BoundMaxTimestamp, + ) + break + } success, err := b.building.segments.AddMessage(msg) if err != nil { // Clear our cache @@ -605,7 +960,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if !success { // this batch 
is full - if !config.WaitForMaxBatchPostDelay { + if !config.WaitForMaxDelay { forcePostBatch = true } b.building.haveUsefulMessage = true @@ -636,7 +991,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled if errors.Is(err, das.BatchToDasFailed) { if config.DisableDasFallbackStoreDataOnChain { - return false, errors.New("Unable to batch to DAS and fallback storing data on chain is disabled") + return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled") } log.Warn("Falling back to storing data on chain", "err", err) } else if err != nil { @@ -654,12 +1009,26 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - newMeta := batchPosterPosition{ + newMeta, err := rlp.EncodeToBytes(batchPosterPosition{ MessageCount: b.building.msgCount, DelayedMessageCount: b.building.segments.delayedMsg, NextSeqNum: batchPosition.NextSeqNum + 1, + }) + if err != nil { + return false, err } - err = b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit) + tx, err := b.dataPoster.PostTransaction(ctx, + firstMsgTime, + nonce, + newMeta, + b.seqInboxAddr, + data, + gasLimit, + new(big.Int), + b.accessList( + int(batchPosition.NextSeqNum), + int(b.building.segments.delayedMsg)), + ) if err != nil { return false, err } @@ -672,16 +1041,20 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "current delayed", b.building.segments.delayedMsg, "total segments", len(b.building.segments.rawSegments), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount unpostedMessages := msgCount - b.building.msgCount b.backlog = uint64(unpostedMessages) / uint64(postedMessages) if b.backlog > 10 { logLevel := log.Warn - if b.backlog > 30 { + if recentlyHitL1Bounds { + logLevel = log.Info + } else if b.backlog > 30 { logLevel = log.Error } logLevel( "a large batch posting backlog exists", + "recentlyHitL1Bounds", recentlyHitL1Bounds, "currentPosition", b.building.msgCount, "messageCount", msgCount, "lastPostedMessages", postedMessages, @@ -689,7 +1062,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "batchBacklogEstimate", b.backlog, ) } + if recentlyHitL1Bounds { + // This backlog isn't "real" in that we don't want to post any more messages. + // Setting the backlog to 0 here ensures that we don't lower compression as a result. + b.backlog = 0 + } b.building = nil + + // If we aren't queueing up transactions, wait for the receipt before moving on to the next batch. 
+ if config.DataPoster.UseNoOpStorage { + receipt, err := b.l1Reader.WaitForTxApproval(tx).Await(ctx) + if err != nil { + return false, fmt.Errorf("error waiting for tx receipt: %w", err) + } + log.Info("Got successful receipt from batch poster transaction", "txHash", tx.Hash(), "blockNumber", receipt.BlockNumber, "blockHash", receipt.BlockHash) + } + return true, nil } @@ -697,6 +1085,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { b.dataPoster.Start(ctxIn) b.redisLock.Start(ctxIn) b.StopWaiter.Start(ctxIn, b) + b.LaunchThread(b.pollForReverts) b.CallIteratively(func(ctx context.Context) time.Duration { var err error if common.HexToAddress(b.config().GasRefunderAddress) != (common.Address{}) { @@ -707,8 +1096,8 @@ func (b *BatchPoster) Start(ctxIn context.Context) { batchPosterGasRefunderBalance.Update(arbmath.BalancePerEther(gasRefunderBalance)) } } - if b.dataPoster.From() != (common.Address{}) { - walletBalance, err := b.l1Reader.Client().BalanceAt(ctx, b.dataPoster.From(), nil) + if b.dataPoster.Sender() != (common.Address{}) { + walletBalance, err := b.l1Reader.Client().BalanceAt(ctx, b.dataPoster.Sender(), nil) if err != nil { log.Warn("error fetching batch poster wallet balance", "err", err) } else { @@ -717,30 +1106,32 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } if !b.redisLock.AttemptLock(ctx) { b.building = nil - return b.config().BatchPollDelay + return b.config().PollInterval } posted, err := b.maybePostSequencerBatch(ctx) + ephemeralError := errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace) + if !ephemeralError { + b.firstEphemeralError = time.Time{} + } if err != nil { b.building = nil logLevel := log.Error - if errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, dataposter.ErrStorageRace) { + if ephemeralError { // Likely the inbox tracker just isn't caught up. // Let's see if this error disappears naturally. - if b.firstAccErr == (time.Time{}) { - b.firstAccErr = time.Now() + if b.firstEphemeralError == (time.Time{}) { + b.firstEphemeralError = time.Now() logLevel = log.Debug - } else if time.Since(b.firstAccErr) < time.Minute { + } else if time.Since(b.firstEphemeralError) < time.Minute { logLevel = log.Debug } - } else { - b.firstAccErr = time.Time{} } logLevel("error posting batch", "err", err) - return b.config().PostingErrorDelay + return b.config().ErrorDelay } else if posted { return 0 } else { - return b.config().BatchPollDelay + return b.config().PollInterval } }) } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index ff0dcfebcf..b5be06af56 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -1,222 +1,346 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2023, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE +// Package dataposter implements generic functionality to post transactions. 
package dataposter import ( "context" + "crypto/tls" + "crypto/x509" "errors" "fmt" "math/big" + "net/http" + "os" "strings" "sync" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/go-redis/redis/v8" + "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" + "github.com/offchainlabs/nitro/arbnode/dataposter/noop" + "github.com/offchainlabs/nitro/arbnode/dataposter/slice" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" - flag "github.com/spf13/pflag" -) - -type queuedTransaction[Meta any] struct { - FullTx *types.Transaction - Data types.DynamicFeeTx - Meta Meta - Sent bool - Created time.Time // may be earlier than the tx was given to the tx poster - NextReplacement time.Time -} - -type QueueStorage[Item any] interface { - GetContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*Item, error) - GetLast(ctx context.Context) (*Item, error) - Prune(ctx context.Context, keepStartingAt uint64) error - Put(ctx context.Context, index uint64, prevItem *Item, newItem *Item) error - Length(ctx context.Context) (int, error) - IsPersistent() bool -} - -type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` - WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` - MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` - MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` - TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` - UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` - MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` -} - -type DataPosterConfigFetcher func() *DataPosterConfig - -func DataPosterConfigAddOptions(prefix string, f *flag.FlagSet) { - f.String(prefix+".replacement-times", DefaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") - f.Bool(prefix+".wait-for-l1-finality", DefaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") - f.Uint64(prefix+".max-mempool-transactions", DefaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") - f.Int(prefix+".max-queued-transactions", DefaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") - f.Float64(prefix+".target-price-gwei", DefaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") - f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") - 
f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") - signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) -} - -var DefaultDataPosterConfig = DataPosterConfig{ - ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", - WaitForL1Finality: true, - TargetPriceGwei: 60., - UrgencyGwei: 2., - MaxMempoolTransactions: 64, - MinTipCapGwei: 0.05, -} + "github.com/spf13/pflag" -var TestDataPosterConfig = DataPosterConfig{ - ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", - RedisSigner: signature.TestSimpleHmacConfig, - WaitForL1Finality: false, - TargetPriceGwei: 60., - UrgencyGwei: 2., - MaxMempoolTransactions: 64, - MinTipCapGwei: 0.05, -} + redisstorage "github.com/offchainlabs/nitro/arbnode/dataposter/redis" +) +// Dataposter implements functionality to post transactions on the chain. It +// is initialized with specified sender/signer and keeps nonce of that address +// as it posts transactions. +// Transactions are also saved in the queue when it's being sent, and when +// persistent storage is used for the queue, after restarting the node +// dataposter will pick up where it left. // DataPoster must be RLP serializable and deserializable -type DataPoster[Meta any] struct { +type DataPoster struct { stopwaiter.StopWaiter headerReader *headerreader.HeaderReader client arbutil.L1Interface - auth *bind.TransactOpts + sender common.Address + signer signerFn redisLock AttemptLocker - config DataPosterConfigFetcher + config ConfigFetcher replacementTimes []time.Duration - metadataRetriever func(ctx context.Context, blockNum *big.Int) (Meta, error) + metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) - // these fields are protected by the mutex + // These fields are protected by the mutex. + // TODO: factor out these fields into separate structure, since now one + // needs to make sure call sites of methods that change these values hold + // the lock (currently ensured by having comments like: + // "the mutex must be held by the caller" above the function). mutex sync.Mutex lastBlock *big.Int balance *big.Int nonce uint64 - queue QueueStorage[queuedTransaction[Meta]] + queue QueueStorage errorCount map[uint64]int // number of consecutive intermittent errors rbf-ing or sending, per nonce } +// signerFn is a signer function callback when a contract requires a method to +// sign the transaction before submission. +// This can be local or external, hence the context parameter. 
+type signerFn func(context.Context, common.Address, *types.Transaction) (*types.Transaction, error) + type AttemptLocker interface { AttemptLock(context.Context) bool } -func NewDataPoster[Meta any](headerReader *headerreader.HeaderReader, auth *bind.TransactOpts, redisClient redis.UniversalClient, redisLock AttemptLocker, config DataPosterConfigFetcher, metadataRetriever func(ctx context.Context, blockNum *big.Int) (Meta, error)) (*DataPoster[Meta], error) { - var replacementTimes []time.Duration +func parseReplacementTimes(val string) ([]time.Duration, error) { + var res []time.Duration var lastReplacementTime time.Duration - for _, s := range strings.Split(config().ReplacementTimes, ",") { + for _, s := range strings.Split(val, ",") { t, err := time.ParseDuration(s) if err != nil { - return nil, err + return nil, fmt.Errorf("parsing durations: %w", err) } if t <= lastReplacementTime { return nil, errors.New("replacement times must be increasing") } - replacementTimes = append(replacementTimes, t) + res = append(res, t) lastReplacementTime = t } - if len(replacementTimes) == 0 { - log.Warn("disabling replace-by-fee for data poster") + if len(res) == 0 { + log.Warn("Disabling replace-by-fee for data poster") } - // To avoid special casing "don't replace again", replace in 10 years - replacementTimes = append(replacementTimes, time.Hour*24*365*10) - var queue QueueStorage[queuedTransaction[Meta]] - if redisClient == nil { - queue = NewSliceStorage[queuedTransaction[Meta]]() - } else { + // To avoid special casing "don't replace again", replace in 10 years. + return append(res, time.Hour*24*365*10), nil +} + +type DataPosterOpts struct { + Database ethdb.Database + HeaderReader *headerreader.HeaderReader + Auth *bind.TransactOpts + RedisClient redis.UniversalClient + RedisLock AttemptLocker + Config ConfigFetcher + MetadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) + RedisKey string // Redis storage key +} + +func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, error) { + cfg := opts.Config() + replacementTimes, err := parseReplacementTimes(cfg.ReplacementTimes) + if err != nil { + return nil, err + } + if opts.HeaderReader.IsParentChainArbitrum() && !cfg.UseNoOpStorage { + cfg.UseNoOpStorage = true + log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool") + } + encF := func() storage.EncoderDecoderInterface { + if opts.Config().LegacyStorageEncoding { + return &storage.LegacyEncoderDecoder{} + } + return &storage.EncoderDecoder{} + } + var queue QueueStorage + switch { + case cfg.UseNoOpStorage: + queue = &noop.Storage{} + case opts.RedisClient != nil: var err error - queue, err = NewRedisStorage[queuedTransaction[Meta]](redisClient, "data-poster.queue", &config().RedisSigner) + queue, err = redisstorage.NewStorage(opts.RedisClient, opts.RedisKey, &cfg.RedisSigner, encF) if err != nil { return nil, err } - } - return &DataPoster[Meta]{ - headerReader: headerReader, - client: headerReader.Client(), - auth: auth, - config: config, + case cfg.UseDBStorage: + storage := dbstorage.New(opts.Database, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) + if cfg.Dangerous.ClearDBStorage { + if err := storage.PruneAll(ctx); err != nil { + return nil, err + } + } + queue = storage + default: + queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) + } + dp := &DataPoster{ + headerReader: opts.HeaderReader, + client: 
opts.HeaderReader.Client(),
+		sender:            opts.Auth.From,
+		signer: func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
+			return opts.Auth.Signer(addr, tx)
+		},
+		config:            opts.Config,
 		replacementTimes:  replacementTimes,
-		metadataRetriever: metadataRetriever,
+		metadataRetriever: opts.MetadataRetriever,
 		queue:             queue,
-		redisLock:         redisLock,
+		redisLock:         opts.RedisLock,
 		errorCount:        make(map[uint64]int),
-	}, nil
+	}
+	if cfg.ExternalSigner.URL != "" {
+		signer, sender, err := externalSigner(ctx, &cfg.ExternalSigner)
+		if err != nil {
+			return nil, err
+		}
+		dp.signer, dp.sender = signer, sender
+	}
+	return dp, nil
 }

-func (p *DataPoster[Meta]) From() common.Address {
-	return p.auth.From
+func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) {
+	tlsCfg := &tls.Config{
+		MinVersion: tls.VersionTLS12,
+	}
+
+	if opts.ClientCert != "" && opts.ClientPrivateKey != "" {
+		log.Info("Client certificate for external signer is enabled")
+		clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey)
+		if err != nil {
+			return nil, fmt.Errorf("error loading client certificate and private key: %w", err)
+		}
+		tlsCfg.Certificates = []tls.Certificate{clientCert}
+	}
+
+	if opts.RootCA != "" {
+		rootCrt, err := os.ReadFile(opts.RootCA)
+		if err != nil {
+			return nil, fmt.Errorf("error reading external signer root CA: %w", err)
+		}
+		rootCertPool := x509.NewCertPool()
+		rootCertPool.AppendCertsFromPEM(rootCrt)
+		tlsCfg.RootCAs = rootCertPool
+	}
+
+	return rpc.DialOptions(
+		ctx,
+		opts.URL,
+		rpc.WithHTTPClient(
+			&http.Client{
+				Transport: &http.Transport{
+					TLSClientConfig: tlsCfg,
+				},
+			},
+		),
+	)
 }
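For context on the client below: the external signer is expected to expose an eth_signTransaction-style method that accepts the transaction and returns the RLP-encoded signed bytes. A hypothetical in-process counterpart, assuming go-ethereum's RPC server conventions (this is a sketch of one possible test double, not code from this change):

// SignerAPI is a hypothetical signer service. Registered under the "eth"
// namespace, SignTransaction is served as the eth_signTransaction method
// the data poster's external signer client calls.
type SignerAPI struct {
	key    *ecdsa.PrivateKey
	signer types.Signer
}

func (a *SignerAPI) SignTransaction(_ context.Context, tx *types.Transaction) (hexutil.Bytes, error) {
	signed, err := types.SignTx(tx, a.signer, a.key)
	if err != nil {
		return nil, err
	}
	// Return the RLP bytes the client decodes and hash-checks.
	return rlp.EncodeToBytes(signed)
}

Such a service can be registered with srv := rpc.NewServer() followed by srv.RegisterName("eth", &SignerAPI{...}); the hash comparison in externalSigner below then guards against the server returning a transaction that differs from the request.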
+ // https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction + var data hexutil.Bytes + if err := client.CallContext(ctx, &data, opts.Method, tx); err != nil { + return nil, fmt.Errorf("signing transaction: %w", err) + } + var signedTx types.Transaction + if err := rlp.DecodeBytes(data, &signedTx); err != nil { + return nil, fmt.Errorf("error decoding signed transaction: %w", err) + } + if hasher == nil { + hasher = types.LatestSignerForChainID(tx.ChainId()) + } + if hasher.Hash(tx) != hasher.Hash(&signedTx) { + return nil, fmt.Errorf("transaction: %x from external signer differs from request: %x", hasher.Hash(&signedTx), hasher.Hash(tx)) + } + return &signedTx, nil + }, sender, nil +} + +func (p *DataPoster) Sender() common.Address { + return p.sender +} + +// Does basic check whether posting transaction with specified nonce would +// result in exceeding maximum queue length or maximum transactions in mempool. +func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) error { + cfg := p.config() + // If the queue has reached configured max size, don't post a transaction. + if cfg.MaxQueuedTransactions > 0 { + queueLen, err := p.queue.Length(ctx) + if err != nil { + return fmt.Errorf("getting queue length: %w", err) + } + if queueLen >= cfg.MaxQueuedTransactions { + return fmt.Errorf("posting a transaction with nonce: %d will exceed max allowed dataposter queued transactions: %d, current nonce: %d", nextNonce, cfg.MaxQueuedTransactions, p.nonce) + } + } + // Check that posting a new transaction won't exceed maximum pending + // transactions in mempool. + if cfg.MaxMempoolTransactions > 0 { + unconfirmedNonce, err := p.client.NonceAt(ctx, p.sender, nil) + if err != nil { + return fmt.Errorf("getting nonce of a dataposter sender: %w", err) + } + if nextNonce >= cfg.MaxMempoolTransactions+unconfirmedNonce { + return fmt.Errorf("posting a transaction with nonce: %d will exceed max mempool size: %d, unconfirmed nonce: %d", nextNonce, cfg.MaxMempoolTransactions, unconfirmedNonce) + } + } + return nil +} + +func (p *DataPoster) waitForL1Finality() bool { + return p.config().WaitForL1Finality && !p.headerReader.IsParentChainArbitrum() +} + +// Requires the caller hold the mutex. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. +func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { + // Ensure latest finalized block state is available. 
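+	// The block number fetched here also serves as the fallback below for
+	// querying the nonce when the queue is empty and the finalized nonce
+	// lookup fails.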
 	blockNum, err := p.client.BlockNumber(ctx)
 	if err != nil {
-		return 0, emptyMeta, err
+		return 0, nil, false, err
 	}
-	lastQueueItem, err := p.queue.GetLast(ctx)
+	lastQueueItem, err := p.queue.FetchLast(ctx)
 	if err != nil {
-		return 0, emptyMeta, err
+		return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err)
 	}
 	if lastQueueItem != nil {
 		nextNonce := lastQueueItem.Data.Nonce + 1
-		if config.MaxQueuedTransactions > 0 {
-			queueLen, err := p.queue.Length(ctx)
-			if err != nil {
-				return 0, emptyMeta, err
-			}
-			if queueLen >= config.MaxQueuedTransactions {
-				return 0, emptyMeta, fmt.Errorf("attempting to post a transaction with nonce %v while current nonce is %v would exceed max data poster queue length of %v", nextNonce, p.nonce, config.MaxQueuedTransactions)
-			}
+		if err := p.canPostWithNonce(ctx, nextNonce); err != nil {
+			return 0, nil, false, err
 		}
-		if config.MaxMempoolTransactions > 0 {
-			unconfirmedNonce, err := p.client.NonceAt(ctx, p.auth.From, nil)
-			if err != nil {
-				return 0, emptyMeta, fmt.Errorf("failed to get unconfirmed nonce: %w", err)
-			}
-			if nextNonce >= unconfirmedNonce+config.MaxMempoolTransactions {
-				return 0, emptyMeta, fmt.Errorf("attempting to post a transaction with nonce %v while unconfirmed nonce is %v would exceed max mempool transactions of %v", nextNonce, unconfirmedNonce, config.MaxMempoolTransactions)
-			}
-		}
-		return nextNonce, lastQueueItem.Meta, nil
+		return nextNonce, lastQueueItem.Meta, true, nil
 	}
-	err = p.updateNonce(ctx)
-	if err != nil {
-		if !p.queue.IsPersistent() && config.WaitForL1Finality {
-			return 0, emptyMeta, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err)
+
+	if err := p.updateNonce(ctx); err != nil {
+		if !p.queue.IsPersistent() && p.waitForL1Finality() {
+			return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err)
 		}
 		// Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue.
 		nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1))
 		log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err)
-		nonce, err := p.client.NonceAt(ctx, p.auth.From, nonceQueryBlock)
+		nonce, err := p.client.NonceAt(ctx, p.sender, nonceQueryBlock)
 		if err != nil {
-			return 0, emptyMeta, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err)
+			return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err)
 		}
 		p.lastBlock = nonceQueryBlock
 		p.nonce = nonce
 	}
-	meta, err := p.metadataRetriever(ctx, p.lastBlock)
-	return p.nonce, meta, err
+	return p.nonce, nil, false, nil
+}
+
+// GetNextNonceAndMeta generates the next nonce, validates that a transaction
+// can be posted with that nonce, and fetches "Meta" from either the last
+// queued item (if the queue isn't empty) or the metadata retriever at the
+// last block.
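+//
+// A hypothetical caller sketch (illustrative only, not part of this change):
+//
+//	nonce, meta, err := dp.GetNextNonceAndMeta(ctx)
+//	if err != nil { ... }
+//	tx, err := dp.PostTransaction(ctx, time.Now(), nonce, meta, to, calldata, gasLimit, big.NewInt(0), nil)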
+func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + if err != nil { + return 0, nil, err + } + if !hasMeta { + meta, err = p.metadataRetriever(ctx, p.lastBlock) + } + return nonce, meta, err } const minRbfIncrease = arbmath.OneInBips * 11 / 10 -func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { config := p.config() latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { @@ -225,6 +349,11 @@ func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64 if latestHeader.BaseFee == nil { return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } + softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) + softConfNonce, err := p.client.NonceAt(ctx, p.sender, softConfBlock) + if err != nil { + return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) + } newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) @@ -233,6 +362,7 @@ func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64 return nil, nil, err } newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) + newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei)) hugeTipIncrease := false if lastTipCap != nil { @@ -266,14 +396,29 @@ func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64 newFeeCap = maxFeeCap } - balanceFeeCap := new(big.Int).Div(p.balance, new(big.Int).SetUint64(gasLimit)) + latestBalance := p.balance + balanceForTx := new(big.Int).Set(latestBalance) + if config.AllocateMempoolBalance && !config.UseNoOpStorage { + // We reserve half the balance for the first transaction, and then split the remaining balance for all after that. + // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. 
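+		// For example (illustrative numbers): with balance B and
+		// max-mempool-transactions = 10, the transaction at the soft-confirmed
+		// nonce may spend up to B/2 on gas, and each later transaction up to
+		// (B/2)/9.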
+ balanceForTx.Div(balanceForTx, common.Big2) + if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + // balanceForTx /= config.MaxMempoolTransactions-1 + balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + } + } + balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { log.Error( "lack of L1 balance prevents posting transaction with desired fee cap", - "balance", p.balance, + "balance", latestBalance, + "maxTransactions", config.MaxMempoolTransactions, + "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, "desiredFeeCap", newFeeCap, "balanceFeeCap", balanceFeeCap, + "nonce", nonce, + "softConfNonce", softConfNonce, ) newFeeCap = balanceFeeCap } @@ -290,31 +435,42 @@ func (p *DataPoster[Meta]) getFeeAndTipCaps(ctx context.Context, gasLimit uint64 return newFeeCap, newTipCap, nil } -func (p *DataPoster[Meta]) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta Meta, to common.Address, calldata []byte, gasLimit uint64) error { +func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() - err := p.updateBalance(ctx) + + expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) if err != nil { - return fmt.Errorf("failed to update data poster balance: %w", err) + return nil, err + } + if nonce != expectedNonce { + return nil, fmt.Errorf("data poster expected next transaction to have nonce %v but was requested to post transaction with nonce %v", expectedNonce, nonce) } - feeCap, tipCap, err := p.getFeeAndTipCaps(ctx, gasLimit, nil, nil, dataCreatedAt, 0) + + err = p.updateBalance(ctx) if err != nil { - return err + return nil, fmt.Errorf("failed to update data poster balance: %w", err) + } + + feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0) + if err != nil { + return nil, err } inner := types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: tipCap, - GasFeeCap: feeCap, - Gas: gasLimit, - To: &to, - Value: new(big.Int), - Data: calldata, - } - fullTx, err := p.auth.Signer(p.auth.From, types.NewTx(&inner)) + Nonce: nonce, + GasTipCap: tipCap, + GasFeeCap: feeCap, + Gas: gasLimit, + To: &to, + Value: value, + Data: calldata, + AccessList: accessList, + } + fullTx, err := p.signer(ctx, p.sender, types.NewTx(&inner)) if err != nil { - return err + return nil, fmt.Errorf("signing transaction: %w", err) } - queuedTx := queuedTransaction[Meta]{ + queuedTx := storage.QueuedTransaction{ Data: inner, FullTx: fullTx, Meta: meta, @@ -322,33 +478,32 @@ func (p *DataPoster[Meta]) PostTransaction(ctx context.Context, dataCreatedAt ti Created: dataCreatedAt, NextReplacement: time.Now().Add(p.replacementTimes[0]), } - return p.sendTx(ctx, nil, &queuedTx) + return fullTx, p.sendTx(ctx, nil, &queuedTx) } // the mutex must be held by the caller -func (p *DataPoster[Meta]) saveTx(ctx context.Context, prevTx *queuedTransaction[Meta], newTx *queuedTransaction[Meta]) error { +func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTransaction) error { if prevTx != nil && prevTx.Data.Nonce != newTx.Data.Nonce { return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) } - return p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx) + if err := p.queue.Put(ctx, 
newTx.Data.Nonce, prevTx, newTx); err != nil { + return fmt.Errorf("putting new tx in the queue: %w", err) + } + return nil } -func (p *DataPoster[Meta]) sendTx(ctx context.Context, prevTx *queuedTransaction[Meta], newTx *queuedTransaction[Meta]) error { - if prevTx != newTx { - err := p.saveTx(ctx, prevTx, newTx) - if err != nil { +func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { + if prevTx == nil || (newTx.FullTx.Hash() != prevTx.FullTx.Hash()) { + if err := p.saveTx(ctx, prevTx, newTx); err != nil { return err } } - err := p.client.SendTransaction(ctx, newTx.FullTx) - if err != nil { - if strings.Contains(err.Error(), "already known") || strings.Contains(err.Error(), "nonce too low") { - log.Info("DataPoster transaction already known", "err", err, "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash()) - err = nil - } else { + if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { + if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap()) return err } + log.Info("DataPoster transaction already known", "err", err, "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash()) } else { log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap()) } @@ -357,9 +512,9 @@ func (p *DataPoster[Meta]) sendTx(ctx context.Context, prevTx *queuedTransaction return p.saveTx(ctx, newTx, &newerTx) } -// the mutex must be held by the caller -func (p *DataPoster[Meta]) replaceTx(ctx context.Context, prevTx *queuedTransaction[Meta], backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.getFeeAndTipCaps(ctx, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) +// The mutex must be held by the caller. +func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { + newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) if err != nil { return err } @@ -390,7 +545,7 @@ func (p *DataPoster[Meta]) replaceTx(ctx context.Context, prevTx *queuedTransact newTx.Sent = false newTx.Data.GasFeeCap = newFeeCap newTx.Data.GasTipCap = newTipCap - newTx.FullTx, err = p.auth.Signer(p.auth.From, types.NewTx(&newTx.Data)) + newTx.FullTx, err = p.signer(ctx, p.sender, types.NewTx(&newTx.Data)) if err != nil { return err } @@ -398,10 +553,12 @@ func (p *DataPoster[Meta]) replaceTx(ctx context.Context, prevTx *queuedTransact return p.sendTx(ctx, prevTx, &newTx) } -// the mutex must be held by the caller -func (p *DataPoster[Meta]) updateNonce(ctx context.Context) error { +// Gets latest known or finalized block header (depending on config flag), +// gets the nonce of the dataposter sender and stores it if it has increased. +// The mutex must be held by the caller. 
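+// If the fetched nonce is lower than the stored one (possible after a reorg),
+// nothing is changed; if it is unchanged, only the last block number advances.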
+func (p *DataPoster) updateNonce(ctx context.Context) error {
 	var blockNumQuery *big.Int
-	if p.config().WaitForL1Finality {
+	if p.waitForL1Finality() {
 		blockNumQuery = big.NewInt(int64(rpc.FinalizedBlockNumber))
 	}
 	header, err := p.client.HeaderByNumber(ctx, blockNumQuery)
@@ -411,39 +568,45 @@ func (p *DataPoster[Meta]) updateNonce(ctx context.Context) error {
 	if p.lastBlock != nil && arbmath.BigEquals(p.lastBlock, header.Number) {
 		return nil
 	}
-	nonce, err := p.client.NonceAt(ctx, p.auth.From, header.Number)
+	nonce, err := p.client.NonceAt(ctx, p.sender, header.Number)
 	if err != nil {
 		if p.lastBlock != nil {
-			log.Warn("failed to get current nonce", "lastBlock", p.lastBlock, "newBlock", header.Number, "err", err)
+			log.Warn("Failed to get current nonce", "lastBlock", p.lastBlock, "newBlock", header.Number, "err", err)
 			return nil
 		}
 		return err
 	}
-	if nonce > p.nonce {
-		log.Info("data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number)
-		if len(p.errorCount) > 0 {
-			for x := p.nonce; x < nonce; x++ {
-				delete(p.errorCount, x)
-			}
+	// Ignore if nonce hasn't increased.
+	if nonce <= p.nonce {
+		// Still update last block number.
+		if nonce == p.nonce {
+			p.lastBlock = header.Number
 		}
-		// We don't prune the most recent transaction in order to ensure that the data poster
-		// always has a reference point in its queue of the latest transaction nonce and metadata.
-		// nonce > 0 is implied by nonce > p.nonce, so this won't underflow.
-		err := p.queue.Prune(ctx, nonce-1)
-		if err != nil {
-			return err
+		return nil
+	}
+	log.Info("Data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number)
+	if len(p.errorCount) > 0 {
+		for x := p.nonce; x < nonce; x++ {
+			delete(p.errorCount, x)
 		}
 	}
+	// We don't prune the most recent transaction in order to ensure that the data poster
+	// always has a reference point in its queue of the latest transaction nonce and metadata.
+	// nonce > 0 is implied by nonce > p.nonce, so this won't underflow.
+	if err := p.queue.Prune(ctx, nonce-1); err != nil {
+		return err
+	}
 	// We update these two variables together because they should remain in sync even if there's an error.
 	p.lastBlock = header.Number
 	p.nonce = nonce
 	return nil
 }
 
-func (p *DataPoster[Meta]) updateBalance(ctx context.Context) error {
+// Updates the dataposter balance to the balance at the pending block.
+func (p *DataPoster) updateBalance(ctx context.Context) error {
 	// Use the pending (represented as -1) balance because we're looking at batches we'd post,
 	// so we want to see how much gas we could afford with our pending state.
-	balance, err := p.client.BalanceAt(ctx, p.auth.From, big.NewInt(-1))
+	balance, err := p.client.BalanceAt(ctx, p.sender, big.NewInt(-1))
 	if err != nil {
 		return err
 	}
@@ -453,14 +616,14 @@ func (p *DataPoster[Meta]) updateBalance(ctx context.Context) error {
 
 const maxConsecutiveIntermittentErrors = 10
 
-func (p *DataPoster[Meta]) maybeLogError(err error, tx *queuedTransaction[Meta], msg string) {
+func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg string) {
 	nonce := tx.Data.Nonce
 	if err == nil {
 		delete(p.errorCount, nonce)
 		return
 	}
 	logLevel := log.Error
-	if errors.Is(err, ErrStorageRace) {
+	if errors.Is(err, storage.ErrStorageRace) {
 		p.errorCount[nonce]++
 		if p.errorCount[nonce] <= maxConsecutiveIntermittentErrors {
 			logLevel = log.Debug
@@ -468,12 +631,13 @@ func (p *DataPoster[Meta]) maybeLogError(err error, tx *queuedTransaction[Meta],
 	} else {
 		delete(p.errorCount, nonce)
 	}
-	logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap)
+	logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas)
 }
 
 const minWait = time.Second * 10
 
-func (p *DataPoster[Meta]) Start(ctxIn context.Context) {
+// Tries to acquire the redis lock, updates balance and nonce, and attempts to
+// replace-by-fee any queued transactions that are due for replacement.
+func (p *DataPoster) Start(ctxIn context.Context) {
 	p.StopWaiter.Start(ctxIn, p)
 	p.CallIteratively(func(ctx context.Context) time.Duration {
 		p.mutex.Lock()
@@ -497,17 +661,17 @@ func (p *DataPoster[Meta]) Start(ctxIn context.Context) {
 		if maxTxsToRbf == 0 {
 			maxTxsToRbf = 512
 		}
-		unconfirmedNonce, err := p.client.NonceAt(ctx, p.auth.From, nil)
+		unconfirmedNonce, err := p.client.NonceAt(ctx, p.sender, nil)
 		if err != nil {
-			log.Warn("failed to get latest nonce", "err", err)
+			log.Warn("Failed to get latest nonce", "err", err)
 			return minWait
 		}
 		// We use unconfirmedNonce here to replace-by-fee transactions that aren't in a block,
 		// excluding those that are in an unconfirmed block. If a reorg occurs, we'll continue
 		// replacing them by fee.
-		queueContents, err := p.queue.GetContents(ctx, unconfirmedNonce, maxTxsToRbf)
+		queueContents, err := p.queue.FetchContents(ctx, unconfirmedNonce, maxTxsToRbf)
 		if err != nil {
-			log.Warn("failed to get tx queue contents", "err", err)
+			log.Error("Failed to fetch tx queue contents", "err", err)
 			return minWait
 		}
 		for index, tx := range queueContents {
@@ -539,3 +703,156 @@ func (p *DataPoster[Meta]) Start(ctxIn context.Context) {
 		return wait
 	})
 }
+
+// Implements queue-like storage that can
+// - Insert an item at a specified index
+// - Update an item on the condition that the existing value equals an assumed value
+// - Delete all items up to a specified index (prune)
+// - Calculate its length
+// Note: one of the implementations of this interface (Redis storage) does not
+// support duplicate values.
+type QueueStorage interface {
+	// Returns at most maxResults items starting from the specified index.
+	FetchContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error)
+	// Returns the item with the biggest index.
+	FetchLast(ctx context.Context) (*storage.QueuedTransaction, error)
+	// Prunes items up to (excluding) the specified index.
+	Prune(ctx context.Context, until uint64) error
+	// Inserts a new item at the specified index if the previous value matches the specified value.
+	Put(ctx context.Context, index uint64, prevItem, newItem *storage.QueuedTransaction) error
+	// Returns the size of the queue.
+	Length(ctx context.Context) (int, error)
+	// Indicates whether the queue is stored on disk.
+	IsPersistent() bool
+}
+
+type DataPosterConfig struct {
+	RedisSigner      signature.SimpleHmacConfig `koanf:"redis-signer"`
+	ReplacementTimes string                     `koanf:"replacement-times"`
+	// This is forcibly disabled if the parent chain is an Arbitrum chain,
+	// so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly.
+	WaitForL1Finality      bool              `koanf:"wait-for-l1-finality" reload:"hot"`
+	MaxMempoolTransactions uint64            `koanf:"max-mempool-transactions" reload:"hot"`
+	MaxQueuedTransactions  int               `koanf:"max-queued-transactions" reload:"hot"`
+	TargetPriceGwei        float64           `koanf:"target-price-gwei" reload:"hot"`
+	UrgencyGwei            float64           `koanf:"urgency-gwei" reload:"hot"`
+	MinFeeCapGwei          float64           `koanf:"min-fee-cap-gwei" reload:"hot"`
+	MinTipCapGwei          float64           `koanf:"min-tip-cap-gwei" reload:"hot"`
+	MaxTipCapGwei          float64           `koanf:"max-tip-cap-gwei" reload:"hot"`
+	NonceRbfSoftConfs      uint64            `koanf:"nonce-rbf-soft-confs" reload:"hot"`
+	AllocateMempoolBalance bool              `koanf:"allocate-mempool-balance" reload:"hot"`
+	UseDBStorage           bool              `koanf:"use-db-storage"`
+	UseNoOpStorage         bool              `koanf:"use-noop-storage"`
+	LegacyStorageEncoding  bool              `koanf:"legacy-storage-encoding" reload:"hot"`
+	Dangerous              DangerousConfig   `koanf:"dangerous"`
+	ExternalSigner         ExternalSignerCfg `koanf:"external-signer"`
+}
+
+type ExternalSignerCfg struct {
+	// URL of the external signer RPC server. If set, transaction options are
+	// overridden and the external signer is used for signing transactions.
+	URL string `koanf:"url"`
+	// Hex encoded Ethereum address of the external signer.
+	Address string `koanf:"address"`
+	// API method name (e.g. eth_signTransaction).
+	Method string `koanf:"method"`
+	// (Optional) Path to the external signer root CA certificate.
+	// This allows us to use self-signed certificates on the external signer.
+	RootCA string `koanf:"root-ca"`
+	// (Optional) Client certificate for mTLS.
+	ClientCert string `koanf:"client-cert"`
+	// (Optional) Client certificate key for mTLS.
+	// This is required when client-cert is set.
+	ClientPrivateKey string `koanf:"client-private-key"`
+}
+
+type DangerousConfig struct {
+	// This should be used with caution, only when the dataposter somehow gets
+	// into a bad state and we require clearing it.
+	ClearDBStorage bool `koanf:"clear-dbstorage"`
+}
+
+// ConfigFetcher is used instead of passing the config directly so that flags
+// can be reloaded dynamically.
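+//
+// A hypothetical wiring sketch (names here are illustrative, not part of this
+// change):
+//
+//	cfg := DefaultDataPosterConfig
+//	dp, err := NewDataPoster(ctx, &DataPosterOpts{
+//		Config: func() *DataPosterConfig { return &cfg },
+//		...
+//	})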
+type ConfigFetcher func() *DataPosterConfig + +func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { + f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") + f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") + f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") + f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") + f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") + f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") + f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") + f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") + f.Bool(prefix+".use-noop-storage", defaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".legacy-storage-encoding", defaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") + + signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) + addDangerousOptions(prefix+".dangerous", f) + addExternalSignerOptions(prefix+".external-signer", f) +} + +func addDangerousOptions(prefix string, f *pflag.FlagSet) { + f.Bool(prefix+".clear-dbstorage", DefaultDataPosterConfig.Dangerous.ClearDBStorage, "clear database storage") +} + +func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { + f.String(prefix+".url", DefaultDataPosterConfig.ExternalSigner.URL, "external signer url") + f.String(prefix+".address", DefaultDataPosterConfig.ExternalSigner.Address, "external signer address") + f.String(prefix+".method", DefaultDataPosterConfig.ExternalSigner.Method, "external signer method") + f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") + f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") + f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") +} + +var DefaultDataPosterConfig = DataPosterConfig{ + ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", + WaitForL1Finality: true, + TargetPriceGwei: 60., + UrgencyGwei: 
2., + MaxMempoolTransactions: 10, + MinTipCapGwei: 0.05, + MaxTipCapGwei: 5, + NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, + UseDBStorage: true, + UseNoOpStorage: false, + LegacyStorageEncoding: false, + Dangerous: DangerousConfig{ClearDBStorage: false}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, +} + +var DefaultDataPosterConfigForValidator = func() DataPosterConfig { + config := DefaultDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() + +var TestDataPosterConfig = DataPosterConfig{ + ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", + RedisSigner: signature.TestSimpleHmacConfig, + WaitForL1Finality: false, + TargetPriceGwei: 60., + UrgencyGwei: 2., + MaxMempoolTransactions: 10, + MinTipCapGwei: 0.05, + MaxTipCapGwei: 5, + NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, + UseDBStorage: false, + UseNoOpStorage: false, + LegacyStorageEncoding: false, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, +} + +var TestDataPosterConfigForValidator = func() DataPosterConfig { + config := TestDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go new file mode 100644 index 0000000000..d4d72bbbf4 --- /dev/null +++ b/arbnode/dataposter/dataposter_test.go @@ -0,0 +1,244 @@ +package dataposter + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/google/go-cmp/cmp" +) + +func TestParseReplacementTimes(t *testing.T) { + for _, tc := range []struct { + desc, replacementTimes string + want []time.Duration + wantErr bool + }{ + { + desc: "valid case", + replacementTimes: "1s,2s,1m,5m", + want: []time.Duration{ + time.Duration(time.Second), + time.Duration(2 * time.Second), + time.Duration(time.Minute), + time.Duration(5 * time.Minute), + time.Duration(time.Hour * 24 * 365 * 10), + }, + }, + { + desc: "non-increasing replacement times", + replacementTimes: "1s,2s,1m,5m,1s", + wantErr: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got, err := parseReplacementTimes(tc.replacementTimes) + if gotErr := (err != nil); gotErr != tc.wantErr { + t.Fatalf("Got error: %t, want: %t", gotErr, tc.wantErr) + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("parseReplacementTimes(%s) unexpected diff:\n%s", tc.replacementTimes, diff) + } + }) + } +} + +func TestExternalSigner(t *testing.T) { + ctx := context.Background() + httpSrv, srv := newServer(ctx, t) + t.Cleanup(func() { + if err := httpSrv.Shutdown(ctx); err != nil { + t.Fatalf("Error shutting down http server: %v", err) + } + }) + cert, key := "./testdata/localhost.crt", "./testdata/localhost.key" + go func() { + fmt.Println("Server is listening on port 1234...") + if err := httpSrv.ListenAndServeTLS(cert, key); err != nil && err != http.ErrServerClosed { + t.Errorf("ListenAndServeTLS() unexpected error: %v", err) + return + } + }() + signer, addr, err := externalSigner(ctx, + &ExternalSignerCfg{ + Address: srv.address.Hex(), + 
URL:              "https://localhost:1234",
+			Method:           "test_signTransaction",
+			RootCA:           cert,
+			ClientCert:       "./testdata/client.crt",
+			ClientPrivateKey: "./testdata/client.key",
+		})
+	if err != nil {
+		t.Fatalf("Error getting external signer: %v", err)
+	}
+	tx := types.NewTransaction(13, common.HexToAddress("0x01"), big.NewInt(1), 2, big.NewInt(3), []byte{0x01, 0x02, 0x03})
+	got, err := signer(ctx, addr, tx)
+	if err != nil {
+		t.Fatalf("Error signing transaction with external signer: %v", err)
+	}
+	want, err := srv.signerFn(addr, tx)
+	if err != nil {
+		t.Fatalf("Error signing transaction: %v", err)
+	}
+	if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" {
+		t.Errorf("Signing transaction: unexpected diff: %v\n", diff)
+	}
+}
+
+type server struct {
+	handlers map[string]func(*json.RawMessage) (string, error)
+	signerFn bind.SignerFn
+	address  common.Address
+}
+
+type request struct {
+	ID     *json.RawMessage `json:"id"`
+	Method string           `json:"method"`
+	Params *json.RawMessage `json:"params"`
+}
+
+type response struct {
+	ID     *json.RawMessage `json:"id"`
+	Result string           `json:"result,omitempty"`
+}
+
+// newServer returns an http server and a server struct that implements RPC
+// methods. It sets up an account in a temporary directory and cleans up after
+// the test is done.
+func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) {
+	t.Helper()
+	signer, address, err := setupAccount("/tmp/keystore")
+	if err != nil {
+		t.Fatalf("Error setting up account: %v", err)
+	}
+	t.Cleanup(func() { os.RemoveAll("/tmp/keystore") })
+
+	s := &server{signerFn: signer, address: address}
+	s.handlers = map[string]func(*json.RawMessage) (string, error){
+		"test_signTransaction": s.signTransaction,
+	}
+	m := http.NewServeMux()
+
+	clientCert, err := os.ReadFile("./testdata/client.crt")
+	if err != nil {
+		t.Fatalf("Error reading client certificate: %v", err)
+	}
+	pool := x509.NewCertPool()
+	pool.AppendCertsFromPEM(clientCert)
+
+	httpSrv := &http.Server{
+		Addr:        ":1234",
+		Handler:     m,
+		ReadTimeout: 5 * time.Second,
+		TLSConfig: &tls.Config{
+			MinVersion: tls.VersionTLS12,
+			ClientAuth: tls.RequireAndVerifyClientCert,
+			ClientCAs:  pool,
+		},
+	}
+	m.HandleFunc("/", s.mux)
+	return httpSrv, s
+}
+
+// setupAccount creates a new account in a given directory, unlocks it, creates
+// a signer with that account and returns it along with the account address.
+func setupAccount(dir string) (bind.SignerFn, common.Address, error) {
+	ks := keystore.NewKeyStore(
+		dir,
+		keystore.StandardScryptN,
+		keystore.StandardScryptP,
+	)
+	a, err := ks.NewAccount("password")
+	if err != nil {
+		return nil, common.Address{}, fmt.Errorf("creating account: %w", err)
+	}
+	if err := ks.Unlock(a, "password"); err != nil {
+		return nil, common.Address{}, fmt.Errorf("unlocking account: %w", err)
+	}
+	txOpts, err := bind.NewKeyStoreTransactorWithChainID(ks, a, big.NewInt(1))
+	if err != nil {
+		return nil, common.Address{}, fmt.Errorf("creating transactor: %w", err)
+	}
+	return txOpts.Signer, a.Address, nil
+}
+
+// unmarshallFirst unmarshals a slice of params and returns the first one.
+// Parameters in go-ethereum RPC calls are marshalled as slices. E.g.
+// eth_sendRawTransaction or eth_signTransaction marshal the transaction as a
+// one-element slice in the request message:
+// https://github.com/ethereum/go-ethereum/blob/0004c6b229b787281760b14fb9460ffd9c2496f1/rpc/client.go#L548
+func unmarshallFirst(params []byte) (*types.Transaction, error) {
+	var arr []apitypes.SendTxArgs
+	if err := json.Unmarshal(params, &arr); err != nil {
+		return nil, fmt.Errorf("unmarshaling first param: %w", err)
+	}
+	if len(arr) != 1 {
+		return nil, fmt.Errorf("argument should be a single transaction, but got: %d", len(arr))
+	}
+	return arr[0].ToTransaction(), nil
+}
+
+func (s *server) signTransaction(params *json.RawMessage) (string, error) {
+	tx, err := unmarshallFirst(*params)
+	if err != nil {
+		return "", err
+	}
+	signedTx, err := s.signerFn(s.address, tx)
+	if err != nil {
+		return "", fmt.Errorf("signing transaction: %w", err)
+	}
+	data, err := rlp.EncodeToBytes(signedTx)
+	if err != nil {
+		return "", fmt.Errorf("rlp encoding transaction: %w", err)
+	}
+	return hexutil.Encode(data), nil
+}
+
+func (s *server) mux(w http.ResponseWriter, r *http.Request) {
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		http.Error(w, "can't read body", http.StatusBadRequest)
+		return
+	}
+	var req request
+	if err := json.Unmarshal(body, &req); err != nil {
+		http.Error(w, "can't unmarshal JSON request", http.StatusBadRequest)
+		return
+	}
+	method, ok := s.handlers[req.Method]
+	if !ok {
+		http.Error(w, "method not found", http.StatusNotFound)
+		return
+	}
+	result, err := method(req.Params)
+	if err != nil {
+		fmt.Printf("error calling method: %v\n", err)
+		http.Error(w, "error calling method", http.StatusInternalServerError)
+		return
+	}
+	resp := response{ID: req.ID, Result: result}
+	respBytes, err := json.Marshal(resp)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("error encoding response: %v", err), http.StatusInternalServerError)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	if _, err := w.Write(respBytes); err != nil {
+		fmt.Printf("error writing response: %v\n", err)
+	}
+}
diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go
new file mode 100644
index 0000000000..f2b1854492
--- /dev/null
+++ b/arbnode/dataposter/dbstorage/storage.go
@@ -0,0 +1,192 @@
+// Copyright 2021-2023, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package dbstorage
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/cockroachdb/pebble"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+// Storage implements DB-backed storage for the batch poster.
+type Storage struct {
+	db     ethdb.Database
+	encDec storage.EncoderDecoderF
+}
+
+var (
+	// Value at this index holds the *index* of the last item.
+	// Keys that we never want to be accidentally deleted by "Prune()" should be
+	// lexicographically less than the minimum index (that is "0"), hence the
+	// prefix ".".
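+	// E.g. idxToKey(0) is "00000000000000000000"; "." (0x2e) sorts before "0"
+	// (0x30), so these keys are never visited by Prune's index-range iteration.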
+	lastItemIdxKey = []byte(".last_item_idx_key")
+	countKey       = []byte(".count_key")
+)
+
+func New(db ethdb.Database, enc storage.EncoderDecoderF) *Storage {
+	return &Storage{db: db, encDec: enc}
+}
+
+func idxToKey(idx uint64) []byte {
+	return []byte(fmt.Sprintf("%020d", idx))
+}
+
+func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) {
+	var res []*storage.QueuedTransaction
+	it := s.db.NewIterator([]byte(""), idxToKey(startingIndex))
+	defer it.Release()
+	for i := 0; i < int(maxResults); i++ {
+		if !it.Next() {
+			break
+		}
+		item, err := s.encDec().Decode(it.Value())
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, item)
+	}
+	return res, it.Error()
+}
+
+func (s *Storage) lastItemIdx(context.Context) ([]byte, error) {
+	return s.db.Get(lastItemIdxKey)
+}
+
+func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) {
+	size, err := s.Length(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if size == 0 {
+		return nil, nil
+	}
+	lastItemIdx, err := s.lastItemIdx(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("getting last item index: %w", err)
+	}
+	val, err := s.db.Get(lastItemIdx)
+	if err != nil {
+		return nil, err
+	}
+	return s.encDec().Decode(val)
+}
+
+func (s *Storage) PruneAll(ctx context.Context) error {
+	idx, err := s.lastItemIdx(ctx)
+	if err != nil {
+		return fmt.Errorf("pruning all keys: %w", err)
+	}
+	until, err := strconv.Atoi(string(idx))
+	if err != nil {
+		return fmt.Errorf("converting last item index bytes to integer: %w", err)
+	}
+	return s.Prune(ctx, uint64(until+1))
+}
+
+func (s *Storage) Prune(ctx context.Context, until uint64) error {
+	cnt, err := s.Length(ctx)
+	if err != nil {
+		return err
+	}
+	end := idxToKey(until)
+	it := s.db.NewIterator([]byte{}, idxToKey(0))
+	defer it.Release()
+	b := s.db.NewBatch()
+	for it.Next() {
+		if bytes.Compare(it.Key(), end) >= 0 {
+			break
+		}
+		if err := b.Delete(it.Key()); err != nil {
+			return fmt.Errorf("deleting key: %w", err)
+		}
+		cnt--
+	}
+	if err := b.Put(countKey, []byte(strconv.Itoa(cnt))); err != nil {
+		return fmt.Errorf("updating length counter: %w", err)
+	}
+	return b.Write()
+}
+
+// valueAt returns the value at key. If it doesn't exist then it returns
+// encoded bytes of nil.
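+// This lets Put compare the stored bytes against the encoding of a nil
+// previous item uniformly when nothing exists at the key yet.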
+func (s *Storage) valueAt(_ context.Context, key []byte) ([]byte, error) { + val, err := s.db.Get(key) + if err != nil { + if isErrNotFound(err) { + return s.encDec().Encode((*storage.QueuedTransaction)(nil)) + } + return nil, err + } + return val, nil +} + +func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.QueuedTransaction) error { + key := idxToKey(index) + stored, err := s.valueAt(ctx, key) + if err != nil { + return err + } + prevEnc, err := s.encDec().Encode(prev) + if err != nil { + return fmt.Errorf("encoding previous item: %w", err) + } + if !bytes.Equal(stored, prevEnc) { + return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, stored, prevEnc) + } + newEnc, err := s.encDec().Encode(new) + if err != nil { + return fmt.Errorf("encoding new item: %w", err) + } + b := s.db.NewBatch() + cnt, err := s.Length(ctx) + if err != nil { + return err + } + if err := b.Put(key, newEnc); err != nil { + return fmt.Errorf("updating value at: %v: %w", key, err) + } + lastItemIdx, err := s.lastItemIdx(ctx) + if err != nil && !isErrNotFound(err) { + return err + } + if isErrNotFound(err) { + lastItemIdx = []byte{} + } + if cnt == 0 || bytes.Compare(key, lastItemIdx) > 0 { + if err := b.Put(lastItemIdxKey, key); err != nil { + return fmt.Errorf("updating last item: %w", err) + } + if err := b.Put(countKey, []byte(strconv.Itoa(cnt+1))); err != nil { + return fmt.Errorf("updating length counter: %w", err) + } + } + return b.Write() +} + +func (s *Storage) Length(context.Context) (int, error) { + val, err := s.db.Get(countKey) + if err != nil { + if isErrNotFound(err) { + return 0, nil + } + return 0, err + } + return strconv.Atoi(string(val)) +} + +func (s *Storage) IsPersistent() bool { + return true +} + +func isErrNotFound(err error) bool { + return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, pebble.ErrNotFound) || errors.Is(err, memorydb.ErrMemorydbNotFound) +} diff --git a/arbnode/dataposter/noop/storage.go b/arbnode/dataposter/noop/storage.go new file mode 100644 index 0000000000..b3947bcaa0 --- /dev/null +++ b/arbnode/dataposter/noop/storage.go @@ -0,0 +1,37 @@ +// Copyright 2021-2023, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE +package noop + +import ( + "context" + + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" +) + +// Storage implements noop storage for dataposter. This is for clients that want +// to have option to directly post to geth without keeping state. +type Storage struct{} + +func (s *Storage) FetchContents(_ context.Context, _, _ uint64) ([]*storage.QueuedTransaction, error) { + return nil, nil +} + +func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { + return nil, nil +} + +func (s *Storage) Prune(_ context.Context, _ uint64) error { + return nil +} + +func (s *Storage) Put(_ context.Context, _ uint64, _, _ *storage.QueuedTransaction) error { + return nil +} + +func (s *Storage) Length(context.Context) (int, error) { + return 0, nil +} + +func (s *Storage) IsPersistent() bool { + return false +} diff --git a/arbnode/dataposter/redis_storage.go b/arbnode/dataposter/redis/redisstorage.go similarity index 60% rename from arbnode/dataposter/redis_storage.go rename to arbnode/dataposter/redis/redisstorage.go index df3e894539..f2393611b2 100644 --- a/arbnode/dataposter/redis_storage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package dataposter +package redis import ( "bytes" @@ -9,24 +9,28 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "github.com/go-redis/redis/v8" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/util/signature" ) -// RedisStorage requires that Item is RLP encodable/decodable -type RedisStorage[Item any] struct { +// Storage implements redis sorted set backed storage. It does not support +// duplicate keys or values. That is, putting the same element on different +// indexes will not yield expected behavior. +// More at: https://redis.io/commands/zadd/. +type Storage struct { client redis.UniversalClient signer *signature.SimpleHmac key string + encDec storage.EncoderDecoderF } -func NewRedisStorage[Item any](client redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig) (*RedisStorage[Item], error) { +func NewStorage(client redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig, enc storage.EncoderDecoderF) (*Storage, error) { signer, err := signature.NewSimpleHmac(signerConf) if err != nil { return nil, err } - return &RedisStorage[Item]{client, signer, key}, nil + return &Storage{client, signer, key, enc}, nil } func joinHmacMsg(msg []byte, sig []byte) ([]byte, error) { @@ -36,7 +40,7 @@ func joinHmacMsg(msg []byte, sig []byte) ([]byte, error) { return append(sig, msg...), nil } -func (s *RedisStorage[Item]) peelVerifySignature(data []byte) ([]byte, error) { +func (s *Storage) peelVerifySignature(data []byte) ([]byte, error) { if len(data) < 32 { return nil, errors.New("data is too short to contain message signature") } @@ -48,7 +52,7 @@ func (s *RedisStorage[Item]) peelVerifySignature(data []byte) ([]byte, error) { return data[32:], nil } -func (s *RedisStorage[Item]) GetContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*Item, error) { +func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) { query := redis.ZRangeArgs{ Key: s.key, ByScore: true, @@ -59,23 +63,22 @@ func (s *RedisStorage[Item]) GetContents(ctx context.Context, startingIndex uint if err != nil { return nil, err } - var items []*Item + var items []*storage.QueuedTransaction for _, itemString := range itemStrings { - var item Item data, err := s.peelVerifySignature([]byte(itemString)) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - items = append(items, &item) + items = append(items, item) } return items, nil } -func (s *RedisStorage[Item]) GetLast(ctx context.Context) (*Item, error) { +func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { query := redis.ZRangeArgs{ Key: s.key, Start: 0, @@ -89,33 +92,41 @@ func (s *RedisStorage[Item]) GetLast(ctx context.Context) (*Item, error) { if len(itemStrings) > 1 { return nil, fmt.Errorf("expected only one return value for GetLast but got %v", len(itemStrings)) } - var ret *Item + var ret *storage.QueuedTransaction if len(itemStrings) > 0 { - var item Item data, err := s.peelVerifySignature([]byte(itemStrings[0])) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - ret = &item + ret = item } return ret, nil } -func (s *RedisStorage[Item]) Prune(ctx context.Context, keepStartingAt uint64) 
error { - if keepStartingAt > 0 { - return s.client.ZRemRangeByScore(ctx, s.key, "-inf", fmt.Sprintf("%v", keepStartingAt-1)).Err() +func (s *Storage) Prune(ctx context.Context, until uint64) error { + if until > 0 { + return s.client.ZRemRangeByScore(ctx, s.key, "-inf", fmt.Sprintf("%v", until-1)).Err() } return nil } -var ErrStorageRace = errors.New("storage race error") +// normalizeDecoding decodes data (regardless of what encoding it used), and +// encodes it according to current encoding for storage. +// As a result, encoded data is transformed to currently used encoding. +func (s *Storage) normalizeDecoding(data []byte) ([]byte, error) { + item, err := s.encDec().Decode(data) + if err != nil { + return nil, err + } + return s.encDec().Encode(item) +} -func (s *RedisStorage[Item]) Put(ctx context.Context, index uint64, prevItem *Item, newItem *Item) error { - if newItem == nil { +func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.QueuedTransaction) error { + if new == nil { return fmt.Errorf("tried to insert nil item at index %v", index) } action := func(tx *redis.Tx) error { @@ -131,32 +142,35 @@ func (s *RedisStorage[Item]) Put(ctx context.Context, index uint64, prevItem *It } pipe := tx.TxPipeline() if len(haveItems) == 0 { - if prevItem != nil { - return fmt.Errorf("%w: tried to replace item at index %v but no item exists there", ErrStorageRace, index) + if prev != nil { + return fmt.Errorf("%w: tried to replace item at index %v but no item exists there", storage.ErrStorageRace, index) } } else if len(haveItems) == 1 { - if prevItem == nil { - return fmt.Errorf("%w: tried to insert new item at index %v but an item exists there", ErrStorageRace, index) + if prev == nil { + return fmt.Errorf("%w: tried to insert new item at index %v but an item exists there", storage.ErrStorageRace, index) } verifiedItem, err := s.peelVerifySignature([]byte(haveItems[0])) if err != nil { return fmt.Errorf("failed to validate item already in redis at index%v: %w", index, err) } - prevItemEncoded, err := rlp.EncodeToBytes(prevItem) + verifiedItem, err = s.normalizeDecoding(verifiedItem) + if err != nil { + return fmt.Errorf("error normalizing encoding for verified item: %w", err) + } + prevItemEncoded, err := s.encDec().Encode(prev) if err != nil { return err } if !bytes.Equal(verifiedItem, prevItemEncoded) { - return fmt.Errorf("%w: replacing different item than expected at index %v", ErrStorageRace, index) + return fmt.Errorf("%w: replacing different item than expected at index %v", storage.ErrStorageRace, index) } - err = pipe.ZRem(ctx, s.key, haveItems[0]).Err() - if err != nil { + if err := pipe.ZRem(ctx, s.key, haveItems[0]).Err(); err != nil { return err } } else { return fmt.Errorf("expected only one return value for Put but got %v", len(haveItems)) } - newItemEncoded, err := rlp.EncodeToBytes(*newItem) + newItemEncoded, err := s.encDec().Encode(new) if err != nil { return err } @@ -168,18 +182,17 @@ func (s *RedisStorage[Item]) Put(ctx context.Context, index uint64, prevItem *It if err != nil { return err } - err = pipe.ZAdd(ctx, s.key, &redis.Z{ + if err := pipe.ZAdd(ctx, s.key, &redis.Z{ Score: float64(index), Member: string(signedItem), - }).Err() - if err != nil { + }).Err(); err != nil { return err } _, err = pipe.Exec(ctx) if errors.Is(err, redis.TxFailedErr) { // Unfortunately, we can't wrap two errors. 
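+			// (Go 1.20's errors.Join or multi-%w fmt.Errorf could wrap both,
+			// assuming the module's language version permits it.)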
//nolint:errorlint - err = fmt.Errorf("%w: %v", ErrStorageRace, err.Error()) + err = fmt.Errorf("%w: %v", storage.ErrStorageRace, err.Error()) } return err } @@ -187,7 +200,7 @@ func (s *RedisStorage[Item]) Put(ctx context.Context, index uint64, prevItem *It return s.client.Watch(ctx, action, s.key) } -func (s *RedisStorage[Item]) Length(ctx context.Context) (int, error) { +func (s *Storage) Length(ctx context.Context) (int, error) { count, err := s.client.ZCount(ctx, s.key, "-inf", "+inf").Result() if err != nil { return 0, err @@ -195,6 +208,6 @@ func (s *RedisStorage[Item]) Length(ctx context.Context) (int, error) { return int(count), nil } -func (s *RedisStorage[Item]) IsPersistent() bool { +func (s *Storage) IsPersistent() bool { return true } diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go new file mode 100644 index 0000000000..04286df411 --- /dev/null +++ b/arbnode/dataposter/slice/slicestorage.go @@ -0,0 +1,108 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package slice + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" +) + +type Storage struct { + firstNonce uint64 + queue [][]byte + encDec func() storage.EncoderDecoderInterface +} + +func NewStorage(encDec func() storage.EncoderDecoderInterface) *Storage { + return &Storage{encDec: encDec} +} + +func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) { + txs := s.queue + if startingIndex >= s.firstNonce+uint64(len(s.queue)) || maxResults == 0 { + return nil, nil + } + if startingIndex > s.firstNonce { + txs = txs[startingIndex-s.firstNonce:] + } + if uint64(len(txs)) > maxResults { + txs = txs[:maxResults] + } + var res []*storage.QueuedTransaction + for _, r := range txs { + item, err := s.encDec().Decode(r) + if err != nil { + return nil, err + } + res = append(res, item) + } + return res, nil +} + +func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) { + if len(s.queue) == 0 { + return nil, nil + } + return s.encDec().Decode(s.queue[len(s.queue)-1]) +} + +func (s *Storage) Prune(_ context.Context, until uint64) error { + if until >= s.firstNonce+uint64(len(s.queue)) { + s.queue = nil + } else if until >= s.firstNonce { + s.queue = s.queue[until-s.firstNonce:] + s.firstNonce = until + } + return nil +} + +func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.QueuedTransaction) error { + if new == nil { + return fmt.Errorf("tried to insert nil item at index %v", index) + } + newEnc, err := s.encDec().Encode(new) + if err != nil { + return fmt.Errorf("encoding new item: %w", err) + } + if len(s.queue) == 0 { + if prev != nil { + return errors.New("prevItem isn't nil but queue is empty") + } + s.queue = append(s.queue, newEnc) + s.firstNonce = index + } else if index == s.firstNonce+uint64(len(s.queue)) { + if prev != nil { + return errors.New("prevItem isn't nil but item is just after end of queue") + } + s.queue = append(s.queue, newEnc) + } else if index >= s.firstNonce { + queueIdx := int(index - s.firstNonce) + if queueIdx > len(s.queue) { + return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue)) + } + prevEnc, err := s.encDec().Encode(prev) + if err != nil { + return fmt.Errorf("encoding previous item: %w", err) + } + if 
!bytes.Equal(prevEnc, s.queue[queueIdx]) { + return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, s.queue[queueIdx], prevEnc) + } + s.queue[queueIdx] = newEnc + } else { + return fmt.Errorf("attempted to set too low index %v in queue starting at %v", index, s.firstNonce) + } + return nil +} + +func (s *Storage) Length(context.Context) (int, error) { + return len(s.queue), nil +} + +func (s *Storage) IsPersistent() bool { + return false +} diff --git a/arbnode/dataposter/slice_storage.go b/arbnode/dataposter/slice_storage.go deleted file mode 100644 index 4364523d99..0000000000 --- a/arbnode/dataposter/slice_storage.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package dataposter - -import ( - "context" - "errors" - "fmt" -) - -type SliceStorage[Item any] struct { - firstNonce uint64 - queue []*Item -} - -func NewSliceStorage[Item any]() *SliceStorage[Item] { - return &SliceStorage[Item]{} -} - -func (s *SliceStorage[Item]) GetContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*Item, error) { - ret := s.queue - if startingIndex >= s.firstNonce+uint64(len(s.queue)) { - ret = nil - } else if startingIndex > s.firstNonce { - ret = ret[startingIndex-s.firstNonce:] - } - if uint64(len(ret)) > maxResults { - ret = ret[:maxResults] - } - return ret, nil -} - -func (s *SliceStorage[Item]) GetLast(ctx context.Context) (*Item, error) { - if len(s.queue) == 0 { - return nil, nil - } - return s.queue[len(s.queue)-1], nil -} - -func (s *SliceStorage[Item]) Prune(ctx context.Context, keepStartingAt uint64) error { - if keepStartingAt >= s.firstNonce+uint64(len(s.queue)) { - s.queue = nil - } else if keepStartingAt >= s.firstNonce { - s.queue = s.queue[keepStartingAt-s.firstNonce:] - s.firstNonce = keepStartingAt - } - return nil -} - -func (s *SliceStorage[Item]) Put(ctx context.Context, index uint64, prevItem *Item, newItem *Item) error { - if newItem == nil { - return fmt.Errorf("tried to insert nil item at index %v", index) - } - if len(s.queue) == 0 { - if prevItem != nil { - return errors.New("prevItem isn't nil but queue is empty") - } - s.queue = append(s.queue, newItem) - s.firstNonce = index - } else if index == s.firstNonce+uint64(len(s.queue)) { - if prevItem != nil { - return errors.New("prevItem isn't nil but item is just after end of queue") - } - s.queue = append(s.queue, newItem) - } else if index >= s.firstNonce { - queueIdx := int(index - s.firstNonce) - if queueIdx > len(s.queue) { - return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue)) - } - if prevItem != s.queue[queueIdx] { - return errors.New("prevItem isn't nil but item is just after end of queue") - } - s.queue[queueIdx] = newItem - } else { - return fmt.Errorf("attempted to set too low index %v in queue starting at %v", index, s.firstNonce) - } - return nil -} - -func (s *SliceStorage[Item]) Length(ctx context.Context) (int, error) { - return len(s.queue), nil -} - -func (s *SliceStorage[Item]) IsPersistent() bool { - return false -} diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go new file mode 100644 index 0000000000..70637c48e0 --- /dev/null +++ b/arbnode/dataposter/storage/storage.go @@ -0,0 +1,151 @@ +package storage + +import ( + "errors" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/core/types" + 
"github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbutil" +) + +var ( + ErrStorageRace = errors.New("storage race error") + + BlockValidatorPrefix string = "v" // the prefix for all block validator keys + StakerPrefix string = "S" // the prefix for all staker keys + BatchPosterPrefix string = "b" // the prefix for all batch poster keys + // TODO(anodar): move everything else from schema.go file to here once + // execution split is complete. +) + +type QueuedTransaction struct { + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta []byte + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time +} + +// LegacyQueuedTransaction is used for backwards compatibility. +// Before https://github.com/OffchainLabs/nitro/pull/1773: the queuedTransaction +// looked like this and was rlp encoded directly. After the pr, we are store +// rlp encoding of Meta into queuedTransaction and rlp encoding it once more +// to store it. +type LegacyQueuedTransaction struct { + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta BatchPosterPosition + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time +} + +// This is also for legacy reason. Since Batchposter is in arbnode package, +// we can't refer to BatchPosterPosition type there even if we export it (that +// would create cyclic dependency). +// We'll drop this struct in a few releases when we drop legacy encoding. +type BatchPosterPosition struct { + MessageCount arbutil.MessageIndex + DelayedMessageCount uint64 + NextSeqNum uint64 +} + +func DecodeLegacyQueuedTransaction(data []byte) (*LegacyQueuedTransaction, error) { + var val LegacyQueuedTransaction + if err := rlp.DecodeBytes(data, &val); err != nil { + return nil, fmt.Errorf("decoding legacy queued transaction: %w", err) + } + return &val, nil +} + +func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransaction, error) { + meta, err := rlp.EncodeToBytes(legacyQT.Meta) + if err != nil { + return nil, fmt.Errorf("converting legacy to queued transaction: %w", err) + } + return &QueuedTransaction{ + FullTx: legacyQT.FullTx, + Data: legacyQT.Data, + Meta: meta, + Sent: legacyQT.Sent, + Created: legacyQT.Created, + NextReplacement: legacyQT.NextReplacement, + }, nil +} + +func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, error) { + if qt == nil { + return nil, nil + } + var meta BatchPosterPosition + if qt.Meta != nil { + if err := rlp.DecodeBytes(qt.Meta, &meta); err != nil { + return nil, fmt.Errorf("converting queued transaction to legacy: %w", err) + } + } + return &LegacyQueuedTransaction{ + FullTx: qt.FullTx, + Data: qt.Data, + Meta: meta, + Sent: qt.Sent, + Created: qt.Created, + NextReplacement: qt.NextReplacement, + }, nil +} + +// Decode tries to decode QueuedTransaction, if that fails it tries to decode +// into legacy queued transaction and converts to queued +func decode(data []byte) (*QueuedTransaction, error) { + var item QueuedTransaction + if err := rlp.DecodeBytes(data, &item); err != nil { + log.Debug("Failed to decode QueuedTransaction, attempting to decide legacy queued transaction", "error", err) + val, err := DecodeLegacyQueuedTransaction(data) + if err != nil { + return nil, fmt.Errorf("decoding legacy item: %w", err) + } + log.Debug("Succeeded decoding QueuedTransaction with legacy encoder") + return LegacyToQueuedTransaction(val) + } + 
return &item, nil +} + +type EncoderDecoder struct{} + +func (e *EncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) { + return rlp.EncodeToBytes(qt) +} + +func (e *EncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) { + return decode(data) +} + +type LegacyEncoderDecoder struct{} + +func (e *LegacyEncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) { + legacyQt, err := QueuedTransactionToLegacy(qt) + if err != nil { + return nil, fmt.Errorf("encoding legacy item: %w", err) + } + return rlp.EncodeToBytes(legacyQt) +} + +func (le *LegacyEncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) { + return decode(data) +} + +// Typically, interfaces belong where they are used rather than at the +// implementing site, but this one is used by all storages (besides the no-op) +// and all of them require every method of this interface. +type EncoderDecoderInterface interface { + Encode(*QueuedTransaction) ([]byte, error) + Decode([]byte) (*QueuedTransaction, error) +} + +// EncoderDecoderF is a function type that returns an encoder/decoder +// interface. This is needed to implement a hot-reloadable flag that switches +// the encoding/decoding strategy on the fly. +type EncoderDecoderF func() EncoderDecoderInterface diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go new file mode 100644 index 0000000000..adea2073e2 --- /dev/null +++ b/arbnode/dataposter/storage_test.go @@ -0,0 +1,368 @@ +package dataposter + +import ( + "context" + "math/big" + "path" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" + "github.com/offchainlabs/nitro/arbnode/dataposter/redis" + "github.com/offchainlabs/nitro/arbnode/dataposter/slice" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/signature" +) + +var ignoreData = cmp.Options{ + cmpopts.IgnoreUnexported( + types.Transaction{}, + types.DynamicFeeTx{}, + big.Int{}, + ), + cmpopts.IgnoreFields(types.Transaction{}, "hash", "size", "from"), +} + +func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage { + t.Helper() + db, err := rawdb.NewLevelDBDatabase(path.Join(t.TempDir(), "level.db"), 0, 0, "default", false) + if err != nil { + t.Fatalf("NewLevelDBDatabase() unexpected error: %v", err) + } + return dbstorage.New(db, encF) +} + +func newPebbleDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage { + t.Helper() + db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false) + if err != nil { + t.Fatalf("NewPebbleDBDatabase() unexpected error: %v", err) + } + return dbstorage.New(db, encF) +} + +func newSliceStorage(encF storage.EncoderDecoderF) *slice.Storage { + return slice.NewStorage(encF) +} + +func newRedisStorage(ctx context.Context, t *testing.T, encF storage.EncoderDecoderF) *redis.Storage { + t.Helper() + redisUrl := redisutil.CreateTestRedis(ctx, t) + client, err := redisutil.RedisClientFromURL(redisUrl) + if err != nil { + t.Fatalf("RedisClientFromURL(%q) unexpected error: %v", redisUrl, err) + } + s, err := redis.NewStorage(client, "", &signature.TestSimpleHmacConfig, encF) + if err != nil { + 
t.Fatalf("redis.NewStorage() unexpected error: %v", err) + } + return s +} + +func valueOf(t *testing.T, i int) *storage.QueuedTransaction { + t.Helper() + meta, err := rlp.EncodeToBytes(storage.BatchPosterPosition{DelayedMessageCount: uint64(i)}) + if err != nil { + t.Fatalf("Encoding batch poster position, error: %v", err) + } + return &storage.QueuedTransaction{ + FullTx: types.NewTransaction( + uint64(i), + common.Address{}, + big.NewInt(int64(i)), + uint64(i), + big.NewInt(int64(i)), + []byte{byte(i)}), + Meta: meta, + Data: types.DynamicFeeTx{ + ChainID: big.NewInt(int64(i)), + Nonce: uint64(i), + GasTipCap: big.NewInt(int64(i)), + GasFeeCap: big.NewInt(int64(i)), + Gas: uint64(i), + Value: big.NewInt(int64(i)), + Data: []byte{byte(i % 8)}, + AccessList: types.AccessList{}, + V: big.NewInt(int64(i)), + R: big.NewInt(int64(i)), + S: big.NewInt(int64(i)), + }, + } +} + +func values(t *testing.T, from, to int) []*storage.QueuedTransaction { + var res []*storage.QueuedTransaction + for i := from; i <= to; i++ { + res = append(res, valueOf(t, i)) + } + return res +} + +// Initializes the QueueStorage. Returns the same object (for convenience). +func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage { + t.Helper() + for i := 0; i < 20; i++ { + if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil { + t.Fatalf("Error putting a key/value: %v", err) + } + } + return s +} + +// Returns a map of all empty storages. +func storages(t *testing.T) map[string]QueueStorage { + t.Helper() + f := func(enc storage.EncoderDecoderInterface) storage.EncoderDecoderF { + return func() storage.EncoderDecoderInterface { + return enc + } + } + return map[string]QueueStorage{ + "levelDBLegacy": newLevelDBStorage(t, f(&storage.LegacyEncoderDecoder{})), + "sliceLegacy": newSliceStorage(f(&storage.LegacyEncoderDecoder{})), + "redisLegacy": newRedisStorage(context.Background(), t, f(&storage.LegacyEncoderDecoder{})), + "levelDB": newLevelDBStorage(t, f(&storage.EncoderDecoder{})), + "pebbleDB": newPebbleDBStorage(t, f(&storage.EncoderDecoder{})), + "slice": newSliceStorage(f(&storage.EncoderDecoder{})), + "redis": newRedisStorage(context.Background(), t, f(&storage.EncoderDecoder{})), + } +} + +// Returns a map of all initialized storages. 
+func initStorages(ctx context.Context, t *testing.T) map[string]QueueStorage { + t.Helper() + m := map[string]QueueStorage{} + for k, v := range storages(t) { + m[k] = initStorage(ctx, t, v) + } + return m +} + +func TestPruneAll(t *testing.T) { + s := newLevelDBStorage(t, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) + ctx := context.Background() + for i := 0; i < 20; i++ { + if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil { + t.Fatalf("Error putting a key/value: %v", err) + } + } + size, err := s.Length(ctx) + if err != nil { + t.Fatalf("Length() unexpected error %v", err) + } + if size != 20 { + t.Errorf("Length()=%v want 20", size) + } + if err := s.PruneAll(ctx); err != nil { + t.Fatalf("PruneAll() unexpected error: %v", err) + } + size, err = s.Length(ctx) + if err != nil { + t.Fatalf("Length() unexpected error %v", err) + } + if size != 0 { + t.Errorf("Length()=%v want 0", size) + } +} + +func TestFetchContents(t *testing.T) { + ctx := context.Background() + for name, s := range initStorages(ctx, t) { + for _, tc := range []struct { + desc string + startIdx uint64 + maxResults uint64 + want []*storage.QueuedTransaction + }{ + { + desc: "sequence with single digits", + startIdx: 5, + maxResults: 3, + want: values(t, 5, 7), + }, + { + desc: "corner case of single element", + startIdx: 0, + maxResults: 1, + want: values(t, 0, 0), + }, + { + desc: "no elements", + startIdx: 3, + maxResults: 0, + }, + { + // Making sure it's correctly ordered lexicographically. + desc: "sequence with variable number of digits", + startIdx: 9, + maxResults: 3, + want: values(t, 9, 11), + }, + { + desc: "max results goes over the last element", + startIdx: 13, + maxResults: 10, + want: values(t, 13, 19), + }, + } { + t.Run(name+"_"+tc.desc, func(t *testing.T) { + values, err := s.FetchContents(ctx, tc.startIdx, tc.maxResults) + if err != nil { + t.Fatalf("FetchContents(%d, %d) unexpected error: %v", tc.startIdx, tc.maxResults, err) + } + if diff := cmp.Diff(tc.want, values, ignoreData); diff != "" { + t.Errorf("FetchContents(%d, %d) unexpected diff:\n%s", tc.startIdx, tc.maxResults, diff) + } + }) + } + } +} + +func TestLast(t *testing.T) { + cnt := 100 + for name, s := range storages(t) { + t.Run(name, func(t *testing.T) { + ctx := context.Background() + for i := 0; i < cnt; i++ { + val := valueOf(t, i) + if err := s.Put(ctx, uint64(i), nil, val); err != nil { + t.Fatalf("Error putting a key/value: %v", err) + } + got, err := s.FetchLast(ctx) + if err != nil { + t.Fatalf("Error getting a last element: %v", err) + } + if diff := cmp.Diff(val, got, ignoreData); diff != "" { + t.Errorf("FetchLast() unexpected diff:\n%s", diff) + } + + } + }) + last := valueOf(t, cnt-1) + t.Run(name+"_update_entries", func(t *testing.T) { + ctx := context.Background() + for i := 0; i < cnt-1; i++ { + prev := valueOf(t, i) + newVal := valueOf(t, cnt+i) + if err := s.Put(ctx, uint64(i), prev, newVal); err != nil { + t.Fatalf("Error putting a key/value: %v, prev: %v, new: %v", err, prev, newVal) + } + got, err := s.FetchLast(ctx) + if err != nil { + t.Fatalf("Error getting a last element: %v", err) + } + if diff := cmp.Diff(last, got, ignoreData); diff != "" { + t.Errorf("FetchLast() unexpected diff:\n%s", diff) + } + gotCnt, err := s.Length(ctx) + if err != nil { + t.Fatalf("Length() unexpected error: %v", err) + } + if gotCnt != cnt { + t.Errorf("Length() = %d want %d", gotCnt, cnt) + } + } + }) + } +} + +func TestPrune(t *testing.T) { + ctx := context.Background() + for _, tc := 
range []struct { + desc string + pruneFrom uint64 + want []*storage.QueuedTransaction + }{ + { + desc: "prune all elements", + pruneFrom: 20, + }, + { + desc: "prune all but one", + pruneFrom: 19, + want: values(t, 19, 19), + }, + { + desc: "pruning first element", + pruneFrom: 1, + want: values(t, 1, 19), + }, + { + desc: "pruning first 11 elements", + pruneFrom: 11, + want: values(t, 11, 19), + }, + { + desc: "pruning from higher than biggest index", + pruneFrom: 30, + }, + } { + // Storages must be re-initialized in each test-case. + for name, s := range initStorages(ctx, t) { + t.Run(name+"_"+tc.desc, func(t *testing.T) { + if err := s.Prune(ctx, tc.pruneFrom); err != nil { + t.Fatalf("Prune(%d) unexpected error: %v", tc.pruneFrom, err) + } + got, err := s.FetchContents(ctx, 0, 20) + if err != nil { + t.Fatalf("FetchContents() unexpected error: %v", err) + } + if diff := cmp.Diff(tc.want, got, ignoreData); diff != "" { + t.Errorf("Prune(%d) unexpected diff:\n%s", tc.pruneFrom, diff) + } + }) + } + } +} + +func TestLength(t *testing.T) { + ctx := context.Background() + for _, tc := range []struct { + desc string + pruneFrom uint64 + }{ + { + desc: "don't prune any elements", + }, + { + desc: "prune all but one", + pruneFrom: 19, + }, + { + desc: "pruning first element", + pruneFrom: 1, + }, + { + desc: "pruning first 11 elements", + pruneFrom: 11, + }, + { + desc: "pruning from higher than biggest index", + pruneFrom: 30, + }, + } { + // Storages must be re-initialized in each test-case. + for name, s := range initStorages(ctx, t) { + t.Run(name+"_"+tc.desc, func(t *testing.T) { + if err := s.Prune(ctx, tc.pruneFrom); err != nil { + t.Fatalf("Prune(%d) unexpected error: %v", tc.pruneFrom, err) + } + got, err := s.Length(ctx) + if err != nil { + t.Fatalf("Length() unexpected error: %v", err) + } + if want := arbmath.MaxInt(0, 20-int(tc.pruneFrom)); got != want { + t.Errorf("Length() = %d want %d", got, want) + } + }) + } + + } +} diff --git a/arbnode/dataposter/testdata/client.cnf b/arbnode/dataposter/testdata/client.cnf new file mode 100644 index 0000000000..8c15cc3dbc --- /dev/null +++ b/arbnode/dataposter/testdata/client.cnf @@ -0,0 +1,48 @@ +[req] +default_bits = 2048 +default_keyfile = server-key.pem +distinguished_name = subject +req_extensions = req_ext +x509_extensions = x509_ext +string_mask = utf8only + +[subject] +countryName = CH +countryName_default = CH + +stateOrProvinceName = Zurich +stateOrProvinceName_default = ZH + +localityName = city +localityName_default = Zurich + +organizationName = Offchain Labs +organizationName_default = Offchain Labs + +commonName = offchainlabs.ch +commonName_default = localhost + +emailAddress = Email Address +emailAddress_default = notabigdeal@offchainlabs.ch + +[x509_ext] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[req_ext] +subjectKeyIdentifier = hash + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + diff --git a/arbnode/dataposter/testdata/client.crt b/arbnode/dataposter/testdata/client.crt new file mode 100644 index 0000000000..3d494be820 --- /dev/null +++ b/arbnode/dataposter/testdata/client.crt @@ -0,0 +1,28 @@ +-----BEGIN 
CERTIFICATE----- +MIIE0jCCA7qgAwIBAgIUPaBB3/hHMpZfGB3VOw1+mHG4LnUwDQYJKoZIhvcNAQEL +BQAwgYMxCzAJBgNVBAYTAkNIMQswCQYDVQQIDAJaSDEPMA0GA1UEBwwGWnVyaWNo +MRYwFAYDVQQKDA1PZmZjaGFpbiBMYWJzMRIwEAYDVQQDDAlsb2NhbGhvc3QxKjAo +BgkqhkiG9w0BCQEWG25vdGFiaWdkZWFsQG9mZmNoYWlubGFicy5jaDAeFw0yMzEw +MTYxNDU2MjhaFw0yNDEwMTUxNDU2MjhaMIGDMQswCQYDVQQGEwJDSDELMAkGA1UE +CAwCWkgxDzANBgNVBAcMBlp1cmljaDEWMBQGA1UECgwNT2ZmY2hhaW4gTGFiczES +MBAGA1UEAwwJbG9jYWxob3N0MSowKAYJKoZIhvcNAQkBFhtub3RhYmlnZGVhbEBv +ZmZjaGFpbmxhYnMuY2gwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1 +1asfUzv07QTVwlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSH +JPFNbZB3dmBuqDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0D +S6HeL/6DFoTQ2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuG +Whw3oXz9gU/8gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06c +QrMKrgFfF7a5kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55H +HfQi6x8cbM46/h3riZA3AgMBAAGjggE6MIIBNjAdBgNVHQ4EFgQUQD2BOems0+JQ +br234cW5noMmXRIwga0GA1UdIwSBpTCBoqGBiaSBhjCBgzELMAkGA1UEBhMCQ0gx +CzAJBgNVBAgMAlpIMQ8wDQYDVQQHDAZadXJpY2gxFjAUBgNVBAoMDU9mZmNoYWlu +IExhYnMxEjAQBgNVBAMMCWxvY2FsaG9zdDEqMCgGCSqGSIb3DQEJARYbbm90YWJp +Z2RlYWxAb2ZmY2hhaW5sYWJzLmNoghQ9oEHf+Ecyll8YHdU7DX6YcbgudTAJBgNV +HRMEAjAAMAsGA1UdDwQEAwIFoDAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4w +LjAuMTAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNh +dGUwDQYJKoZIhvcNAQELBQADggEBAF4EVkOZZeMIvv0JViP7NsmIl2ke/935x6Hd +hQiLUw13XHYXzMa5/8Y5fnKjttBODpFoQlwjgI18vzuYzItYMBc2cabQJcpfG+Wq +M3m/wl1TC2XOuHj1E4RA/nU3tslntahtXG+vkks9RN+f9irHUhDRR6AGSnSB2Gi/ +B2OGmXn7S4Qge8+fGHAjN+tlu+tOoEWP6R3if/a9UIe5EGM8QTe4zw6lr+iPrOhC +M94pK5IEWn5IIGhr3zJIYkm/Dp+rFqhV1sqPOjjFLVCA7KJ3jVVVHlcm4Xa/+fyk +CIm7/VAmnbeUNlMbkXNOfQMeku8Iwsu80pvf3kjhU/PgO/5oojk= +-----END CERTIFICATE----- diff --git a/arbnode/dataposter/testdata/client.key b/arbnode/dataposter/testdata/client.key new file mode 100644 index 0000000000..b14941dd9f --- /dev/null +++ b/arbnode/dataposter/testdata/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC11asfUzv07QTV +wlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSHJPFNbZB3dmBu +qDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0DS6HeL/6DFoTQ +2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuGWhw3oXz9gU/8 +gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06cQrMKrgFfF7a5 +kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55HHfQi6x8cbM46 +/h3riZA3AgMBAAECggEADUboCYMCpm+LqIhzNCtqswQD6QsiSwCmqs8nuKZGk9ue ++hmZj5IpgMJZLrgvWY4s+PGfgiRR/28QCBrVXkETiZ5zirQFN4tvLlKcSK4xZf29 +FBRUCiPxck36NhiqrBNOi1Mn8BKedl4cESkvSu1cvcmeOh100HPcHfLDVqHx3qsl +D/5yMkT2+zdhtLa+X3nkAa+3aibOvgtyfkV679e20CG6h89N9GBKkTXO8ioLZZVm +84ksnd4FcpTo7ebJJxElEB+ZA4akPHbF6ArUmcpqtGso5GtwqqO2ZlguSn2XQT0d +jqvOG4DwfSXk6SpE/dpWvU92fmxWAxZvGrZNgDyJ2QKBgQDyQ8NN4b80Yza/YXar +LWx8A6B0eMc1dXgt9m3UUI+titt45jEcaXhCX01FRFTznWGmWFtJmcWBoaQVPVel +IcDYQSxEuBUrCeI75ocv/IQtENaiX3TK7Nlz5RHfpQpfDVJq45lpiD38CGkYkAif +9pSzC8aup4W3WR0JJZ1AOHUZaQKBgQDAJNJnaSNzB+eDWTKCIN5V9X3QMkmjsuir +Nf2lBXHYARnlYWAbtYFG12wLJQMTNX5ewVQQrWtsdPkGPpCnPLelUTxMssrsXjej +JlLzYUfzRBqEXMI3AA9bVdiauxId2RTcp2F81SM1keCMcuHYxrzVkBSOC9u3wCnb +Whb6+feInwKBgQCbzgC5AcoaQwReqKvNAvWV/C8hONvFAbs8tBOGTBlbHsZvRnun +Lh1tciUbuwp3cmvuszxiZUakS/RexIitZrvDWIbD2y+h8kVRCL1Am0HWSdH/syxF +pXVkF5obHuVApCyxGZb8S+axRCdy6I7jcY3IaHZqtMpGVEVcMJilSKnmoQKBgQCC +tEmgaMfhhx34nqOaG4vDA4T7LEolnh1h4g9RwztnCZC5FZ1QHA79xqrLhfjqhzgY +cwChe6aYl5WSptq1uLrgLTuMnQ8m7QyB4h8JSkKse8ZiBctjqJnJssLutpSjUzk6 +xG2vgjk6RqpuP/PcB40K5cDlw7FJ9OFEQqthPMsi1wKBgQC0/vv5bY3DQ+wV6gUy 
+nFoSa/XNHaa8y7jmmlCnWJqs6DAAQQ3VW0tPX03GYL/NDcI+PwzYDHDkSB6Qa/o8 +VzVGK1/kr/+bveNvqmi0vNb54fMFLveGgsY4Cu1cffiw8m6nYJ/V4eCsHfpF1B5L +5HDnt5rFKt1Mi9WsUSRtxipxBA== +-----END PRIVATE KEY----- diff --git a/arbnode/dataposter/testdata/localhost.cnf b/arbnode/dataposter/testdata/localhost.cnf new file mode 100644 index 0000000000..41647cc422 --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.cnf @@ -0,0 +1,48 @@ +[req] +default_bits = 2048 +default_keyfile = server-key.pem +distinguished_name = subject +req_extensions = req_ext +x509_extensions = x509_ext +string_mask = utf8only + +[subject] +countryName = CH +countryName_default = CH + +stateOrProvinceName = Zurich +stateOrProvinceName_default = ZH + +localityName = city +localityName_default = Zurich + +organizationName = Offchain Labs +organizationName_default = Offchain Labs + +commonName = offchainlabs.ch +commonName_default = localhost + +emailAddress = Email Address +emailAddress_default = bigdeal@offchainlabs.ch + +[x509_ext] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[req_ext] +subjectKeyIdentifier = hash + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + diff --git a/arbnode/dataposter/testdata/localhost.crt b/arbnode/dataposter/testdata/localhost.crt new file mode 100644 index 0000000000..ca33dfc8cc --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEwzCCA6ugAwIBAgIUHx3SdpCP5jXZE7USUqX5uRNFKPIwDQYJKoZIhvcNAQEL +BQAwfzELMAkGA1UEBhMCQ0gxCzAJBgNVBAgMAlpIMQ8wDQYDVQQHDAZadXJpY2gx +FjAUBgNVBAoMDU9mZmNoYWluIExhYnMxEjAQBgNVBAMMCWxvY2FsaG9zdDEmMCQG +CSqGSIb3DQEJARYXYmlnZGVhbEBvZmZjaGFpbmxhYnMuY2gwHhcNMjMxMDE2MTQ0 +MDA1WhcNMjQxMDE1MTQ0MDA1WjB/MQswCQYDVQQGEwJDSDELMAkGA1UECAwCWkgx +DzANBgNVBAcMBlp1cmljaDEWMBQGA1UECgwNT2ZmY2hhaW4gTGFiczESMBAGA1UE +AwwJbG9jYWxob3N0MSYwJAYJKoZIhvcNAQkBFhdiaWdkZWFsQG9mZmNoYWlubGFi +cy5jaDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALg7XwaIh4l2Fp8a +MfNMdTQSMPMR0zpnicVTn/eiozWsqlAKaxmQM3PxJ0oVWW3iJ89p4rv5m+UjK6Dr +vsUQOzl8isgyGCTMnkLtxFlyallDNRDawRcuTPuNI9NkdJm+Zz7HooLzFeBDeS13 +iRPEXr1T/4af9MjOxqFvbw5xBY9k4tc2hPp6q00948gPWKIB9Mz4thoB2Hl2rQBY +X/WhjSnre9o9qoyBO0XAsG0mssBs1vPa9/aEp7C5cDY0HCuM1RIjhXnRpb8lC9VQ +aC+FozDffmm23EGVpLmyPs590UOtVJdTUd6Q0TAT6d7fjCRUJ12DendQf2uMFV90 +u6Yj0zUCAwEAAaOCATUwggExMB0GA1UdDgQWBBT2B3FTGFQ49JyBgDGLoZREOIGD +DTCBqAYDVR0jBIGgMIGdoYGEpIGBMH8xCzAJBgNVBAYTAkNIMQswCQYDVQQIDAJa +SDEPMA0GA1UEBwwGWnVyaWNoMRYwFAYDVQQKDA1PZmZjaGFpbiBMYWJzMRIwEAYD +VQQDDAlsb2NhbGhvc3QxJjAkBgkqhkiG9w0BCQEWF2JpZ2RlYWxAb2ZmY2hhaW5s +YWJzLmNoghQfHdJ2kI/mNdkTtRJSpfm5E0Uo8jAJBgNVHRMEAjAAMAsGA1UdDwQE +AwIFoDAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAsBglghkgBhvhC +AQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDQYJKoZIhvcNAQEL +BQADggEBAIkhBcnLeeNwUwb+sSG4Qm8JdeplHPMeViNfFIflUfIIYS00JA2q9w8W ++6Nh8s6Dn20lQETUnesYj97BdqzLjFuJYAlblhE+zP8g/3Mkpu+wZAGvQjUIRyGT +C17BEtQQgAnv5pD22jr9hpLl2KowN6Oo1gzilCA+AtMkNZFIGDOxzuIv2u8rSD89 +R/V6UEDMCgusFJnZ/GzKkUNbsrAfNUezNUal+KzMhHGHBwg4jfCNhnAAB43eRtJA +0pSRMMLcUEQnVotXDXYC3DhJmkYp1uXOH/tWs6z9xForOkWFxNMVj+zUWBi7n3Jw +N2BXlb64D96uor13U0dmvQJ72ooJc+A= +-----END CERTIFICATE----- diff --git 
a/arbnode/dataposter/testdata/localhost.key b/arbnode/dataposter/testdata/localhost.key new file mode 100644 index 0000000000..aad9b40b3d --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4O18GiIeJdhaf +GjHzTHU0EjDzEdM6Z4nFU5/3oqM1rKpQCmsZkDNz8SdKFVlt4ifPaeK7+ZvlIyug +677FEDs5fIrIMhgkzJ5C7cRZcmpZQzUQ2sEXLkz7jSPTZHSZvmc+x6KC8xXgQ3kt +d4kTxF69U/+Gn/TIzsahb28OcQWPZOLXNoT6eqtNPePID1iiAfTM+LYaAdh5dq0A +WF/1oY0p63vaPaqMgTtFwLBtJrLAbNbz2vf2hKewuXA2NBwrjNUSI4V50aW/JQvV +UGgvhaMw335pttxBlaS5sj7OfdFDrVSXU1HekNEwE+ne34wkVCddg3p3UH9rjBVf +dLumI9M1AgMBAAECggEAHuc8oyKrQ5xmooUZHGP2pAeqJNfYXAtqoYpLwtUJ9hKy +1e7NdNIKw3fP/J4UrHk7btAm65us8hSCeMGatEErAhNZT0gR4zhcksMCBPQLkVIT ++HINYjdOzAJqoEbRRUnaVT5VDQy8HmyLCtyqhoGR18XbjshNnhKLYKCJ2z0Lrvf2 +3rU7bbt7/rvLitVhxVL8SIe2jWSfIgcEmEAZMigB9WAnUyQ/tAfbPy1I764LLfzD +nLXn7E2OH7GrxkLjOsH9kfERlur7V7IhC9NE/wI0q+rnILRa7Q3+ifRu8qla3bo1 +iyHl1ZmsYJ8Jnzbu9exzZaQmk42OoFPcMFm0mRe+2QKBgQDvRv0Q5JhBuVurkU98 +lzATwEO0uYmeWDMnHzrFSWAKr/x4LNQ9ytSCfe1aLxgOkZq6dQ3TyZiCYzpmwGz9 +K7/gghxmsVDKeCqiGVZOgFAWy7AhQyF6zM60oqqwSvJHhmGTsA/B5LPUiYe9lITW +ZSLVYkOzha7Coa++U8vPzI5VaQKBgQDFG4reFT79j8RKEm9jie6PdRdYMzOSDWty +Gjj5N9Jnlp1k/6RzCxjmp7w7yIorq/7fWZsQtt0UqgayOn25+I8dZeGC0BradUSB +tZbGElxPsF8Jg00ZvvK3G5mpZYDrJCud8Q05EaUZPXv9GuZhozEsTQgylVecVzsN +wyEK8VuZ7QKBgQChx9adUGIdtgzkILiknbh08j8U94mz1SCo5/WdpLHaKAlE29KZ +AQXUQP51Rng2iX4bab9yndCPADZheON3/debHX3EdUkRzFPPC+CN7TW5Y/jvVGtT +kxyDh6Ru1A2iDJr290iAKXjpUB/GL5/tMa5upiTuQYnasOWZgyC/nCf0WQKBgEwn +pRLDMLA1IMjhsInL3BEvU1KvjahLaQ0P1p1rlO6TAcLpBrewPPG5MwACLmhLLtFK +xJ/Dl02Jl8a61KLKxzi7iVLKZuWq00ouR8/FfkcHxOBfC6X74bkff9I0NogjVHrU +jKBVEe3blJEpGIP20mPka1tn2g68oUNi9dxNfm/NAoGAWj/Q0pgnNq0MQ8Lj6m99 +1baaXSo8biks3E3A3cqhHQm/j3SRnkf0lueQW8+r9yR9IWdYFXz5Waq13qK+lopE +KDmww0xr8dyMUYTP1vde7np2XKa/OX3iejDzbI3RcZN/DEV+dCBY8pqHHfaAaESu +fwBWvfD8wtwCZzB3lOZEi80= +-----END PRIVATE KEY----- diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index a8d3d3774b..cd9e012c48 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -43,7 +43,7 @@ type DelayedSequencerConfig struct { type DelayedSequencerConfigFetcher func() *DelayedSequencerConfig func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultSeqCoordinatorConfig.Enable, "enable sequence coordinator") + f.Bool(prefix+".enable", DefaultDelayedSequencerConfig.Enable, "enable delayed sequencer") f.Int64(prefix+".finalize-distance", DefaultDelayedSequencerConfig.FinalizeDistance, "how many blocks in the past L1 block is considered final (ignored when using Merge finality)") f.Bool(prefix+".require-full-finality", DefaultDelayedSequencerConfig.RequireFullFinality, "whether to wait for full finality before sequencing delayed messages") f.Bool(prefix+".use-merge-finality", DefaultDelayedSequencerConfig.UseMergeFinality, "whether to use The Merge's notion of finality before sequencing delayed messages") @@ -52,14 +52,14 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ Enable: false, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } var TestDelayedSequencerConfig = DelayedSequencerConfig{ Enable: true, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 
dc5ee536e2..71d7719498 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -15,7 +15,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" @@ -24,6 +26,11 @@ import ( "github.com/offchainlabs/nitro/util/containers" ) +var ( + inboxLatestBatchGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch", nil) + inboxLatestBatchMessageGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch/message", nil) +) + type InboxTracker struct { db ethdb.Database txStreamer *TransactionStreamer @@ -218,7 +225,7 @@ func (t *InboxTracker) GetBatchCount() (uint64, error) { return count, nil } -func (t *InboxTracker) FindL1BatchForMessage(pos arbutil.MessageIndex) (uint64, error) { +func (t *InboxTracker) FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, error) { batchCount, err := t.GetBatchCount() if err != nil { return 0, err @@ -719,6 +726,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L "l1Block", latestL1Block, "l1Timestamp", time.Unix(int64(latestTimestamp), 0), ) + inboxLatestBatchGauge.Update(int64(pos)) + inboxLatestBatchMessageGauge.Update(int64(newMessageCount)) if t.validator != nil { t.validator.ReorgToBatchCount(startPos) diff --git a/arbnode/maintenance.go b/arbnode/maintenance.go index 3facabbe82..259bf9292c 100644 --- a/arbnode/maintenance.go +++ b/arbnode/maintenance.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" @@ -29,12 +30,12 @@ type MaintenanceRunner struct { // lock is used to ensure that at any given time, only a single node is in // maintenance mode. - lock *SimpleRedisLock + lock *redislock.Simple } type MaintenanceConfig struct { - TimeOfDay string `koanf:"time-of-day" reload:"hot"` - Lock SimpleRedisLockConfig `koanf:"lock" reload:"hot"` + TimeOfDay string `koanf:"time-of-day" reload:"hot"` + Lock redislock.SimpleCfg `koanf:"lock" reload:"hot"` // Generated: the minutes since start of UTC day to compact at minutesAfterMidnight int @@ -72,11 +73,12 @@ func (c *MaintenanceConfig) Validate() error { func MaintenanceConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".time-of-day", DefaultMaintenanceConfig.TimeOfDay, "UTC 24-hour time of day to run maintenance (currently only db compaction) at (e.g. 
15:00)") - RedisLockConfigAddOptions(prefix+".lock", f) + redislock.AddConfigOptions(prefix+".lock", f) } var DefaultMaintenanceConfig = MaintenanceConfig{ TimeOfDay: "", + Lock: redislock.DefaultCfg, minutesAfterMidnight: 0, } @@ -97,9 +99,9 @@ func NewMaintenanceRunner(config MaintenanceConfigFetcher, seqCoordinator *SeqCo } if seqCoordinator != nil { - c := func() *SimpleRedisLockConfig { return &cfg.Lock } + c := func() *redislock.SimpleCfg { return &cfg.Lock } r := func() bool { return true } // always ready to lock - rl, err := NewSimpleRedisLock(seqCoordinator.Client, c, r) + rl, err := redislock.NewSimple(seqCoordinator.Client, c, r) if err != nil { return nil, fmt.Errorf("creating new simple redis lock: %w", err) } diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index aeee07ca73..31bf1a63ff 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -23,30 +23,33 @@ import ( type MessagePruner struct { stopwaiter.StopWaiter - transactionStreamer *TransactionStreamer - inboxTracker *InboxTracker - config MessagePrunerConfigFetcher - pruningLock sync.Mutex - lastPruneDone time.Time + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + config MessagePrunerConfigFetcher + pruningLock sync.Mutex + lastPruneDone time.Time + cachedPrunedMessages uint64 + cachedPrunedDelayedMessages uint64 } type MessagePrunerConfig struct { - Enable bool `koanf:"enable"` - MessagePruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` - MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` + Enable bool `koanf:"enable"` + // Message pruning interval. + PruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` + MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` } type MessagePrunerConfigFetcher func() *MessagePrunerConfig var DefaultMessagePrunerConfig = MessagePrunerConfig{ - Enable: true, - MessagePruneInterval: time.Minute, - MinBatchesLeft: 2, + Enable: true, + PruneInterval: time.Minute, + MinBatchesLeft: 2, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultMessagePrunerConfig.Enable, "enable message pruning") - f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.MessagePruneInterval, "interval for running message pruner") + f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.PruneInterval, "interval for running message pruner") f.Uint64(prefix+".min-batches-left", DefaultMessagePrunerConfig.MinBatchesLeft, "min number of batches not pruned") } @@ -62,13 +65,13 @@ func (m *MessagePruner) Start(ctxIn context.Context) { m.StopWaiter.Start(ctxIn, m) } -func (m *MessagePruner) UpdateLatestStaked(count arbutil.MessageIndex, globalState validator.GoGlobalState) { +func (m *MessagePruner) UpdateLatestConfirmed(count arbutil.MessageIndex, globalState validator.GoGlobalState) { locked := m.pruningLock.TryLock() if !locked { return } - if m.lastPruneDone.Add(m.config().MessagePruneInterval).After(time.Now()) { + if m.lastPruneDone.Add(m.config().PruneInterval).After(time.Now()) { m.pruningLock.Unlock() return } @@ -108,11 +111,11 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g msgCount := endBatchMetadata.MessageCount delayedCount := endBatchMetadata.DelayedMessageCount - return deleteOldMessageFromDB(ctx, msgCount, delayedCount, m.inboxTracker.db, m.transactionStreamer.db) + return m.deleteOldMessagesFromDB(ctx, msgCount, delayedCount) } -func deleteOldMessageFromDB(ctx context.Context, messageCount 
arbutil.MessageIndex, delayedMessageCount uint64, inboxTrackerDb ethdb.Database, transactionStreamerDb ethdb.Database) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, transactionStreamerDb, messagePrefix, uint64(messageCount)) +func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch messages: %w", err) } @@ -120,7 +123,7 @@ func deleteOldMessageFromDB(ctx context.Context, messageCount arbutil.MessageInd log.Info("Pruned last batch messages:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) } - prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, inboxTrackerDb, rlpDelayedMessagePrefix, delayedMessageCount) + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.inboxTracker.db, rlpDelayedMessagePrefix, &m.cachedPrunedDelayedMessages, delayedMessageCount) if err != nil { return fmt.Errorf("error deleting last batch delayed messages: %w", err) } @@ -130,15 +133,25 @@ func deleteOldMessageFromDB(ctx context.Context, messageCount arbutil.MessageInd return nil } -func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, endMinKey uint64) ([]uint64, error) { - startIter := db.NewIterator(prefix, uint64ToKey(1)) - if !startIter.Next() { +// deleteFromLastPrunedUptoEndKey is similar to deleteFromRange but automatically populates the start key +// cachedStartMinKey must not be nil. It's set to the new start key at the end of this function if successful. 
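+// Caching the start key lets repeated pruning passes skip key ranges that
+// earlier passes already deleted. A rough sketch of the intended call pattern
+// (hypothetical counts and placeholder ctx/db, for illustration only):
+//
+//	var cached uint64 // zero, so the first call seeds it from the database
+//	_, _ = deleteFromLastPrunedUptoEndKey(ctx, db, messagePrefix, &cached, 5)
+//	// cached is now 4; the next call starts there instead of iterating from key 1
+//	_, _ = deleteFromLastPrunedUptoEndKey(ctx, db, messagePrefix, &cached, 10)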
+func deleteFromLastPrunedUptoEndKey(ctx context.Context, db ethdb.Database, prefix []byte, cachedStartMinKey *uint64, endMinKey uint64) ([]uint64, error) { + startMinKey := *cachedStartMinKey + if startMinKey == 0 { + startIter := db.NewIterator(prefix, uint64ToKey(1)) + if !startIter.Next() { + return nil, nil + } + startMinKey = binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) + startIter.Release() + } + if endMinKey <= startMinKey { + *cachedStartMinKey = startMinKey + return nil, nil + } - startMinKey := binary.BigEndian.Uint64(bytes.TrimPrefix(startIter.Key(), prefix)) - startIter.Release() - if endMinKey > startMinKey { - return deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) + keys, err := deleteFromRange(ctx, db, prefix, startMinKey, endMinKey-1) + if err == nil { + *cachedStartMinKey = endMinKey - 1 } - return nil, nil + return keys, err } diff --git a/arbnode/message_pruner_test.go b/arbnode/message_pruner_test.go index c0cb2cb4fe..0212ed2364 100644 --- a/arbnode/message_pruner_test.go +++ b/arbnode/message_pruner_test.go @@ -17,8 +17,8 @@ func TestMessagePrunerWithPruningEligibleMessagePresent(t *testing.T) { defer cancel() messagesCount := uint64(2 * 100 * 1024) - inboxTrackerDb, transactionStreamerDb := setupDatabase(t, 2*100*1024, 2*100*1024) - err := deleteOldMessageFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount, inboxTrackerDb, transactionStreamerDb) + inboxTrackerDb, transactionStreamerDb, pruner := setupDatabase(t, 2*100*1024, 2*100*1024) + err := pruner.deleteOldMessagesFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount) Require(t, err) checkDbKeys(t, messagesCount, transactionStreamerDb, messagePrefix) @@ -26,22 +26,21 @@ } -func TestMessagePrunerTraverseEachMessageOnlyOnce(t *testing.T) { +func TestMessagePrunerTwoHalves(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() messagesCount := uint64(10) - inboxTrackerDb, transactionStreamerDb := setupDatabase(t, messagesCount, messagesCount) - // In first iteration message till messagesCount are tried to be deleted. - err := deleteOldMessageFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount, inboxTrackerDb, transactionStreamerDb) + _, transactionStreamerDb, pruner := setupDatabase(t, messagesCount, messagesCount) + // The first iteration tries to delete messages up to messagesCount/2. + err := pruner.deleteOldMessagesFromDB(ctx, arbutil.MessageIndex(messagesCount/2), messagesCount/2) Require(t, err) - // After first iteration messagesCount/2 is reinserted in inbox db - err = inboxTrackerDb.Put(dbKey(messagePrefix, messagesCount/2), []byte{}) + // After the first iteration, all messages up to messagesCount/2 are deleted. + checkDbKeys(t, messagesCount/2, transactionStreamerDb, messagePrefix) + // The second iteration tries to delete messages up to messagesCount. + err = pruner.deleteOldMessagesFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount) Require(t, err) - // In second iteration message till messagesCount are again tried to be deleted. - err = deleteOldMessageFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount, inboxTrackerDb, transactionStreamerDb) - Require(t, err) - // In second iteration all the message till messagesCount are deleted again. + // After the second iteration, all messages up to messagesCount are deleted. 
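+	// Because deleteOldMessagesFromDB cached the previous end key in
+	// cachedPrunedMessages, this second pass starts near messagesCount/2
+	// instead of re-scanning from key 1.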
checkDbKeys(t, messagesCount, transactionStreamerDb, messagePrefix) } @@ -50,10 +49,10 @@ func TestMessagePrunerPruneTillLessThenEqualTo(t *testing.T) { defer cancel() messagesCount := uint64(10) - inboxTrackerDb, transactionStreamerDb := setupDatabase(t, 2*messagesCount, 20) + inboxTrackerDb, transactionStreamerDb, pruner := setupDatabase(t, 2*messagesCount, 20) err := inboxTrackerDb.Delete(dbKey(messagePrefix, 9)) Require(t, err) - err = deleteOldMessageFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount, inboxTrackerDb, transactionStreamerDb) + err = pruner.deleteOldMessagesFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount) Require(t, err) hasKey, err := transactionStreamerDb.Has(dbKey(messagePrefix, messagesCount)) Require(t, err) @@ -67,8 +66,8 @@ func TestMessagePrunerWithNoPruningEligibleMessagePresent(t *testing.T) { defer cancel() messagesCount := uint64(10) - inboxTrackerDb, transactionStreamerDb := setupDatabase(t, messagesCount, messagesCount) - err := deleteOldMessageFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount, inboxTrackerDb, transactionStreamerDb) + inboxTrackerDb, transactionStreamerDb, pruner := setupDatabase(t, messagesCount, messagesCount) + err := pruner.deleteOldMessagesFromDB(ctx, arbutil.MessageIndex(messagesCount), messagesCount) Require(t, err) checkDbKeys(t, uint64(messagesCount), transactionStreamerDb, messagePrefix) @@ -76,7 +75,7 @@ func TestMessagePrunerWithNoPruningEligibleMessagePresent(t *testing.T) { } -func setupDatabase(t *testing.T, messageCount, delayedMessageCount uint64) (ethdb.Database, ethdb.Database) { +func setupDatabase(t *testing.T, messageCount, delayedMessageCount uint64) (ethdb.Database, ethdb.Database, *MessagePruner) { transactionStreamerDb := rawdb.NewMemoryDatabase() for i := uint64(0); i < uint64(messageCount); i++ { @@ -90,7 +89,10 @@ func setupDatabase(t *testing.T, messageCount, delayedMessageCount uint64) (ethd Require(t, err) } - return inboxTrackerDb, transactionStreamerDb + return inboxTrackerDb, transactionStreamerDb, &MessagePruner{ + transactionStreamer: &TransactionStreamer{db: transactionStreamerDb}, + inboxTracker: &InboxTracker{db: inboxTrackerDb}, + } } func checkDbKeys(t *testing.T, endCount uint64, db ethdb.Database, prefix []byte) { diff --git a/arbnode/node.go b/arbnode/node.go index 05a6438e85..96c4f7d11a 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "math/big" + "strings" "time" flag "github.com/spf13/pflag" @@ -23,7 +24,9 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" - + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" @@ -35,16 +38,19 @@ import ( consensusapi "github.com/offchainlabs/nitro/consensus/consensusserver" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution" - "github.com/offchainlabs/nitro/execution/execclient" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + 
"github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" @@ -61,21 +67,23 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } -func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (common.Address, error) { +func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (common.Address, error) { client := l1Reader.Client() + + /// deploy eth based templates bridgeTemplate, tx, _, err := bridgegen.DeployBridge(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) } - inboxTemplate, tx, _, err := bridgegen.DeployInbox(auth, client) + inboxTemplate, tx, _, err := bridgegen.DeployInbox(auth, client, maxDataSize) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) @@ -93,16 +101,51 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("outbox deploy error: %w", err) } - bridgeCreatorAddr, tx, bridgeCreator, err := rollupgen.DeployBridgeCreator(auth, client) + ethBasedTemplates := rollupgen.BridgeCreatorBridgeContracts{ + Bridge: bridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: inboxTemplate, + RollupEventInbox: rollupEventBridgeTemplate, + Outbox: outboxTemplate, + } + + /// deploy ERC20 based templates + erc20BridgeTemplate, tx, _, err := bridgegen.DeployERC20Bridge(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) + return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) + } + + erc20InboxTemplate, tx, _, err := bridgegen.DeployERC20Inbox(auth, client, maxDataSize) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) } - tx, err = bridgeCreator.UpdateTemplates(auth, bridgeTemplate, seqInboxTemplate, inboxTemplate, rollupEventBridgeTemplate, outboxTemplate) + erc20RollupEventBridgeTemplate, tx, _, err := rollupgen.DeployERC20RollupEventInbox(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return common.Address{}, fmt.Errorf("bridge creator update templates error: %w", err) + return common.Address{}, fmt.Errorf("rollup event bridge deploy error: %w", err) + } + + erc20OutboxTemplate, tx, _, err := bridgegen.DeployERC20Outbox(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("outbox deploy error: %w", err) + } + + erc20BasedTemplates := 
rollupgen.BridgeCreatorBridgeContracts{ + Bridge: erc20BridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: erc20InboxTemplate, + RollupEventInbox: erc20RollupEventBridgeTemplate, + Outbox: erc20OutboxTemplate, + } + + bridgeCreatorAddr, tx, _, err := rollupgen.DeployBridgeCreator(auth, client, ethBasedTemplates, erc20BasedTemplates) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } return bridgeCreatorAddr, nil @@ -149,10 +192,10 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth) +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { + bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize) if err != nil { - return nil, common.Address{}, common.Address{}, common.Address{}, err + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } ospEntryAddr, challengeManagerAddr, err := deployChallengeFactory(ctx, l1Reader, auth) @@ -178,6 +221,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("rollup creator deploy error: %w", err) } + upgradeExecutor, tx, _, err := upgrade_executorgen.DeployUpgradeExecutor(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("upgrade executor deploy error: %w", err) + } + validatorUtils, tx, _, err := rollupgen.DeployValidatorUtils(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { @@ -190,6 +239,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("validator wallet creator deploy error: %w", err) } + l2FactoriesDeployHelper, tx, _, err := rollupgen.DeployDeployHelper(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("deploy helper creator deploy error: %w", err) + } + tx, err = rollupCreator.SetTemplates( auth, bridgeCreator, @@ -197,8 +252,10 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade challengeManagerAddr, rollupAdminLogic, rollupUserLogic, + upgradeExecutor, validatorUtils, validatorWalletCreator, + l2FactoriesDeployHelper, ) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { @@ -235,31 +292,39 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } } -func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, sequencer common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { - l1Reader, err := headerreader.New(ctx, l1client, readerConfig) - if err != nil { - return nil, err - } - 
l1Reader.Start(ctx) - defer l1Reader.StopAndWait() - +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } + var validatorAddrs []common.Address + for i := uint64(1); i <= authorizeValidators; i++ { + validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) + } + + deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ + Config: config, + BatchPoster: batchPoster, + Validators: validatorAddrs, + MaxDataSize: maxDataSize, + NativeToken: nativeToken, + DeployFactoriesToL2: false, + MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + } + tx, err := rollupCreator.CreateRollup( deployAuth, - config, + deployParams, ) if err != nil { return nil, fmt.Errorf("error submitting create rollup tx: %w", err) } - receipt, err := l1Reader.WaitForTxApproval(tx).Await(ctx) + receipt, err := parentChainReader.WaitForTxApproval(tx).Await(ctx) if err != nil { return nil, fmt.Errorf("error executing create rollup tx: %w", err) } @@ -268,44 +333,14 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b return nil, fmt.Errorf("error parsing rollup created log: %w", err) } - sequencerInbox, err := bridgegen.NewSequencerInbox(info.SequencerInbox, l1client) - if err != nil { - return nil, fmt.Errorf("error getting sequencer inbox: %w", err) - } - - // if a zero sequencer address is specified, don't authorize any sequencers - if sequencer != (common.Address{}) { - tx, err = sequencerInbox.SetIsBatchPoster(deployAuth, sequencer, true) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return nil, fmt.Errorf("error setting is batch poster: %w", err) - } - } - - var allowValidators []bool - var validatorAddrs []common.Address - for i := uint64(1); i <= authorizeValidators; i++ { - validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) - allowValidators = append(allowValidators, true) - } - if len(validatorAddrs) > 0 { - rollup, err := rollupgen.NewRollupAdminLogic(info.RollupAddress, l1client) - if err != nil { - return nil, fmt.Errorf("error getting rollup admin: %w", err) - } - tx, err = rollup.SetValidator(deployAuth, validatorAddrs, allowValidators) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return nil, fmt.Errorf("error setting validator: %w", err) - } - } - return &chaininfo.RollupAddresses{ Bridge: info.Bridge, Inbox: info.InboxAddress, SequencerInbox: info.SequencerInbox, DeployedAt: receipt.BlockNumber.Uint64(), Rollup: info.RollupAddress, + NativeToken: nativeToken, + UpgradeExecutor: info.UpgradeExecutor, ValidatorUtils: validatorUtils, ValidatorWalletCreator: validatorWalletCreator, }, nil @@ -328,19 +363,19 @@ var TestConfigConsensusRPC = ConfigConsensusRPC{ func ConsensusRPCAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".public", DefaultConfigConsensusRPC.Public, 
"consensus rpc is public") - f.Bool(prefix+".authenticated", DefaultConfigConsensusRPC.Public, "consensus rpc is authenticated") + f.Bool(prefix+".authenticated", DefaultConfigConsensusRPC.Authenticated, "consensus rpc is authenticated") } type Config struct { Sequencer bool `koanf:"sequencer"` - L1Reader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker"` + Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` @@ -349,11 +384,11 @@ type Config struct { ExecutionServer rpcclient.ClientConfig `koanf:"execution-server" reload:"hot"` ConsensusRPC ConfigConsensusRPC `koanf:"consensus-rpc"` Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceManagement resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` } func (c *Config) Validate() error { - if c.L1Reader.Enable && c.Sequencer && !c.DelayedSequencer.Enable { + if c.ParentChainReader.Enable && c.Sequencer && !c.DelayedSequencer.Enable { log.Warn("delayed sequencer is not enabled, despite sequencer and l1 reader being enabled") } if c.DelayedSequencer.Enable && !c.Sequencer { @@ -411,7 +446,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed } var ConfigDefault = Config{ - L1Reader: headerreader.DefaultConfig, + Sequencer: false, + ParentChainReader: headerreader.DefaultConfig, InboxReader: DefaultInboxReaderConfig, DelayedSequencer: DefaultDelayedSequencerConfig, BatchPoster: DefaultBatchPosterConfig, @@ -425,6 +461,9 @@ var ConfigDefault = Config{ Dangerous: DefaultDangerousConfig, TransactionStreamer: DefaultTransactionStreamerConfig, ExecutionServer: rpcclient.DefaultClientConfig, + ConsensusRPC: DefaultConfigConsensusRPC, + Maintenance: DefaultMaintenanceConfig, + ResourceMgmt: resourcemanager.DefaultConfig, } func ConfigDefaultL1Test() *Config { @@ -433,14 +472,14 @@ func ConfigDefaultL1Test() *Config { config.BatchPoster = TestBatchPosterConfig config.SeqCoordinator = TestSeqCoordinatorConfig config.Sequencer = true - config.Dangerous.NoCoordinator = true + config.Dangerous.NoSequencerCoordinator = true return config } func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault - config.L1Reader = headerreader.TestConfig + config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false @@ -449,6 +488,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.SyncMonitor = TestSyncMonitorConfig config.ConsensusRPC = TestConfigConsensusRPC config.ExecutionServer = rpcclient.TestClientConfig + config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" @@ -457,12 +497,13 @@ 
func ConfigDefaultL1NonSequencerTest() *Config { func ConfigDefaultL2Test() *Config { config := ConfigDefault - config.L1Reader.Enable = false + config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig - config.Feed.Input.Verifier.Dangerous.AcceptMissing = true + config.Feed.Input.Verify.Dangerous.AcceptMissing = true config.Feed.Output.Signed = false - config.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - config.SeqCoordinator.Signing.ECDSA.Dangerous.AcceptMissing = true + config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true + config.Staker = staker.TestL1ValidatorConfig config.SyncMonitor = TestSyncMonitorConfig config.ConsensusRPC = TestConfigConsensusRPC config.ExecutionServer = rpcclient.TestClientConfig @@ -474,18 +515,18 @@ func ConfigDefaultL2Test() *Config { } type DangerousConfig struct { - NoL1Listener bool `koanf:"no-l1-listener"` - NoCoordinator bool `koanf:"no-seq-coordinator"` + NoL1Listener bool `koanf:"no-l1-listener"` + NoSequencerCoordinator bool `koanf:"no-sequencer-coordinator"` } var DefaultDangerousConfig = DangerousConfig{ - NoL1Listener: false, - NoCoordinator: false, + NoL1Listener: false, + NoSequencerCoordinator: false, } func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".no-l1-listener", DefaultDangerousConfig.NoL1Listener, "DANGEROUS! disables listening to L1. To be used in test nodes only") - f.Bool(prefix+".no-seq-coordinator", DefaultDangerousConfig.NoCoordinator, "DANGEROUS! allows sequencing without sequencer-coordinator") + f.Bool(prefix+".no-sequencer-coordinator", DefaultDangerousConfig.NoSequencerCoordinator, "DANGEROUS! allows sequencing without sequencer-coordinator") } type Node struct { @@ -560,6 +601,45 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error { return nil } +func StakerDataposter( + ctx context.Context, db ethdb.Database, l1Reader *headerreader.HeaderReader, + transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor, +) (*dataposter.DataPoster, error) { + if transactOpts == nil { + return nil, nil + } + cfg := cfgFetcher.Get() + mdRetriever := func(ctx context.Context, blockNum *big.Int) ([]byte, error) { + return nil, nil + } + redisC, err := redisutil.RedisClientFromURL(cfg.Staker.RedisUrl) + if err != nil { + return nil, fmt.Errorf("creating redis client from url: %w", err) + } + lockCfgFetcher := func() *redislock.SimpleCfg { + return &cfg.Staker.RedisLock + } + redisLock, err := redislock.NewSimple(redisC, lockCfgFetcher, func() bool { return syncMonitor.Synced() }) + if err != nil { + return nil, err + } + dpCfg := func() *dataposter.DataPosterConfig { + return &cfg.Staker.DataPoster + } + return dataposter.NewDataPoster(ctx, + &dataposter.DataPosterOpts{ + Database: db, + HeaderReader: l1Reader, + Auth: transactOpts, + RedisClient: redisC, + RedisLock: redisLock, + Config: dpCfg, + MetadataRetriever: mdRetriever, + // transactOpts is non-nil, it's checked at the beginning. 
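+			// Keying the queue by the sender address should keep the queues of
+			// different stakers separate if they share one Redis instance.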
+ RedisKey: transactOpts.From.String() + ".staker-data-poster.queue", + }) +} + func createNodeImpl( ctx context.Context, stack *node.Node, @@ -589,8 +669,9 @@ func createNodeImpl( syncMonitor := NewSyncMonitor(syncConfigFetcher) var l1Reader *headerreader.HeaderReader - if config.L1Reader.Enable { - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().L1Reader }) + if config.ParentChainReader.Enable { + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys) if err != nil { return nil, err } @@ -614,7 +695,7 @@ func createNodeImpl( return nil, err } var coordinator *SeqCoordinator - var bpVerifier *contracts.BatchPosterVerifier + var bpVerifier *contracts.AddressVerifier if deployInfo != nil && l1client != nil { sequencerInboxAddr := deployInfo.SequencerInbox @@ -622,7 +703,7 @@ func createNodeImpl( if err != nil { return nil, err } - bpVerifier = contracts.NewBatchPosterVerifier(seqInboxCaller) + bpVerifier = contracts.NewAddressVerifier(seqInboxCaller) } if config.SeqCoordinator.Enable { @@ -630,8 +711,8 @@ func createNodeImpl( if err != nil { return nil, err } - } else if config.Sequencer && !config.Dangerous.NoCoordinator { - return nil, errors.New("sequencer must be enabled with coordinator, unless dangerous.no-coordinator set") + } else if config.Sequencer && !config.Dangerous.NoSequencerCoordinator { + return nil, errors.New("sequencer must be enabled with coordinator, unless dangerous.no-sequencer-coordinator set") } dbs := []ethdb.Database{arbDb} maintenanceRunner, err := NewMaintenanceRunner(func() *MaintenanceConfig { return &configFetcher.Get().Maintenance }, coordinator, dbs, exec) @@ -660,30 +741,30 @@ func createNodeImpl( } } - if !config.L1Reader.Enable { + if !config.ParentChainReader.Enable { return &Node{ - arbDb, - stack, - exec, - nil, - txStreamer, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - broadcastServer, - broadcastClients, - coordinator, - maintenanceRunner, - nil, - syncMonitor, - configFetcher, - ctx, + ArbDB: arbDb, + Stack: stack, + Execution: exec, + L1Reader: nil, + TxStreamer: txStreamer, + DeployInfo: nil, + InboxReader: nil, + InboxTracker: nil, + DelayedSequencer: nil, + BatchPoster: nil, + MessagePruner: nil, + BlockValidator: nil, + StatelessBlockValidator: nil, + Staker: nil, + BroadcastServer: broadcastServer, + BroadcastClients: broadcastClients, + SeqCoordinator: coordinator, + MaintenanceRunner: maintenanceRunner, + DASLifecycleManager: nil, + SyncMonitor: syncMonitor, + configFetcher: configFetcher, + ctx: ctx, }, nil } @@ -744,7 +825,7 @@ func createNodeImpl( inboxTracker, txStreamer, exec, - rawdb.NewTable(arbDb, BlockValidatorPrefix), + rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, @@ -778,44 +859,59 @@ func createNodeImpl( var messagePruner *MessagePruner if config.Staker.Enable { - var wallet staker.ValidatorWalletInterface - if config.Staker.UseSmartContractWallet || txOptsValidator == nil { - var existingWalletAddress *common.Address - if len(config.Staker.ContractWalletAddress) > 0 { - if !common.IsHexAddress(config.Staker.ContractWalletAddress) { - log.Error("invalid validator smart contract wallet", "addr", config.Staker.ContractWalletAddress) - return nil, errors.New("invalid validator smart contract wallet 
address") + dp, err := StakerDataposter( + ctx, + rawdb.NewTable(arbDb, storage.StakerPrefix), + l1Reader, + txOptsValidator, + configFetcher, + syncMonitor, + ) + if err != nil { + return nil, err + } + getExtraGas := func() uint64 { return configFetcher.Get().Staker.ExtraGas } + // TODO: factor this out into separate helper, and split rest of node + // creation into multiple helpers. + var wallet staker.ValidatorWalletInterface = validatorwallet.NewNoOp(l1client, deployInfo.Rollup) + if !strings.EqualFold(config.Staker.Strategy, "watchtower") { + if config.Staker.UseSmartContractWallet || txOptsValidator == nil { + var existingWalletAddress *common.Address + if len(config.Staker.ContractWalletAddress) > 0 { + if !common.IsHexAddress(config.Staker.ContractWalletAddress) { + log.Error("invalid validator smart contract wallet", "addr", config.Staker.ContractWalletAddress) + return nil, errors.New("invalid validator smart contract wallet address") + } + tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) + existingWalletAddress = &tmpAddress + } + wallet, err = validatorwallet.NewContract(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas) + if err != nil { + return nil, err + } + } else { + if len(config.Staker.ContractWalletAddress) > 0 { + return nil, errors.New("validator contract wallet specified but flag to use a smart contract wallet was not specified") + } + wallet, err = validatorwallet.NewEOA(dp, deployInfo.Rollup, l1client, txOptsValidator, getExtraGas) + if err != nil { + return nil, err } - tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) - existingWalletAddress = &tmpAddress - } - wallet, err = staker.NewContractValidatorWallet(existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}) - if err != nil { - return nil, err - } - } else { - if len(config.Staker.ContractWalletAddress) > 0 { - return nil, errors.New("validator contract wallet specified but flag to use a smart contract wallet was not specified") - } - wallet, err = staker.NewEoaValidatorWallet(deployInfo.Rollup, l1client, txOptsValidator) - if err != nil { - return nil, err } } - notifiers := make([]staker.LatestStakedNotifier, 0) + var confirmedNotifiers []staker.LatestConfirmedNotifier if config.MessagePruner.Enable { messagePruner = NewMessagePruner(txStreamer, inboxTracker, func() *MessagePrunerConfig { return &configFetcher.Get().MessagePruner }) - notifiers = append(notifiers, messagePruner) + confirmedNotifiers = append(confirmedNotifiers, messagePruner) } - stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, config.Staker, blockValidator, statelessBlockValidator, notifiers, deployInfo.ValidatorUtils, fatalErrChan) + stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, config.Staker, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan) if err != nil { return nil, err } - if stakerObj.Strategy() != staker.WatchtowerStrategy { - err := wallet.Initialize(ctx) - if err != nil { + if stakerObj.Strategy() == staker.WatchtowerStrategy { + if err := wallet.Initialize(ctx); err != nil { return nil, err } } @@ -836,7 +932,17 @@ func createNodeImpl( if txOptsBatchPoster == nil { return nil, errors.New("batchposter, but no TxOpts") } - batchPoster, err = 
NewBatchPoster(l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter) + batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ + DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), + L1Reader: l1Reader, + Inbox: inboxTracker, + Streamer: txStreamer, + SyncMonitor: syncMonitor, + Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, + DeployInfo: deployInfo, + TransactOpts: txOptsBatchPoster, + DAWriter: daWriter, + }) if err != nil { return nil, err } @@ -849,28 +955,28 @@ func createNodeImpl( } return &Node{ - arbDb, - stack, - exec, - l1Reader, - txStreamer, - deployInfo, - inboxReader, - inboxTracker, - delayedSequencer, - batchPoster, - messagePruner, - blockValidator, - statelessBlockValidator, - stakerObj, - broadcastServer, - broadcastClients, - coordinator, - maintenanceRunner, - dasLifecycleManager, - syncMonitor, - configFetcher, - ctx, + ArbDB: arbDb, + Stack: stack, + Execution: exec, + L1Reader: l1Reader, + TxStreamer: txStreamer, + DeployInfo: deployInfo, + InboxReader: inboxReader, + InboxTracker: inboxTracker, + DelayedSequencer: delayedSequencer, + BatchPoster: batchPoster, + MessagePruner: messagePruner, + BlockValidator: blockValidator, + StatelessBlockValidator: statelessBlockValidator, + Staker: stakerObj, + BroadcastServer: broadcastServer, + BroadcastClients: broadcastClients, + SeqCoordinator: coordinator, + MaintenanceRunner: maintenanceRunner, + DASLifecycleManager: dasLifecycleManager, + SyncMonitor: syncMonitor, + configFetcher: configFetcher, + ctx: ctx, }, nil } @@ -933,10 +1039,6 @@ func CreateNode( func (n *Node) Start(ctx context.Context) error { // config is the static config at start, not a dynamic config config := n.configFetcher.Get() - execClient, ok := n.Execution.(*execclient.Client) - if !ok { - execClient = nil - } gethExec, ok := n.Execution.(*gethexec.ExecutionNode) if !ok { gethExec = nil @@ -956,14 +1058,8 @@ func (n *Node) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("error starting geth stack: %w", err) } - if gethExec != nil { - err := gethExec.Start(ctx) - if err != nil { - return fmt.Errorf("error starting exec client: %w", err) - } - } - if execClient != nil { - err := execClient.Start(ctx) + if n.Execution != nil { + err := n.Execution.Start(ctx) if err != nil { return fmt.Errorf("error starting exec client: %w", err) } @@ -1072,12 +1168,8 @@ func (n *Node) Start(ctx context.Context) error { } func (n *Node) StopAndWait() { - gethExec, ok := n.Execution.(*gethexec.ExecutionNode) - if !ok { - gethExec = nil - } - if gethExec != nil { - gethExec.StopAndWait() + if n.Execution != nil { + n.Execution.StopAndWait() } if n.MaintenanceRunner != nil && n.MaintenanceRunner.Started() { n.MaintenanceRunner.StopAndWait() @@ -1141,8 +1233,8 @@ func (n *Node) FetchBatch(batchNum uint64) containers.PromiseInterface[[]byte] { return n.InboxReader.GetSequencerMessageBytes(batchNum) } -func (n *Node) FindL1BatchForMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] { - return containers.NewReadyPromise[uint64](n.InboxTracker.FindL1BatchForMessage(message)) +func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] { + return containers.NewReadyPromise[uint64](n.InboxTracker.FindInboxBatchContainingMessage(message)) } func (n *Node) GetBatchParentChainBlock(seqNum uint64) containers.PromiseInterface[uint64] { diff 
--git a/arbnode/simple_redis_lock.go b/arbnode/redislock/redis.go similarity index 70% rename from arbnode/simple_redis_lock.go rename to arbnode/redislock/redis.go index f6f37cc42d..c8252e059f 100644 --- a/arbnode/simple_redis_lock.go +++ b/arbnode/redislock/redis.go @@ -1,4 +1,4 @@ -package arbnode +package redislock import ( "context" @@ -8,6 +8,7 @@ import ( "math/big" "strconv" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/log" @@ -16,10 +17,10 @@ import ( flag "github.com/spf13/pflag" ) -type SimpleRedisLock struct { +type Simple struct { stopwaiter.StopWaiter client redis.UniversalClient - config SimpleRedisLockConfigFetcher + config SimpleCfgFetcher lockedUntil int64 mutex sync.Mutex stopping bool @@ -27,7 +28,7 @@ type SimpleRedisLock struct { myId string } -type SimpleRedisLockConfig struct { +type SimpleCfg struct { MyId string `koanf:"my-id"` LockoutDuration time.Duration `koanf:"lockout-duration" reload:"hot"` RefreshDuration time.Duration `koanf:"refresh-duration" reload:"hot"` @@ -35,22 +36,22 @@ type SimpleRedisLockConfig struct { BackgroundLock bool `koanf:"background-lock"` } -type SimpleRedisLockConfigFetcher func() *SimpleRedisLockConfig +type SimpleCfgFetcher func() *SimpleCfg -func RedisLockConfigAddOptions(prefix string, f *flag.FlagSet) { +func AddConfigOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".my-id", "", "this node's id prefix when acquiring the lock (optional)") - f.Duration(prefix+".lockout-duration", DefaultRedisLockConfig.LockoutDuration, "how long lock is held") - f.Duration(prefix+".refresh-duration", DefaultRedisLockConfig.RefreshDuration, "how long between consecutive calls to redis") - f.String(prefix+".key", prefix+".simple-lock-key", "key for lock") - f.Bool(prefix+".background-lock", DefaultRedisLockConfig.BackgroundLock, "should node always try grabing lock in background") + f.Duration(prefix+".lockout-duration", DefaultCfg.LockoutDuration, "how long lock is held") + f.Duration(prefix+".refresh-duration", DefaultCfg.RefreshDuration, "how long between consecutive calls to redis") + f.String(prefix+".key", DefaultCfg.Key, "key for lock") + f.Bool(prefix+".background-lock", DefaultCfg.BackgroundLock, "should node always try grabbing lock in background") } -func NewSimpleRedisLock(client redis.UniversalClient, config SimpleRedisLockConfigFetcher, readyToLock func() bool) (*SimpleRedisLock, error) { +func NewSimple(client redis.UniversalClient, config SimpleCfgFetcher, readyToLock func() bool) (*Simple, error) { randBig, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) if err != nil { return nil, err } - return &SimpleRedisLock{ + return &Simple{ myId: config().MyId + "-" + strconv.FormatInt(randBig.Int64(), 16), // unique even if config is not client: client, config: config, @@ -58,14 +59,14 @@ func NewSimpleRedisLock(client redis.UniversalClient, config SimpleRedisLockConf }, nil } -var DefaultRedisLockConfig = SimpleRedisLockConfig{ +var DefaultCfg = SimpleCfg{ LockoutDuration: time.Minute, RefreshDuration: time.Second * 10, Key: "", BackgroundLock: false, } -func (l *SimpleRedisLock) attemptLock(ctx context.Context) (bool, error) { +func (l *Simple) attemptLock(ctx context.Context) (bool, error) { l.mutex.Lock() defer l.mutex.Unlock() if l.stopping || l.client == nil { @@ -120,7 +121,7 @@ func (l *SimpleRedisLock) attemptLock(ctx context.Context) (bool, error) { return gotLock, nil } -func (l *SimpleRedisLock) AttemptLock(ctx context.Context) bool { if
l.Locked() { return true } @@ -135,14 +136,14 @@ func (l *SimpleRedisLock) AttemptLock(ctx context.Context) bool { return res } -func (l *SimpleRedisLock) Locked() bool { +func (l *Simple) Locked() bool { if l.client == nil { return true } return time.Now().Before(atomicTimeRead(&l.lockedUntil)) } -func (l *SimpleRedisLock) Release(ctx context.Context) { +func (l *Simple) Release(ctx context.Context) { l.mutex.Lock() defer l.mutex.Unlock() @@ -179,7 +180,7 @@ func (l *SimpleRedisLock) Release(ctx context.Context) { } } -func (l *SimpleRedisLock) Start(ctxin context.Context) { +func (l *Simple) Start(ctxin context.Context) { l.StopWaiter.Start(ctxin, l) if l.config().BackgroundLock && l.client != nil { l.CallIteratively(func(ctx context.Context) time.Duration { @@ -192,10 +193,34 @@ func (l *SimpleRedisLock) Start(ctxin context.Context) { } } -func (l *SimpleRedisLock) StopAndWait() { +func (l *Simple) StopAndWait() { l.mutex.Lock() l.stopping = true l.mutex.Unlock() l.Release(l.GetContext()) l.StopWaiter.StopAndWait() } + +func execTestPipe(pipe redis.Pipeliner, ctx context.Context) error { + cmders, err := pipe.Exec(ctx) + if err != nil { + return err + } + for _, cmder := range cmders { + if err := cmder.Err(); err != nil { + return err + } + } + return nil +} + +// notice: It is possible for two consecutive reads to get decreasing values. That shouldn't matter. +func atomicTimeRead(addr *int64) time.Time { + asint64 := atomic.LoadInt64(addr) + return time.UnixMilli(asint64) +} + +func atomicTimeWrite(addr *int64, t time.Time) { + asint64 := t.UnixMilli() + atomic.StoreInt64(addr, asint64) +} diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index acb5355987..cb1ae9d6ea 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -11,6 +11,7 @@ import ( "os" "regexp" "strconv" + "strings" "time" "github.com/ethereum/go-ethereum/log" @@ -23,6 +24,9 @@ var ( limitCheckDurationHistogram = metrics.NewRegisteredHistogram("arb/rpc/limitcheck/duration", nil, metrics.NewBoundedHistogramSample()) limitCheckSuccessCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/success", nil) limitCheckFailureCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/failure", nil) + nitroMemLimit = metrics.GetOrRegisterGauge("arb/memory/limit", nil) + nitroMemUsage = metrics.GetOrRegisterGauge("arb/memory/usage", nil) + errNotSupported = errors.New("not supported") ) // Init adds the resource manager's httpServer to a custom hook in geth. @@ -30,30 +34,71 @@ var ( // prior to RPC request handling. // // Must be run before the go-ethereum stack is set up (ethereum/go-ethereum/node.New). 
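//
// A hedged usage sketch (the call site, variable names, and the "2G" value
// below are illustrative assumptions, not part of this change). With Init
// reworked below to return an error, a caller would do roughly:
//
//	rmCfg := resourcemanager.Config{MemFreeLimit: "2G"}
//	if err := resourcemanager.Init(&rmCfg); err != nil {
//		return fmt.Errorf("initializing resource manager: %w", err)
//	}
//	// Only now build the geth stack; Init installs node.WrapHTTPHandler,
//	// which must be in place before node.New runs.
//	stack, err := node.New(nodeCfg)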
-func Init(conf *Config) { - if conf.MemoryLimitPercent > 0 { - node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { - return newHttpServer(srv, newLimitChecker(conf)), nil +func Init(conf *Config) error { + if conf.MemFreeLimit == "" { + return nil + } + + limit, err := parseMemLimit(conf.MemFreeLimit) + if err != nil { + return err + } + + node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { + var c limitChecker + c, err := newCgroupsMemoryLimitCheckerIfSupported(limit) + if errors.Is(err, errNotSupported) { + log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") + c = &trivialLimitChecker{} } + + return newHttpServer(srv, c), nil + } + return nil +} + +func parseMemLimit(limitStr string) (int, error) { + var ( + limit int = 1 + s string + ) + if _, err := fmt.Sscanf(limitStr, "%d%s", &limit, &s); err != nil { + return 0, err + } + + switch strings.ToUpper(s) { + case "K", "KB": + limit <<= 10 + case "M", "MB": + limit <<= 20 + case "G", "GB": + limit <<= 30 + case "T", "TB": + limit <<= 40 + case "B": + default: + return 0, fmt.Errorf("unsupported memory limit suffix string %s", s) } + + return limit, nil } // Config contains the configuration for resourcemanager functionality. // Currently only a memory limit is supported, other limits may be added // in the future. type Config struct { - MemoryLimitPercent int `koanf:"mem-limit-percent" reload:"hot"` + MemFreeLimit string `koanf:"mem-free-limit" reload:"hot"` } // DefaultConfig has the defaul resourcemanager configuration, // all limits are disabled. var DefaultConfig = Config{ - MemoryLimitPercent: 0, + MemFreeLimit: "", } // ConfigAddOptions adds the configuration options for resourcemanager. func ConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Int(prefix+".mem-limit-percent", DefaultConfig.MemoryLimitPercent, "RPC calls are throttled if system memory utilization exceeds this percent value, zero (default) is disabled") + f.String(prefix+".mem-free-limit", DefaultConfig.MemFreeLimit, "RPC calls are throttled if free system memory excluding the page cache is below this amount, expressed in bytes or multiples of bytes with suffix B, K, M, G. The limit should be set such that sufficient free memory is left for the page cache in order for the system to be performant") } // httpServer implements http.Handler and wraps calls to inner with a resource @@ -74,7 +119,7 @@ func (s *httpServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { exceeded, err := s.c.isLimitExceeded() limitCheckDurationHistogram.Update(time.Since(start).Nanoseconds()) if err != nil { - log.Error("Error checking memory limit", "err", err, "checker", s.c) + log.Error("Error checking memory limit", "err", err, "checker", s.c.String()) } else if exceeded { http.Error(w, "Too many requests", http.StatusTooManyRequests) limitCheckFailureCounter.Inc(1) @@ -90,20 +135,27 @@ type limitChecker interface { String() string } -// newLimitChecker attempts to auto-discover the mechanism by which it -// can check system limits. Currently Cgroups V1 is supported, -// with Cgroups V2 likely to be implmemented next. If no supported -// mechanism is discovered, it logs an error and fails open, ie -// it creates a trivialLimitChecker that does no checks. 
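// For illustration, the suffix handling in parseMemLimit above scales by
// binary (1024-based) multiples and matches case-insensitively; these
// inputs are examples, not defaults:
//
//	parseMemLimit("512K") // 512 << 10 == 524288
//	parseMemLimit("75MB") // 75 << 20 == 78643200
//	parseMemLimit("2g")   // 2 << 30 == 2147483648
//	parseMemLimit("100B") // == 100; the "B" suffix applies no scaling
//	parseMemLimit("10X")  // error: unsupported memory limit suffix string X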
-func newLimitChecker(conf *Config) limitChecker { - c := newCgroupsV1MemoryLimitChecker(DefaultCgroupsV1MemoryDirectory, conf.MemoryLimitPercent) +func isSupported(c limitChecker) bool { + _, err := c.isLimitExceeded() + return err == nil +} + +// newCgroupsMemoryLimitCheckerIfSupported attempts to auto-discover whether +// Cgroups V1 or V2 is supported for checking system memory limits. +func newCgroupsMemoryLimitCheckerIfSupported(memLimitBytes int) (*cgroupsMemoryLimitChecker, error) { + c := newCgroupsMemoryLimitChecker(cgroupsV1MemoryFiles, memLimitBytes) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") - return c + return c, nil + } + + c = newCgroupsMemoryLimitChecker(cgroupsV2MemoryFiles, memLimitBytes) + if isSupported(c) { + log.Info("Cgroups v2 detected, enabling memory limit RPC throttling") + return c, nil } - log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") - return &trivialLimitChecker{} + return nil, errNotSupported } // trivialLimitChecker checks no limits, so its limits are never exceeded. @@ -115,52 +167,88 @@ func (_ trivialLimitChecker) isLimitExceeded() (bool, error) { func (_ trivialLimitChecker) String() string { return "trivial" } -const DefaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +type cgroupsMemoryFiles struct { + limitFile, usageFile, statsFile string + activeRe, inactiveRe *regexp.Regexp +} -type cgroupsV1MemoryLimitChecker struct { - cgroupDir string - memoryLimitPercent int +const defaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +const defaultCgroupsV2MemoryDirectory = "/sys/fs/cgroup/" - limitFile, usageFile, statsFile string +var cgroupsV1MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV1MemoryDirectory + "/memory.limit_in_bytes", + usageFile: defaultCgroupsV1MemoryDirectory + "/memory.usage_in_bytes", + statsFile: defaultCgroupsV1MemoryDirectory + "/memory.stat", + activeRe: regexp.MustCompile(`^total_active_file (\d+)`), + inactiveRe: regexp.MustCompile(`^total_inactive_file (\d+)`), +} +var cgroupsV2MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV2MemoryDirectory + "/memory.max", + usageFile: defaultCgroupsV2MemoryDirectory + "/memory.current", + statsFile: defaultCgroupsV2MemoryDirectory + "/memory.stat", + activeRe: regexp.MustCompile(`^active_file (\d+)`), + inactiveRe: regexp.MustCompile(`^inactive_file (\d+)`), } -func newCgroupsV1MemoryLimitChecker(cgroupDir string, memoryLimitPercent int) *cgroupsV1MemoryLimitChecker { - return &cgroupsV1MemoryLimitChecker{ - cgroupDir: cgroupDir, - memoryLimitPercent: memoryLimitPercent, - limitFile: cgroupDir + "/memory.limit_in_bytes", - usageFile: cgroupDir + "/memory.usage_in_bytes", - statsFile: cgroupDir + "/memory.stat", - } +type cgroupsMemoryLimitChecker struct { + files cgroupsMemoryFiles + memLimitBytes int } -func isSupported(c limitChecker) bool { - _, err := c.isLimitExceeded() - return err == nil +func newCgroupsMemoryLimitChecker(files cgroupsMemoryFiles, memLimitBytes int) *cgroupsMemoryLimitChecker { + return &cgroupsMemoryLimitChecker{ + files: files, + memLimitBytes: memLimitBytes, + } } -// isLimitExceeded checks if the system memory used exceeds the limit -// scaled by the configured memoryLimitPercent. +// isLimitExceeded checks if the system memory free is less than the limit. +// It returns true if the limit is exceeded. 
// - // See the following page for details of calculating the memory used, - // which is reported as container_memory_working_set_bytes in prometheus: + // container_memory_working_set_bytes in prometheus is calculated as + // memory.usage_in_bytes - inactive page cache bytes, see // https://mihai-albert.com/2022/02/13/out-of-memory-oom-in-kubernetes-part-3-memory-metrics-sources-and-tools-to-collect-them/ -func (c *cgroupsV1MemoryLimitChecker) isLimitExceeded() (bool, error) { - var limit, usage, inactive int +// This metric is used by kubernetes to report memory in use by the pod, +// but memory.usage_in_bytes also includes the active page cache, which +// can be evicted by the kernel when more memory is needed, see +// https://github.com/kubernetes/kubernetes/issues/43916 +// The kernel cannot be guaranteed to move a page from a file from +// active to inactive even when the file is closed, or Nitro is exited. +// For larger chains, Nitro's page cache can grow quite large due to +// the large amount of state that is randomly accessed from disk as each +// block is added. So in checking the limit we also include the active +// page cache. +// +// The limit should be set such that the system has a reasonable amount of +// free memory for the page cache, to avoid cache thrashing on chain state +// access. What counts as "reasonable" will depend on access patterns, state +// size, and your application's tolerance for latency. +func (c *cgroupsMemoryLimitChecker) isLimitExceeded() (bool, error) { + var limit, usage, active, inactive int var err error - limit, err = readIntFromFile(c.limitFile) - if err != nil { + if limit, err = readIntFromFile(c.files.limitFile); err != nil { return false, err } - usage, err = readIntFromFile(c.usageFile) - if err != nil { + if usage, err = readIntFromFile(c.files.usageFile); err != nil { return false, err } - inactive, err = readInactive(c.statsFile) - if err != nil { + if active, err = readFromMemStats(c.files.statsFile, c.files.activeRe); err != nil { + return false, err + } + if inactive, err = readFromMemStats(c.files.statsFile, c.files.inactiveRe); err != nil { return false, err } - return usage-inactive >= ((limit * c.memoryLimitPercent) / 100), nil + + memLimit := limit - c.memLimitBytes + memUsage := usage - (active + inactive) + nitroMemLimit.Update(int64(memLimit)) + nitroMemUsage.Update(int64(memUsage)) + + return memUsage >= memLimit, nil +} + +func (c cgroupsMemoryLimitChecker) String() string { + return "CgroupsMemoryLimitChecker" } func readIntFromFile(fileName string) (int, error) { @@ -176,9 +264,7 @@ func readIntFromFile(fileName string) (int, error) { return limit, nil } -var re = regexp.MustCompile(`total_inactive_file (\d+)`) - -func readInactive(fileName string) (int, error) { +func readFromMemStats(fileName string, re *regexp.Regexp) (int, error) { file, err := os.Open(fileName) if err != nil { return 0, err @@ -201,7 +287,3 @@ func readInactive(fileName string) (int, error) { return 0, errors.New("total_inactive_file not found in " + fileName) } - -func (c cgroupsV1MemoryLimitChecker) String() string { - return "CgroupsV1MemoryLimitChecker" -} diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index fe470e706b..4f52ad017e 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -6,73 +6,140 @@ package resourcemanager import ( "fmt" "os" + "regexp" "testing" ) -func updateFakeCgroupv1Files(c
*cgroupsV1MemoryLimitChecker, limit, usage, inactive int) error { - limitFile, err := os.Create(c.limitFile) +func updateFakeCgroupFiles(c *cgroupsMemoryLimitChecker, limit, usage, inactive, active int) error { + limitFile, err := os.Create(c.files.limitFile) if err != nil { return err } - _, err = fmt.Fprintf(limitFile, "%d\n", limit) - if err != nil { + if _, err = fmt.Fprintf(limitFile, "%d\n", limit); err != nil { return err } - usageFile, err := os.Create(c.usageFile) + usageFile, err := os.Create(c.files.usageFile) if err != nil { return err } - _, err = fmt.Fprintf(usageFile, "%d\n", usage) - if err != nil { + if _, err = fmt.Fprintf(usageFile, "%d\n", usage); err != nil { return err } - statsFile, err := os.Create(c.statsFile) + statsFile, err := os.Create(c.files.statsFile) if err != nil { return err } _, err = fmt.Fprintf(statsFile, `total_cache 1029980160 total_rss 1016209408 total_inactive_file %d -total_active_file 321544192 -`, inactive) - if err != nil { - return err - } - return nil +total_active_file %d +`, inactive, active) + return err } -func TestCgroupsv1MemoryLimit(t *testing.T) { - cgroupDir := t.TempDir() - c := newCgroupsV1MemoryLimitChecker(cgroupDir, 95) - _, err := c.isLimitExceeded() - if err == nil { - t.Error("Should fail open if can't read files") +func makeCgroupsTestDir(cgroupDir string) cgroupsMemoryFiles { + return cgroupsMemoryFiles{ + limitFile: cgroupDir + "/memory.limit_in_bytes", + usageFile: cgroupDir + "/memory.usage_in_bytes", + statsFile: cgroupDir + "/memory.stat", + activeRe: regexp.MustCompile(`^total_active_file (\d+)`), + inactiveRe: regexp.MustCompile(`^total_inactive_file (\d+)`), } +} - err = updateFakeCgroupv1Files(c, 1000, 1000, 51) - if err != nil { - t.Error(err) - } - exceeded, err := c.isLimitExceeded() - if err != nil { - t.Error(err) - } - if exceeded { - t.Error("Expected under limit") +func TestCgroupsFailIfCantOpen(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 1024*1024*512) + if _, err := c.isLimitExceeded(); err == nil { + t.Fatal("Should fail open if can't read files") } +} - err = updateFakeCgroupv1Files(c, 1000, 1000, 50) - if err != nil { - t.Error(err) +func TestCgroupsMemoryLimit(t *testing.T) { + for _, tc := range []struct { + desc string + sysLimit int + inactive int + active int + usage int + memLimit string + want bool + }{ + { + desc: "limit should be exceeded", + sysLimit: 1000, + inactive: 50, + active: 25, + usage: 1000, + memLimit: "75B", + want: true, + }, + { + desc: "limit should not be exceeded", + sysLimit: 1000, + inactive: 51, + active: 25, + usage: 1000, + memLimit: "75b", + want: false, + }, + { + desc: "limit (MB) should be exceeded", + sysLimit: 1000 * 1024 * 1024, + inactive: 50 * 1024 * 1024, + active: 25 * 1024 * 1024, + usage: 1000 * 1024 * 1024, + memLimit: "75MB", + want: true, + }, + { + desc: "limit (MB) should not be exceeded", + sysLimit: 1000 * 1024 * 1024, + inactive: 1 + 50*1024*1024, + active: 25 * 1024 * 1024, + usage: 1000 * 1024 * 1024, + memLimit: "75m", + want: false, + }, + { + desc: "limit (GB) should be exceeded", + sysLimit: 1000 * 1024 * 1024 * 1024, + inactive: 50 * 1024 * 1024 * 1024, + active: 25 * 1024 * 1024 * 1024, + usage: 1000 * 1024 * 1024 * 1024, + memLimit: "75G", + want: true, + }, + { + desc: "limit (GB) should not be exceeded", + sysLimit: 1000 * 1024 * 1024 * 1024, + inactive: 1 + 50*1024*1024*1024, + active: 25 * 1024 * 1024 * 1024, + usage: 1000 * 1024 * 1024 * 1024, + memLimit: "75gb", + want: 
false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + memLimit, err := parseMemLimit(tc.memLimit) + if err != nil { + t.Fatalf("Parsing memory limit failed: %v", err) + } + c := newCgroupsMemoryLimitChecker(testFiles, memLimit) + if err := updateFakeCgroupFiles(c, tc.sysLimit, tc.usage, tc.inactive, tc.active); err != nil { + t.Fatalf("Updating cgroup files: %v", err) + } + exceeded, err := c.isLimitExceeded() + if err != nil { + t.Fatalf("Checking if limit exceeded: %v", err) + } + if exceeded != tc.want { + t.Errorf("isLimitExceeded() = %t, want %t", exceeded, tc.want) + } + }, + ) } - exceeded, err = c.isLimitExceeded() - if err != nil { - t.Error(err) - } - if !exceeded { - t.Error("Expected over limit") - } - } diff --git a/arbnode/schema.go b/arbnode/schema.go index dddff11753..ddc7cf54fd 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -4,7 +4,6 @@ package arbnode var ( - BlockValidatorPrefix string = "v" // the prefix for all block validator keys messagePrefix []byte = []byte("m") // maps a message sequence number to a message legacyDelayedMessagePrefix []byte = []byte("d") // maps a delayed sequence number to an accumulator and a message as serialized on L1 rlpDelayedMessagePrefix []byte = []byte("e") // maps a delayed sequence number to an accumulator and an RLP encoded message diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 6556ab3726..a00700e1a0 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -58,28 +58,28 @@ type SeqCoordinator struct { } type SeqCoordinatorConfig struct { - Enable bool `koanf:"enable"` - ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` - RedisUrl string `koanf:"redis-url"` - LockoutDuration time.Duration `koanf:"lockout-duration"` - LockoutSpare time.Duration `koanf:"lockout-spare"` - SeqNumDuration time.Duration `koanf:"seq-num-duration"` - UpdateInterval time.Duration `koanf:"update-interval"` - RetryInterval time.Duration `koanf:"retry-interval"` - HandoffTimeout time.Duration `koanf:"handoff-timeout"` - SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` - ReleaseRetries int `koanf:"release-retries"` - MaxMsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` - MyUrlImpl string `koanf:"my-url"` - Signing signature.SignVerifyConfig `koanf:"signer"` -} - -func (c *SeqCoordinatorConfig) MyUrl() string { - if c.MyUrlImpl == "" { + Enable bool `koanf:"enable"` + ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` + RedisUrl string `koanf:"redis-url"` + LockoutDuration time.Duration `koanf:"lockout-duration"` + LockoutSpare time.Duration `koanf:"lockout-spare"` + SeqNumDuration time.Duration `koanf:"seq-num-duration"` + UpdateInterval time.Duration `koanf:"update-interval"` + RetryInterval time.Duration `koanf:"retry-interval"` + HandoffTimeout time.Duration `koanf:"handoff-timeout"` + SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` + ReleaseRetries int `koanf:"release-retries"` + // Max message per poll. 
+ MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` + MyUrl string `koanf:"my-url"` + Signer signature.SignVerifyConfig `koanf:"signer"` +} + +func (c *SeqCoordinatorConfig) Url() string { + if c.MyUrl == "" { return redisutil.INVALID_URL } - - return c.MyUrlImpl + return c.MyUrl } func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -94,8 +94,8 @@ func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".handoff-timeout", DefaultSeqCoordinatorConfig.HandoffTimeout, "the maximum amount of time to spend waiting for another sequencer to accept the lockout when handing it off on shutdown or db compaction") f.Duration(prefix+".safe-shutdown-delay", DefaultSeqCoordinatorConfig.SafeShutdownDelay, "if non-zero will add delay after transferring control") f.Int(prefix+".release-retries", DefaultSeqCoordinatorConfig.ReleaseRetries, "the number of times to retry releasing the wants lockout and chosen one status on shutdown") - f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MaxMsgPerPoll), "will only be marked as wanting the lockout if not too far behind") - f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrlImpl, "url for this sequencer if it is the chosen") + f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MsgPerPoll), "will only be marked as wanting the lockout if not too far behind") + f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrl, "url for this sequencer if it is the chosen") signature.SignVerifyConfigAddOptions(prefix+".signer", f) } @@ -111,9 +111,9 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: 5 * time.Second, ReleaseRetries: 4, RetryInterval: 50 * time.Millisecond, - MaxMsgPerPoll: 2000, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, + MsgPerPoll: 2000, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, } var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ @@ -127,18 +127,24 @@ var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: time.Millisecond * 100, ReleaseRetries: 4, RetryInterval: time.Millisecond * 3, - MaxMsgPerPoll: 20, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, -} - -func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contracts.BatchPosterVerifier, streamer *TransactionStreamer, sequencer execution.ExecutionSequencer, - sync *SyncMonitor, config SeqCoordinatorConfig) (*SeqCoordinator, error) { + MsgPerPoll: 20, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, +} + +func NewSeqCoordinator( + dataSigner signature.DataSignerFunc, + bpvalidator *contracts.AddressVerifier, + streamer *TransactionStreamer, + sequencer execution.ExecutionSequencer, + sync *SyncMonitor, + config SeqCoordinatorConfig, +) (*SeqCoordinator, error) { redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) if err != nil { return nil, err } - signer, err := signature.NewSignVerify(&config.Signing, dataSigner, bpvalidator) + signer, err := signature.NewSignVerify(&config.Signer, dataSigner, bpvalidator) if err != nil { return nil, err } @@ -251,7 +257,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if c.config.Signing.SymmetricSign { + if c.config.Signer.SymmetricSign { messageString := string(append(msgSig, msgBytes...)) messageData = &messageString } else { @@ -279,7 +285,7 @@ func (c *SeqCoordinator) 
acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if !wasEmpty && (current != c.config.MyUrl()) { + if !wasEmpty && (current != c.config.Url()) { return fmt.Errorf("%w: failed to catch lock. redis shows chosen: %s", execution.ErrRetrySequencer, current) } remoteMsgCount, err := c.getRemoteMsgCountImpl(ctx, tx) @@ -301,7 +307,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC initialDuration = 2 * time.Second } if wasEmpty { - pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.MyUrl(), initialDuration) + pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.Url(), initialDuration) } pipe.Set(ctx, redisutil.MSG_COUNT_KEY, msgCountMsg, c.config.SeqNumDuration) if messageData != nil { @@ -312,7 +318,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC } pipe.PExpireAt(ctx, redisutil.CHOSENSEQ_KEY, lockoutUntil) if setWantsLockout { - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) pipe.Set(ctx, myWantsLockoutKey, redisutil.WANTS_LOCKOUT_VAL, initialDuration) pipe.PExpireAt(ctx, myWantsLockoutKey, lockoutUntil) } @@ -363,7 +369,7 @@ func (c *SeqCoordinator) wantsLockoutUpdateWithMutex(ctx context.Context) error if c.avoidLockout > 0 { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) wantsLockoutUntil := time.Now().Add(c.config.LockoutDuration) pipe := c.Client.TxPipeline() initialDuration := c.config.LockoutDuration @@ -391,7 +397,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if err != nil { return err } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } pipe := tx.TxPipeline() @@ -410,7 +416,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if errors.Is(readErr, redis.Nil) { return nil } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } return releaseErr @@ -422,7 +428,7 @@ func (c *SeqCoordinator) wantsLockoutRelease(ctx context.Context) error { if !c.reportedWantsLockout { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) releaseErr := c.Client.Del(ctx, myWantsLockoutKey).Err() if releaseErr != nil { // got error - was it still deleted? 
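// A sketch of the renamed accessor's semantics (values are illustrative,
// assuming the surrounding arbnode package):
//
//	cfg := DefaultSeqCoordinatorConfig // MyUrl defaults to redisutil.INVALID_URL
//	_ = cfg.Url()                      // redisutil.INVALID_URL
//	cfg.MyUrl = "http://sequencer-0:8547"
//	_ = cfg.Url()                      // "http://sequencer-0:8547"
//	cfg.MyUrl = ""
//	_ = cfg.Url()                      // falls back to redisutil.INVALID_URL,
//	                                   // so update() skips lockout acquisition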
@@ -451,7 +457,7 @@ func (c *SeqCoordinator) noRedisError() time.Duration { // update for the prev known-chosen sequencer (no need to load new messages) func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen string) time.Duration { - if nextChosen != "" && nextChosen != c.config.MyUrl() { + if nextChosen != "" && nextChosen != c.config.Url() { // was the active sequencer, but no longer // we maintain chosen status if we had it and nobody in the priorities wants the lockout setPrevChosenTo := nextChosen @@ -468,7 +474,7 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin return c.retryAfterRedisError() } c.prevChosenSequencer = setPrevChosenTo - log.Info("released chosen-coordinator lock", "myUrl", c.config.MyUrl(), "nextChosen", nextChosen) + log.Info("released chosen-coordinator lock", "myUrl", c.config.Url(), "nextChosen", nextChosen) return c.noRedisError() } // Was, and still is, the active sequencer @@ -497,10 +503,10 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("coordinator failed finding sequencer wanting lockout", "err", err) return c.retryAfterRedisError() } - if c.prevChosenSequencer == c.config.MyUrl() { + if c.prevChosenSequencer == c.config.Url() { return c.updateWithLockout(ctx, chosenSeq) } - if chosenSeq != c.config.MyUrl() && chosenSeq != c.prevChosenSequencer { + if chosenSeq != c.config.Url() && chosenSeq != c.prevChosenSequencer { var err error if c.sequencer != nil { _, err = c.sequencer.ForwardTo(chosenSeq).Await(ctx) @@ -527,8 +533,8 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { return c.retryAfterRedisError() } readUntil := remoteMsgCount - if readUntil > localMsgCount+c.config.MaxMsgPerPoll { - readUntil = localMsgCount + c.config.MaxMsgPerPoll + if readUntil > localMsgCount+c.config.MsgPerPoll { + readUntil = localMsgCount + c.config.MsgPerPoll } var messages []arbostypes.MessageWithMetadata msgToRead := localMsgCount @@ -600,7 +606,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } } - if c.config.MyUrl() == redisutil.INVALID_URL { + if c.config.Url() == redisutil.INVALID_URL { return c.noRedisError() } @@ -615,7 +621,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } // can take over as main sequencer? 
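// (All three conditions in the check below must hold before this node takes
// over: it is synced, its local message count has caught up to the count
// reported in redis, and redis already names this node's URL as chosen.)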
- if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.MyUrl() { + if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.Url() { if c.sequencer == nil { log.Error("myurl main sequencer, but no sequencer exists") return c.noRedisError() @@ -640,7 +646,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { c.prevChosenSequencer = "" return c.retryAfterRedisError() } - log.Info("caught chosen-coordinator lock", "myUrl", c.config.MyUrl()) + log.Info("caught chosen-coordinator lock", "myUrl", c.config.Url()) if c.delayedSequencer != nil { err = c.delayedSequencer.ForceSequenceDelayed(ctx) if err != nil { @@ -652,7 +658,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("failed to populate the feed backlog on lockout acquisition", "err", err) } c.sequencer.Activate() - c.prevChosenSequencer = c.config.MyUrl() + c.prevChosenSequencer = c.config.Url() return c.noRedisError() } } @@ -685,7 +691,7 @@ func (c *SeqCoordinator) AvoidingLockout() bool { func (c *SeqCoordinator) DebugPrint() string { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() - return fmt.Sprint("Url:", c.config.MyUrl(), + return fmt.Sprint("Url:", c.config.Url(), " prevChosenSequencer:", c.prevChosenSequencer, " reportedWantsLockout:", c.reportedWantsLockout, " lockoutUntil:", c.lockoutUntil, @@ -761,7 +767,7 @@ func (c *SeqCoordinator) StopAndWait() { // We've just stopped our normal context so we need to use our parent's context. parentCtx := c.StopWaiter.GetParentContext() for i := 0; i <= c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing wants lockout key", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing wants lockout key", "myUrl", c.config.Url(), "attempt", i) err := c.wantsLockoutRelease(parentCtx) if err == nil { c.noRedisError() @@ -772,7 +778,7 @@ func (c *SeqCoordinator) StopAndWait() { } } for i := 0; i < c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing chosen one", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing chosen one", "myUrl", c.config.Url(), "attempt", i) err := c.chosenOneRelease(parentCtx) if err == nil { c.noRedisError() @@ -805,7 +811,7 @@ func (c *SeqCoordinator) AvoidLockout(ctx context.Context) bool { c.wantsLockoutMutex.Lock() c.avoidLockout++ c.wantsLockoutMutex.Unlock() - log.Info("avoiding lockout", "myUrl", c.config.MyUrl()) + log.Info("avoiding lockout", "myUrl", c.config.Url()) err := c.wantsLockoutRelease(ctx) if err != nil { log.Error("failed to release wanting the lockout in redis", "err", err) @@ -819,7 +825,7 @@ func (c *SeqCoordinator) TryToHandoffChosenOne(ctx context.Context) bool { ctx, cancel := context.WithTimeout(ctx, c.config.HandoffTimeout) defer cancel() if c.CurrentlyChosen() { - log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.MyUrl()) + log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.Url()) success := c.waitFor(ctx, func() bool { return !c.CurrentlyChosen() }) @@ -843,7 +849,7 @@ func (c *SeqCoordinator) SeekLockout(ctx context.Context) { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() c.avoidLockout-- - log.Info("seeking lockout", "myUrl", c.config.MyUrl()) + log.Info("seeking lockout", "myUrl", c.config.Url()) if c.sync.Synced() { // Even if this errors we still internally marked ourselves as wanting the lockout err := 
c.wantsLockoutUpdateWithMutex(ctx) diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_atomic_test.go index 8cc0acadae..61468a3adb 100644 --- a/arbnode/seq_coordinator_atomic_test.go +++ b/arbnode/seq_coordinator_atomic_test.go @@ -69,7 +69,7 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo timeLaunching := time.Now() // didn't sequence.. should we have succeeded? if timeLaunching.Before(holdingLockout) { - execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.MyUrl(), err) + execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.Url(), err) break } } @@ -79,9 +79,9 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo continue } if data.sequencer[i] != "" { - execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.MyUrl()) + execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.Url()) } - data.sequencer[i] = coord.config.MyUrl() + data.sequencer[i] = coord.config.Url() } if execError != nil { data.err = execError @@ -99,16 +99,16 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { coordConfig := TestSeqCoordinatorConfig coordConfig.LockoutDuration = time.Millisecond * 100 coordConfig.LockoutSpare = time.Millisecond * 10 - coordConfig.Signing.ECDSA.AcceptSequencer = false - coordConfig.Signing.SymmetricFallback = true - coordConfig.Signing.SymmetricSign = true - coordConfig.Signing.Symmetric.Dangerous.DisableSignatureVerification = true - coordConfig.Signing.Symmetric.SigningKey = "" + coordConfig.Signer.ECDSA.AcceptSequencer = false + coordConfig.Signer.SymmetricFallback = true + coordConfig.Signer.SymmetricSign = true + coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true + coordConfig.Signer.Symmetric.SigningKey = "" testData := CoordinatorTestData{ testStartRound: -1, sequencer: make([]string, messagesPerRound), } - nullSigner, err := signature.NewSignVerify(&coordConfig.Signing, nil, nil) + nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil) Require(t, err) redisUrl := redisutil.CreateTestRedis(ctx, t) @@ -121,7 +121,7 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { for i := 0; i < NumOfThreads; i++ { config := coordConfig - config.MyUrlImpl = fmt.Sprint(i) + config.MyUrl = fmt.Sprint(i) redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) Require(t, err) coordinator := &SeqCoordinator{ diff --git a/arbnode/simple_redis_lock_test.go b/arbnode/simple_redis_lock_test.go index 35947279b8..b7506145c3 100644 --- a/arbnode/simple_redis_lock_test.go +++ b/arbnode/simple_redis_lock_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -20,7 +21,7 @@ const test_release_frac = 5 const test_delay = time.Millisecond const test_redisKey_prefix = "__TEMP_SimpleRedisLockTest__" -func attemptLock(ctx context.Context, s *SimpleRedisLock, flag *int32, wg *sync.WaitGroup) { +func attemptLock(ctx context.Context, s *redislock.Simple, flag *int32, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < test_attempts; i++ { if s.AttemptLock(ctx) { @@ -46,22 +47,22 @@ func simpleRedisLockTest(t *testing.T, redisKeySuffix string, chosen int, backgo Require(t, err) Require(t, redisClient.Del(ctx, redisKey).Err()) - conf := &SimpleRedisLockConfig{ + conf := &redislock.SimpleCfg{ 
LockoutDuration: test_delay * test_attempts * 10, RefreshDuration: test_delay * 2, Key: redisKey, BackgroundLock: backgound, } - confFetcher := func() *SimpleRedisLockConfig { return conf } + confFetcher := func() *redislock.SimpleCfg { return conf } - locks := make([]*SimpleRedisLock, 0) + locks := make([]*redislock.Simple, 0) for i := 0; i < test_threads; i++ { var err error - var lock *SimpleRedisLock + var lock *redislock.Simple if chosen < 0 || chosen == i { - lock, err = NewSimpleRedisLock(redisClient, confFetcher, prepareTrue) + lock, err = redislock.NewSimple(redisClient, confFetcher, prepareTrue) } else { - lock, err = NewSimpleRedisLock(redisClient, confFetcher, prepareFalse) + lock, err = redislock.NewSimple(redisClient, confFetcher, prepareFalse) } if err != nil { t.Fatal(err) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 49fbc8acb2..202a5c7d79 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -19,6 +19,7 @@ import ( "errors" + "github.com/cockroachdb/pebble" flag "github.com/spf13/pflag" "github.com/syndtr/goleveldb/leveldb" @@ -71,7 +72,7 @@ type TransactionStreamer struct { } type TransactionStreamerConfig struct { - MaxBroadcastQueueSize int `koanf:"max-broadcaster-queue-size"` + MaxBroadcasterQueueSize int `koanf:"max-broadcaster-queue-size"` MaxReorgResequenceDepth int64 `koanf:"max-reorg-resequence-depth" reload:"hot"` ExecuteMessageLoopDelay time.Duration `koanf:"execute-message-loop-delay" reload:"hot"` } @@ -79,19 +80,19 @@ type TransactionStreamerConfig struct { type TransactionStreamerConfigFetcher func() *TransactionStreamerConfig var DefaultTransactionStreamerConfig = TransactionStreamerConfig{ - MaxBroadcastQueueSize: 1024, + MaxBroadcasterQueueSize: 1024, MaxReorgResequenceDepth: 1024, ExecuteMessageLoopDelay: time.Millisecond * 100, } var TestTransactionStreamerConfig = TransactionStreamerConfig{ - MaxBroadcastQueueSize: 10_000, + MaxBroadcasterQueueSize: 10_000, MaxReorgResequenceDepth: 128 * 1024, ExecuteMessageLoopDelay: time.Millisecond, } func TransactionStreamerConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcastQueueSize, "maximum cache of pending broadcaster messages") + f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcasterQueueSize, "maximum cache of pending broadcaster messages") f.Int64(prefix+".max-reorg-resequence-depth", DefaultTransactionStreamerConfig.MaxReorgResequenceDepth, "maximum number of messages to attempt to resequence on reorg (0 = never resequence, -1 = always resequence)") f.Duration(prefix+".execute-message-loop-delay", DefaultTransactionStreamerConfig.ExecuteMessageLoopDelay, "delay when polling calls to execute messages") } @@ -495,7 +496,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.B s.broadcasterQueuedMessagesActiveReorg = feedReorg } else if broadcasterQueuedMessagesPos+arbutil.MessageIndex(len(s.broadcasterQueuedMessages)) == broadcastStartPos { // Feed messages can be added directly to end of cache - maxQueueSize := s.config().MaxBroadcastQueueSize + maxQueueSize := s.config().MaxBroadcasterQueueSize if maxQueueSize == 0 || len(s.broadcasterQueuedMessages) <= maxQueueSize { s.broadcasterQueuedMessages = append(s.broadcasterQueuedMessages, messages...) 
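// (Guard semantics above: a max-broadcaster-queue-size of 0 leaves the
// cache unbounded; otherwise new feed messages are appended only while the
// cache has not yet outgrown the configured maximum.)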
} @@ -524,7 +525,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.B if broadcastStartPos > 0 { _, err := s.GetMessage(broadcastStartPos - 1) if err != nil { - if !errors.Is(err, leveldb.ErrNotFound) { + if !errors.Is(err, leveldb.ErrNotFound) && !errors.Is(err, pebble.ErrNotFound) { return err } // Message before current message doesn't exist in database, so don't add current messages yet @@ -711,6 +712,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil var oldMsg *arbostypes.MessageWithMetadata var lastDelayedRead uint64 var hasNewConfirmedMessages bool + var cacheClearLen int messagesAfterPos := messageStartPos + arbutil.MessageIndex(len(messages)) broadcastStartPos := arbutil.MessageIndex(atomic.LoadUint64(&s.broadcasterQueuedMessagesPos)) @@ -739,10 +741,13 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Or no active broadcast reorg and broadcast messages start before or immediately after last L1 message if messagesAfterPos >= broadcastStartPos { broadcastSliceIndex := int(messagesAfterPos - broadcastStartPos) + messagesOldLen := len(messages) if broadcastSliceIndex < len(s.broadcasterQueuedMessages) { // Some cached feed messages can be used messages = append(messages, s.broadcasterQueuedMessages[broadcastSliceIndex:]...) } + // This calculation gives the exact length of the cache that was appended to messages + cacheClearLen = broadcastSliceIndex + len(messages) - messagesOldLen } // L1 used or replaced broadcast cache items @@ -815,8 +820,14 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil } if clearQueueOnSuccess { - s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[:0] - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, 0) + // Check if new messages were added at the end of the cache; if they were, don't remove those particular messages + if len(s.broadcasterQueuedMessages) > cacheClearLen { + s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[cacheClearLen:] + atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, uint64(broadcastStartPos)+uint64(cacheClearLen)) + } else { + s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[:0] + atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, 0) + } s.broadcasterQueuedMessagesActiveReorg = false } @@ -956,7 +967,7 @@ func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution log.Error("feedOneMsg failed to get message count", "err", err) return false } - s.execLastMsgCount = prevMessageCount + s.execLastMsgCount = msgCount pos, err := s.exec.HeadMessageNumber().Await(ctx) if err != nil { log.Error("feedOneMsg failed to get exec engine message count", "err", err) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index ae2e6a34c1..1f09ff1440 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,7 +3,11 @@ package addressSet +// TODO lowercase this package name + import ( + "errors" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/storage" "github.com/offchainlabs/nitro/arbos/util" @@ -24,49 +28,49 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto, - sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}), + backingStorage: sto.WithoutCache(), + size: sto.OpenStorageBackedUint64(0), + byAddress: sto.OpenSubStorage([]byte{0}), } } -func (aset *AddressSet) Size() (uint64, error) { -
return aset.size.Get() +func (as *AddressSet) Size() (uint64, error) { + return as.size.Get() } -func (aset *AddressSet) IsMember(addr common.Address) (bool, error) { - value, err := aset.byAddress.Get(util.AddressToHash(addr)) +func (as *AddressSet) IsMember(addr common.Address) (bool, error) { + value, err := as.byAddress.Get(util.AddressToHash(addr)) return value != (common.Hash{}), err } -func (aset *AddressSet) GetAnyMember() (*common.Address, error) { - size, err := aset.size.Get() +func (as *AddressSet) GetAnyMember() (*common.Address, error) { + size, err := as.size.Get() if err != nil || size == 0 { return nil, err } - sba := aset.backingStorage.OpenStorageBackedAddressOrNil(1) + sba := as.backingStorage.OpenStorageBackedAddressOrNil(1) addr, err := sba.Get() return addr, err } -func (aset *AddressSet) Clear() error { - size, err := aset.size.Get() +func (as *AddressSet) Clear() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - contents, _ := aset.backingStorage.GetByUint64(i) - _ = aset.backingStorage.ClearByUint64(i) - err = aset.byAddress.Clear(contents) + contents, _ := as.backingStorage.GetByUint64(i) + _ = as.backingStorage.ClearByUint64(i) + err = as.byAddress.Clear(contents) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { - size, err := aset.size.Get() +func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { + size, err := as.size.Get() if err != nil { return nil, err } @@ -75,7 +79,7 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err } ret := make([]common.Address, size) for i := range ret { - sba := aset.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) + sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) ret[i], err = sba.Get() if err != nil { return nil, err @@ -84,65 +88,112 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err return ret, nil } -func (aset *AddressSet) Add(addr common.Address) error { - present, err := aset.IsMember(addr) +func (as *AddressSet) ClearList() error { + size, err := as.size.Get() + if err != nil || size == 0 { + return err + } + for i := uint64(1); i <= size; i++ { + err = as.backingStorage.ClearByUint64(i) + if err != nil { + return err + } + } + return as.size.Clear() +} + +func (as *AddressSet) RectifyMapping(addr common.Address) error { + isOwner, err := as.IsMember(addr) + if !isOwner || err != nil { + return errors.New("RectifyMapping: Address is not an owner") + } + + // If the mapping is correct, RectifyMapping shouldn't do anything + // Additional safety check to avoid corruption of mapping after the initial fix + addrAsHash := common.BytesToHash(addr.Bytes()) + slot, err := as.byAddress.GetUint64(addrAsHash) + if err != nil { + return err + } + atSlot, err := as.backingStorage.GetByUint64(slot) + if err != nil { + return err + } + size, err := as.size.Get() + if err != nil { + return err + } + if atSlot == addrAsHash && slot <= size { + return errors.New("RectifyMapping: Owner address is correctly mapped") + } + + // Remove the owner from map and add them as a new owner + err = as.byAddress.Clear(addrAsHash) + if err != nil { + return err + } + + return as.Add(addr) +} + +func (as *AddressSet) Add(addr common.Address) error { + present, err := as.IsMember(addr) if present || err != nil { return err } - size, err := aset.size.Get() 
+ size, err := as.size.Get() if err != nil { return err } - sba := aset.backingStorage.OpenStorageBackedAddress(1 + size) slot := util.UintToHash(1 + size) addrAsHash := common.BytesToHash(addr.Bytes()) - err = aset.byAddress.Set(addrAsHash, slot) + err = as.byAddress.Set(addrAsHash, slot) if err != nil { return err } - sba = aset.backingStorage.OpenStorageBackedAddress(1 + size) + sba := as.backingStorage.OpenStorageBackedAddress(1 + size) err = sba.Set(addr) if err != nil { return err } - _, err = aset.size.Increment() + _, err = as.size.Increment() return err } -func (aset *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { +func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if slot == 0 || err != nil { return err } - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } if slot < size { - atSize, err := aset.backingStorage.GetByUint64(size) + atSize, err := as.backingStorage.GetByUint64(size) if err != nil { return err } - err = aset.backingStorage.SetByUint64(slot, atSize) + err = as.backingStorage.SetByUint64(slot, atSize) if err != nil { return err } if arbosVersion >= 11 { - err = aset.byAddress.Set(atSize, util.UintToHash(slot)) + err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err } } } - err = aset.backingStorage.ClearByUint64(size) + err = as.backingStorage.ClearByUint64(size) if err != nil { return err } - _, err = aset.size.Decrement() + _, err = as.size.Decrement() return err } diff --git a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go index 4296531f41..7d06c74f0b 100644 --- a/arbos/addressSet/addressSet_test.go +++ b/arbos/addressSet/addressSet_test.go @@ -10,11 +10,14 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/storage" + "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -173,6 +176,158 @@ func TestAddressSetAllMembers(t *testing.T) { } } +func TestRectifyMappingAgainstHistory(t *testing.T) { + db := storage.NewMemoryBackedStateDB() + sto := storage.NewGeth(db, burn.NewSystemBurner(nil, false)) + Require(t, Initialize(sto)) + aset := OpenAddressSet(sto) + version := uint64(10) + + // Test Nova history + addr1 := common.HexToAddress("0x9C040726F2A657226Ed95712245DeE84b650A1b5") + addr2 := common.HexToAddress("0xd345e41ae2cb00311956aa7109fc801ae8c81a52") + addr3 := common.HexToAddress("0xd0749b3e537ed52de4e6a3ae1eb6fc26059d0895") + addr4 := common.HexToAddress("0x86a02dd71363c440b21f4c0e5b2ad01ffe1a7482") + // Follow logs + Require(t, aset.Add(addr1)) + Require(t, aset.Add(addr2)) + Require(t, aset.Remove(addr1, version)) + Require(t, aset.Add(addr3)) + Require(t, aset.Add(addr4)) + Require(t, aset.Remove(addr2, version)) + Require(t, aset.Remove(addr3, version)) + // Check if history's correct + CurrentOwner, _ := aset.backingStorage.GetByUint64(uint64(1)) + isOwner, _ := aset.IsMember(addr2) + correctOwner, 
_ := aset.IsMember(addr4)
+	if size(t, aset) != uint64(1) || CurrentOwner != common.BytesToHash(addr2.Bytes()) || isOwner || !correctOwner {
+		Fail(t, "Logs and current state did not match")
+	}
+	// Run RectifyMapping to fix the issue
+	checkIfRectifyMappingWorks(t, aset, []common.Address{addr4}, true)
+	Require(t, aset.Clear())
+
+	// Test Arb1 history
+	addr1 = common.HexToAddress("0xd345e41ae2cb00311956aa7109fc801ae8c81a52")
+	addr2 = common.HexToAddress("0x98e4db7e07e584f89a2f6043e7b7c89dc27769ed")
+	addr3 = common.HexToAddress("0xcf57572261c7c2bcf21ffd220ea7d1a27d40a827")
+	// Follow logs
+	Require(t, aset.Add(addr1))
+	Require(t, aset.Add(addr2))
+	Require(t, aset.Add(addr3))
+	Require(t, aset.Remove(addr1, version))
+	Require(t, aset.Remove(addr2, version))
+	// Check if history's correct
+	CurrentOwner, _ = aset.backingStorage.GetByUint64(uint64(1))
+	correctOwner, _ = aset.IsMember(addr3)
+	index, _ := aset.byAddress.GetUint64(common.BytesToHash(addr3.Bytes()))
+	if size(t, aset) != uint64(1) || index == 1 || CurrentOwner != common.BytesToHash(addr3.Bytes()) || !correctOwner {
+		Fail(t, "Logs and current state did not match")
+	}
+	// Run RectifyMapping to fix the issue
+	checkIfRectifyMappingWorks(t, aset, []common.Address{addr3}, true)
+	Require(t, aset.Clear())
+
+	// Test Goerli history
+	addr1 = common.HexToAddress("0x186B56023d42B2B4E7616589a5C62EEf5FCa21DD")
+	addr2 = common.HexToAddress("0xc8efdb677afeb775ce1617dd976b56b3a6e95bba")
+	addr3 = common.HexToAddress("0xc3f86bb81e32295d29c288ffb4828936538cf326")
+	addr4 = common.HexToAddress("0x67acb531a05160a81dcd03079347f264c4fa2da3")
+	// Follow logs
+	Require(t, aset.Add(addr1))
+	Require(t, aset.Add(addr2))
+	Require(t, aset.Add(addr3))
+	Require(t, aset.Remove(addr1, version))
+	Require(t, aset.Add(addr4))
+	Require(t, aset.Remove(addr3, version))
+	Require(t, aset.Remove(addr2, version))
+	// Check if history's correct
+	CurrentOwner, _ = aset.backingStorage.GetByUint64(uint64(1))
+	isOwner, _ = aset.IsMember(addr3)
+	correctOwner, _ = aset.IsMember(addr4)
+	if size(t, aset) != uint64(1) || CurrentOwner != common.BytesToHash(addr3.Bytes()) || isOwner || !correctOwner {
+		Fail(t, "Logs and current state did not match")
+	}
+	// Run RectifyMapping to fix the issue
+	checkIfRectifyMappingWorks(t, aset, []common.Address{addr4}, true)
+}
+
+func TestRectifyMapping(t *testing.T) {
+	db := storage.NewMemoryBackedStateDB()
+	sto := storage.NewGeth(db, burn.NewSystemBurner(nil, false))
+	Require(t, Initialize(sto))
+	aset := OpenAddressSet(sto)
+
+	addr1 := testhelpers.RandomAddress()
+	addr2 := testhelpers.RandomAddress()
+	addr3 := testhelpers.RandomAddress()
+	possibleAddresses := []common.Address{addr1, addr2, addr3}
+
+	Require(t, aset.Add(addr1))
+	Require(t, aset.Add(addr2))
+	Require(t, aset.Add(addr3))
+
+	// Non-owners should not be able to call RectifyMapping
+	err := aset.RectifyMapping(testhelpers.RandomAddress())
+	if err == nil {
+		Fail(t, "RectifyMapping was successfully called by a non-owner")
+	}
+
+	// Corrupt the list and verify that RectifyMapping fixes it
+	addrHash := common.BytesToHash(addr2.Bytes())
+	Require(t, aset.backingStorage.SetByUint64(uint64(1), addrHash))
+	checkIfRectifyMappingWorks(t, aset, possibleAddresses, true)
+
+	// Corrupt the map and verify that RectifyMapping fixes it
+	addrHash = common.BytesToHash(addr2.Bytes())
+	Require(t, aset.byAddress.Set(addrHash, util.UintToHash(uint64(6))))
+	checkIfRectifyMappingWorks(t, aset, possibleAddresses, true)
+
+	// Add a new owner to the map and verify
if RectifyMapping syncs list with the map + // to check for the case where list has fewer owners than expected + addr4 := testhelpers.RandomAddress() + addrHash = common.BytesToHash(addr4.Bytes()) + Require(t, aset.byAddress.Set(addrHash, util.UintToHash(uint64(1)))) + checkIfRectifyMappingWorks(t, aset, possibleAddresses, true) + + // RectifyMapping should not do anything if the mapping is correct + // Check to verify functionality post fix + err = aset.RectifyMapping(addr1) + if err == nil { + Fail(t, "RectifyMapping called by a correctly mapped owner") + } + +} + +func checkIfRectifyMappingWorks(t *testing.T, aset *AddressSet, owners []common.Address, clearList bool) { + t.Helper() + if clearList { + Require(t, aset.ClearList()) + } + for index, owner := range owners { + Require(t, aset.RectifyMapping(owner)) + + addrAsHash := common.BytesToHash(owner.Bytes()) + slot, err := aset.byAddress.GetUint64(addrAsHash) + Require(t, err) + atSlot, err := aset.backingStorage.GetByUint64(slot) + Require(t, err) + if slot == 0 || atSlot != addrAsHash { + Fail(t, "RectifyMapping did not fix the mismatch") + } + + if clearList && int(size(t, aset)) != index+1 { + Fail(t, "RectifyMapping did not fix the mismatch") + } + } + allMembers, err := aset.AllMembers(size(t, aset)) + Require(t, err) + less := func(a, b common.Address) bool { return a.String() < b.String() } + if cmp.Diff(owners, allMembers, cmpopts.SortSlices(less)) != "" { + Fail(t, "RectifyMapping did not fix the mismatch") + } +} + func checkAllMembers(t *testing.T, aset *AddressSet, possibleAddresses []common.Address) { allMembers, err := aset.AllMembers(1024) Require(t, err) diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 220c2700f4..3fbb7b3782 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,6 +3,8 @@ package addressTable +// TODO lowercase this package name + import ( "bytes" "errors" @@ -25,7 +27,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto, sto.OpenSubStorage([]byte{}), numItems} + return &AddressTable{sto.WithoutCache(), sto.OpenSubStorage([]byte{}), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 2bea8f7c54..8702c62d16 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/addressSet" "github.com/offchainlabs/nitro/arbos/addressTable" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -35,23 +36,24 @@ import ( // persisted beyond the end of the test.) 
type ArbosState struct { - arbosVersion uint64 // version of the ArbOS storage format and semantics - upgradeVersion storage.StorageBackedUint64 // version we're planning to upgrade to, or 0 if not planning to upgrade - upgradeTimestamp storage.StorageBackedUint64 // when to do the planned upgrade - networkFeeAccount storage.StorageBackedAddress - l1PricingState *l1pricing.L1PricingState - l2PricingState *l2pricing.L2PricingState - retryableState *retryables.RetryableState - addressTable *addressTable.AddressTable - chainOwners *addressSet.AddressSet - sendMerkle *merkleAccumulator.MerkleAccumulator - blockhashes *blockhash.Blockhashes - chainId storage.StorageBackedBigInt - chainConfig storage.StorageBackedBytes - genesisBlockNum storage.StorageBackedUint64 - infraFeeAccount storage.StorageBackedAddress - backingStorage *storage.Storage - Burner burn.Burner + arbosVersion uint64 // version of the ArbOS storage format and semantics + upgradeVersion storage.StorageBackedUint64 // version we're planning to upgrade to, or 0 if not planning to upgrade + upgradeTimestamp storage.StorageBackedUint64 // when to do the planned upgrade + networkFeeAccount storage.StorageBackedAddress + l1PricingState *l1pricing.L1PricingState + l2PricingState *l2pricing.L2PricingState + retryableState *retryables.RetryableState + addressTable *addressTable.AddressTable + chainOwners *addressSet.AddressSet + sendMerkle *merkleAccumulator.MerkleAccumulator + blockhashes *blockhash.Blockhashes + chainId storage.StorageBackedBigInt + chainConfig storage.StorageBackedBytes + genesisBlockNum storage.StorageBackedUint64 + infraFeeAccount storage.StorageBackedAddress + brotliCompressionLevel storage.StorageBackedUint64 // brotli compression level used for pricing + backingStorage *storage.Storage + Burner burn.Burner } var ErrUninitializedArbOS = errors.New("ArbOS uninitialized") @@ -71,17 +73,18 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace)), + l1pricing.OpenL1PricingState(backingStorage.OpenCachedSubStorage(l1PricingSubspace)), + l2pricing.OpenL2PricingState(backingStorage.OpenCachedSubStorage(l2PricingSubspace)), + retryables.OpenRetryableState(backingStorage.OpenCachedSubStorage(retryablesSubspace), stateDB), + addressTable.Open(backingStorage.OpenCachedSubStorage(addressTableSubspace)), + addressSet.OpenAddressSet(backingStorage.OpenCachedSubStorage(chainOwnerSubspace)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenCachedSubStorage(sendMerkleSubspace)), + blockhash.OpenBlockhashes(backingStorage.OpenCachedSubStorage(blockhashesSubspace)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), 
backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), backingStorage.OpenStorageBackedAddress(uint64(infraFeeAccountOffset)), + backingStorage.OpenStorageBackedUint64(uint64(brotliCompressionLevelOffset)), backingStorage, burner, }, nil @@ -139,6 +142,7 @@ const ( chainIdOffset genesisBlockNumOffset infraFeeAccountOffset + brotliCompressionLevelOffset ) type SubspaceID []byte @@ -215,19 +219,20 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p chainConfigStorage := sto.OpenStorageBackedBytes(chainConfigSubspace) _ = chainConfigStorage.Set(initMessage.SerializedChainConfig) _ = sto.SetUint64ByUint64(uint64(genesisBlockNumOffset), chainConfig.ArbitrumChainParams.GenesisBlockNum) + _ = sto.SetUint64ByUint64(uint64(brotliCompressionLevelOffset), 0) // default brotliCompressionLevel for fast compression is 0 initialRewardsRecipient := l1pricing.BatchPosterAddress if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace) + _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenCachedSubStorage(l2PricingSubspace)) + _ = retryables.InitializeRetryableState(sto.OpenCachedSubStorage(retryablesSubspace)) + addressTable.Initialize(sto.OpenCachedSubStorage(addressTableSubspace)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenCachedSubStorage(sendMerkleSubspace)) + blockhash.InitializeBlockhashes(sto.OpenCachedSubStorage(blockhashesSubspace)) + + ownersStorage := sto.OpenCachedSubStorage(chainOwnerSubspace) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -303,7 +308,32 @@ func (state *ArbosState) UpgradeArbosVersion( ErrFatalNodeOutOfDate, ) } - // no state changes needed + // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. + ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) + + // We had mistakenly initialized AmortizedCostCapBips to math.MaxUint64 in older versions, + // but the correct value to disable the amortization cap is 0. + oldAmortizationCap, err := state.l1PricingState.AmortizedCostCapBips() + ensure(err) + if oldAmortizationCap == math.MaxUint64 { + ensure(state.l1PricingState.SetAmortizedCostCapBips(0)) + } + + // Clear chainOwners list to allow rectification of the mapping. 
+ if !firstTime { + ensure(state.chainOwners.ClearList()) + } + case 11: + if !chainConfig.DebugMode() { + // This upgrade isn't finalized so we only want to support it for testing + return fmt.Errorf( + "the chain is upgrading to unsupported ArbOS version %v, %w", + state.arbosVersion+1, + ErrFatalNodeOutOfDate, + ) + } + // Update Brotli compression level for fast compression from 0 to 1 + ensure(state.SetBrotliCompressionLevel(1)) default: return fmt.Errorf( "the chain is upgrading to unsupported ArbOS version %v, %w", @@ -315,7 +345,9 @@ func (state *ArbosState) UpgradeArbosVersion( } if firstTime && upgradeTo >= 6 { - state.Restrict(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV6)) + if upgradeTo < 11 { + state.Restrict(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV6)) + } state.Restrict(state.l1PricingState.SetEquilibrationUnits(l1pricing.InitialEquilibrationUnitsV6)) state.Restrict(state.l2PricingState.SetSpeedLimitPerSecond(l2pricing.InitialSpeedLimitPerSecondV6)) state.Restrict(state.l2PricingState.SetMaxPerBlockGasLimit(l2pricing.InitialPerBlockGasLimitV6)) @@ -363,6 +395,17 @@ func (state *ArbosState) SetFormatVersion(val uint64) { state.Restrict(state.backingStorage.SetUint64ByUint64(uint64(versionOffset), val)) } +func (state *ArbosState) BrotliCompressionLevel() (uint64, error) { + return state.brotliCompressionLevel.Get() +} + +func (state *ArbosState) SetBrotliCompressionLevel(val uint64) error { + if val <= arbcompress.LEVEL_WELL { + return state.brotliCompressionLevel.Set(val) + } + return errors.New("invalid brotli compression level") +} + func (state *ArbosState) RetryableState() *retryables.RetryableState { return state.retryableState } @@ -385,7 +428,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenCachedSubStorage(sendMerkleSubspace)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index c4643c9183..ef63c23386 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index e98ab08485..9f24d96765 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -189,7 +189,8 @@ func initializeRetryables(statedb *state.StateDB, rs *retryables.RetryableState, for _, r := range retryablesList { var to *common.Address if r.To != (common.Address{}) { - to = &r.To + addr := r.To + to = &addr } statedb.AddBalance(retryables.RetryableEscrowAddress(r.Id), r.Callvalue) _, err := rs.CreateRetryable(r.Id, r.Timeout, r.From, to, r.Callvalue, r.Beneficiary, r.Calldata) diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go index e9a5466d46..04ce8ebe2e 100644 --- a/arbos/arbostypes/incomingmessage.go +++ 
b/arbos/arbostypes/incomingmessage.go @@ -127,14 +127,21 @@ func (msg *L1IncomingMessage) Equals(other *L1IncomingMessage) bool { return msg.Header.Equals(other.Header) && bytes.Equal(msg.L2msg, other.L2msg) } +func hashesEqual(ha, hb *common.Hash) bool { + if (ha == nil) != (hb == nil) { + return false + } + return (ha == nil && hb == nil) || *ha == *hb +} + func (h *L1IncomingMessageHeader) Equals(other *L1IncomingMessageHeader) bool { // These are all non-pointer types so it's safe to use the == operator return h.Kind == other.Kind && h.Poster == other.Poster && h.BlockNumber == other.BlockNumber && h.Timestamp == other.Timestamp && - h.RequestId == other.RequestId && - h.L1BaseFee == other.L1BaseFee + hashesEqual(h.RequestId, other.RequestId) && + arbmath.BigEquals(h.L1BaseFee, other.L1BaseFee) } func ComputeBatchGasCost(data []byte) uint64 { diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 9f208c4404..6f87864b61 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -39,6 +40,7 @@ var L2ToL1TransactionEventID common.Hash var L2ToL1TxEventID common.Hash var EmitReedeemScheduledEvent func(*vm.EVM, uint64, uint64, [32]byte, [32]byte, common.Address, *big.Int, *big.Int) error var EmitTicketCreatedEvent func(*vm.EVM, [32]byte) error +var gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) type L1Info struct { poster common.Address @@ -190,7 +192,7 @@ func ProduceBlockAdvanced( } header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig) - signer := types.MakeSigner(chainConfig, header.Number) + signer := types.MakeSigner(chainConfig, header.Number, header.Time) // Note: blockGasLeft will diverge from the actual gas left during execution in the event of invalid txs, // but it's only used as block-local representation limiting the amount of work done in a block. blockGasLeft, _ := state.L2PricingState().PerBlockGasLimit() @@ -269,7 +271,11 @@ func ProduceBlockAdvanced( if basefee.Sign() > 0 { dataGas = math.MaxUint64 - posterCost, _ := state.L1PricingState().GetPosterInfo(tx, poster) + brotliCompressionLevel, err := state.BrotliCompressionLevel() + if err != nil { + return nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) + } + posterCost, _ := state.L1PricingState().GetPosterInfo(tx, poster, brotliCompressionLevel) posterCostInL2Gas := arbmath.BigDiv(posterCost, basefee) if posterCostInL2Gas.IsUint64() { @@ -343,11 +349,7 @@ func ProduceBlockAdvanced( log.Debug("error applying transaction", "tx", tx, "err", err) if !hooks.DiscardInvalidTxsEarly { // we'll still deduct a TxGas's worth from the block-local rate limiter even if the tx was invalid - if blockGasLeft > params.TxGas { - blockGasLeft -= params.TxGas - } else { - blockGasLeft = 0 - } + blockGasLeft = arbmath.SaturatingUSub(blockGasLeft, params.TxGas) if isUserTx { userTxsProcessed++ } @@ -416,11 +418,11 @@ func ProduceBlockAdvanced( } } - if blockGasLeft > computeUsed { - blockGasLeft -= computeUsed - } else { - blockGasLeft = 0 - } + blockGasLeft = arbmath.SaturatingUSub(blockGasLeft, computeUsed) + + // Add gas used since startup to prometheus metric. 
+ gasUsed := arbmath.SaturatingUSub(receipt.GasUsed, receipt.GasUsedForL1) + gasUsedSinceStartupCounter.Inc(arbmath.SaturatingCast(gasUsed)) complete = append(complete, tx) receipts = append(receipts, receipt) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 2eedf7f5bb..34c907207c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage, backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.WithoutCache(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/engine.go b/arbos/engine.go index ebc27c0886..0014e8ab96 100644 --- a/arbos/engine.go +++ b/arbos/engine.go @@ -23,15 +23,15 @@ func (e Engine) Author(header *types.Header) (common.Address, error) { return header.Coinbase, nil } -func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { +func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { // TODO what verification should be done here? return nil } -func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { errors := make(chan error, len(headers)) for i := range headers { - errors <- e.VerifyHeader(chain, headers[i], seals[i]) + errors <- e.VerifyHeader(chain, headers[i]) } return make(chan struct{}), errors } diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 97b7b16234..a3428c441c 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,12 +42,12 @@ func InitializeBatchPostersTable(storage *storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey)) + return addressSet.Initialize(storage.OpenCachedSubStorage(PosterAddrsKey)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey)), + posterAddrs: addressSet.OpenAddressSet(storage.OpenCachedSubStorage(PosterAddrsKey)), posterInfo: storage.OpenSubStorage(PosterInfoKey), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9772ac028b..27ecae8b85 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -72,9 +72,10 @@ const ( ) const ( - InitialInertia = 10 - InitialPerUnitReward = 10 - InitialPerBatchGasCostV6 = 100000 + InitialInertia = 10 + InitialPerUnitReward = 10 + InitialPerBatchGasCostV6 = 100_000 + InitialPerBatchGasCostV12 = 210_000 // overridden as part of the upgrade ) // one minute at 100000 bytes / sec @@ -82,7 +83,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey) + bptStorage := 
sto.OpenCachedSubStorage(BatchPosterTableKey) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -117,7 +118,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey)), + OpenBatchPostersTable(sto.OpenCachedSubStorage(BatchPosterTableKey)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), @@ -145,6 +146,10 @@ func (ps *L1PricingState) SetPayRewardsTo(addr common.Address) error { return ps.payRewardsTo.Set(addr) } +func (ps *L1PricingState) GetRewardsRecepient() (common.Address, error) { + return ps.payRewardsTo.Get() +} + func (ps *L1PricingState) EquilibrationUnits() (*big.Int, error) { return ps.equilibrationUnits.Get() } @@ -169,6 +174,10 @@ func (ps *L1PricingState) SetPerUnitReward(weiPerUnit uint64) error { return ps.perUnitReward.Set(weiPerUnit) } +func (ps *L1PricingState) GetRewardsRate() (uint64, error) { + return ps.perUnitReward.Get() +} + func (ps *L1PricingState) LastUpdateTime() (uint64, error) { return ps.lastUpdateTime.Get() } @@ -480,7 +489,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( return nil } -func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, posterAddr common.Address) uint64 { +func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, posterAddr common.Address, brotliCompressionLevel uint64) uint64 { if posterAddr != BatchPosterAddress { return 0 @@ -491,7 +500,7 @@ func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, post return 0 } - l1Bytes, err := byteCountAfterBrotli0(txBytes) + l1Bytes, err := byteCountAfterBrotliLevel(txBytes, int(brotliCompressionLevel)) if err != nil { panic(fmt.Sprintf("failed to compress tx: %v", err)) } @@ -499,13 +508,13 @@ func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, post } // GetPosterInfo returns the poster cost and the calldata units for a transaction -func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Address) (*big.Int, uint64) { +func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Address, brotliCompressionLevel uint64) (*big.Int, uint64) { if poster != BatchPosterAddress { return common.Big0, 0 } units := atomic.LoadUint64(&tx.CalldataUnits) if units == 0 { - units = ps.getPosterUnitsWithoutCache(tx, poster) + units = ps.getPosterUnitsWithoutCache(tx, poster, brotliCompressionLevel) atomic.StoreUint64(&tx.CalldataUnits, units) } @@ -561,23 +570,23 @@ func makeFakeTxForMessage(message *core.Message) *types.Transaction { }) } -func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Address) (*big.Int, uint64) { +func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Address, brotliCompressionLevel uint64) (*big.Int, uint64) { tx := message.Tx if tx != nil { - return ps.GetPosterInfo(tx, poster) + return ps.GetPosterInfo(tx, poster, brotliCompressionLevel) } // Otherwise, we don't have an underlying transaction, so we're likely in gas estimation. // We'll instead make a fake tx from the message info we do have, and then pad our cost a bit to be safe. 
	tx = makeFakeTxForMessage(message)
-	units := ps.getPosterUnitsWithoutCache(tx, poster)
+	units := ps.getPosterUnitsWithoutCache(tx, poster, brotliCompressionLevel)
 	units = arbmath.UintMulByBips(units+estimationPaddingUnits, arbmath.OneInBips+estimationPaddingBasisPoints)
 	pricePerUnit, _ := ps.PricePerUnit()
 	return am.BigMulByUint(pricePerUnit, units), units
 }
 
-func byteCountAfterBrotli0(input []byte) (uint64, error) {
-	compressed, err := arbcompress.CompressFast(input)
+func byteCountAfterBrotliLevel(input []byte, level int) (uint64, error) {
+	compressed, err := arbcompress.CompressLevel(input, level)
 	if err != nil {
 		return 0, err
 	}
diff --git a/arbos/queue_test.go b/arbos/queue_test.go
index d8d491bdb0..ff993a233f 100644
--- a/arbos/queue_test.go
+++ b/arbos/queue_test.go
@@ -14,7 +14,7 @@ import (
 
 func TestQueue(t *testing.T) {
 	state, statedb := arbosState.NewArbosMemoryBackedArbOSState()
-	sto := state.BackingStorage().OpenSubStorage([]byte{})
+	sto := state.BackingStorage().OpenCachedSubStorage([]byte{})
 	Require(t, storage.InitializeQueue(sto))
 	q := storage.OpenQueue(sto)
 
diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go
index abea2ab7bd..6984e41904 100644
--- a/arbos/retryables/retryable.go
+++ b/arbos/retryables/retryable.go
@@ -31,13 +31,13 @@ var (
 )
 
 func InitializeRetryableState(sto *storage.Storage) error {
-	return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey))
+	return storage.InitializeQueue(sto.OpenCachedSubStorage(timeoutQueueKey))
 }
 
 func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState {
 	return &RetryableState{
 		sto,
-		storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey)),
+		storage.OpenQueue(sto.OpenCachedSubStorage(timeoutQueueKey)),
 	}
 }
 
@@ -150,6 +150,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario
 		return false, err
 	}
 
+	// we ignore the returned error as we expect that if one ClearByUint64 fails, then all consecutive calls to ClearByUint64 will fail with the same error (not modifying state), and then ClearBytes will also fail with the same error (also not modifying state) - and that one we check and return
 	_ = retStorage.ClearByUint64(numTriesOffset)
 	_ = retStorage.ClearByUint64(fromOffset)
 	_ = retStorage.ClearByUint64(toOffset)
diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go
index 55231d3a90..9c02dc1ee7 100644
--- a/arbos/storage/queue.go
+++ b/arbos/storage/queue.go
@@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error {
 
 func OpenQueue(sto *Storage) *Queue {
 	return &Queue{
-		sto,
+		sto.WithoutCache(),
 		sto.OpenStorageBackedUint64(0),
 		sto.OpenStorageBackedUint64(1),
 	}
diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go
index 478ad68f8f..63987b91f8 100644
--- a/arbos/storage/storage.go
+++ b/arbos/storage/storage.go
@@ -4,10 +4,13 @@
 package storage
 
 import (
+	"bytes"
 	"fmt"
 	"math/big"
+	"sync/atomic"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/lru"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -43,12 +46,18 @@ type Storage struct {
 	db         vm.StateDB
 	storageKey []byte
 	burner     burn.Burner
+	hashCache  *lru.Cache[string, []byte]
 }
 
 const StorageReadCost = params.SloadGasEIP2200
 const StorageWriteCost = params.SstoreSetGasEIP2200
 const StorageWriteZeroCost = params.SstoreResetGasEIP2200
 
+const storageKeyCacheSize = 1024
+
+var storageHashCache = lru.NewCache[string, []byte](storageKeyCacheSize)
+var cacheFullLogged
atomic.Bool + // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { account := common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") @@ -58,6 +67,7 @@ func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { db: statedb, storageKey: []byte{}, burner: burner, + hashCache: storageHashCache, } } @@ -81,15 +91,13 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (s *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - return common.BytesToHash( - append( - crypto.Keccak256(storageKey, keyBytes[:boundary])[:boundary], - keyBytes[boundary], - ), - ) + mapped := make([]byte, 0, common.HashLength) + mapped = append(mapped, s.cachedKeccak(s.storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, keyBytes[boundary]) + return common.BytesToHash(mapped) } func writeCost(value common.Hash) uint64 { @@ -99,117 +107,139 @@ func writeCost(value common.Hash) uint64 { return StorageWriteCost } -func (store *Storage) Account() common.Address { - return store.account +func (s *Storage) Account() common.Address { + return s.account } -func (store *Storage) Get(key common.Hash) (common.Hash, error) { - err := store.burner.Burn(StorageReadCost) +func (s *Storage) Get(key common.Hash) (common.Hash, error) { + err := s.burner.Burn(StorageReadCost) if err != nil { return common.Hash{}, err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, mapAddress(store.storageKey, key)), nil + return s.db.GetState(s.account, s.mapAddress(key)), nil } -func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return mapAddress(store.storageKey, key) +func (s *Storage) GetStorageSlot(key common.Hash) common.Hash { + return s.mapAddress(key) } -func (store *Storage) GetUint64(key common.Hash) (uint64, error) { - value, err := store.Get(key) +func (s *Storage) GetUint64(key common.Hash) (uint64, error) { + value, err := s.Get(key) return value.Big().Uint64(), err } -func (store *Storage) GetByUint64(key uint64) (common.Hash, error) { - return store.Get(util.UintToHash(key)) +func (s *Storage) GetByUint64(key uint64) (common.Hash, error) { + return s.Get(util.UintToHash(key)) } -func (store *Storage) GetUint64ByUint64(key uint64) (uint64, error) { - return store.GetUint64(util.UintToHash(key)) +func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { + return s.GetUint64(util.UintToHash(key)) } -func (store *Storage) Set(key common.Hash, value common.Hash) error { - if store.burner.ReadOnly() { +func (s *Storage) Set(key common.Hash, value common.Hash) error { + if s.burner.ReadOnly() { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) return vm.ErrWriteProtection } - err := store.burner.Burn(writeCost(value)) + err := s.burner.Burn(writeCost(value)) if err != nil { return err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, 
mapAddress(store.storageKey, key), value) + s.db.SetState(s.account, s.mapAddress(key), value) return nil } -func (store *Storage) SetByUint64(key uint64, value common.Hash) error { - return store.Set(util.UintToHash(key), value) +func (s *Storage) SetByUint64(key uint64, value common.Hash) error { + return s.Set(util.UintToHash(key), value) } -func (store *Storage) SetUint64ByUint64(key uint64, value uint64) error { - return store.Set(util.UintToHash(key), util.UintToHash(value)) +func (s *Storage) SetUint64ByUint64(key uint64, value uint64) error { + return s.Set(util.UintToHash(key), util.UintToHash(value)) } -func (store *Storage) Clear(key common.Hash) error { - return store.Set(key, common.Hash{}) +func (s *Storage) Clear(key common.Hash) error { + return s.Set(key, common.Hash{}) } -func (store *Storage) ClearByUint64(key uint64) error { - return store.Set(util.UintToHash(key), common.Hash{}) +func (s *Storage) ClearByUint64(key uint64) error { + return s.Set(util.UintToHash(key), common.Hash{}) } -func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { - oldValue, err := store.Get(key) +func (s *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { + oldValue, err := s.Get(key) if err != nil { return common.Hash{}, err } - return oldValue, store.Set(key, newValue) + return oldValue, s.Set(key, newValue) +} + +func (s *Storage) OpenCachedSubStorage(id []byte) *Storage { + return &Storage{ + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: storageHashCache, + } +} +func (s *Storage) OpenSubStorage(id []byte) *Storage { + return &Storage{ + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: nil, + } } -func (store *Storage) OpenSubStorage(id []byte) *Storage { +// Returns shallow copy of Storage that won't use storage key hash cache. +// The storage space represented by the returned Storage is kept the same. 
+func (s *Storage) WithoutCache() *Storage { return &Storage{ - store.account, - store.db, - crypto.Keccak256(store.storageKey, id), - store.burner, + account: s.account, + db: s.db, + storageKey: s.storageKey, + burner: s.burner, + hashCache: nil, } } -func (store *Storage) SetBytes(b []byte) error { - err := store.ClearBytes() +func (s *Storage) SetBytes(b []byte) error { + err := s.ClearBytes() if err != nil { return err } - err = store.SetUint64ByUint64(0, uint64(len(b))) + err = s.SetUint64ByUint64(0, uint64(len(b))) if err != nil { return err } offset := uint64(1) for len(b) >= 32 { - err = store.SetByUint64(offset, common.BytesToHash(b[:32])) + err = s.SetByUint64(offset, common.BytesToHash(b[:32])) if err != nil { return err } b = b[32:] offset++ } - return store.SetByUint64(offset, common.BytesToHash(b)) + return s.SetByUint64(offset, common.BytesToHash(b)) } -func (store *Storage) GetBytes() ([]byte, error) { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) GetBytes() ([]byte, error) { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return nil, err } ret := []byte{} offset := uint64(1) for bytesLeft >= 32 { - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -217,7 +247,7 @@ func (store *Storage) GetBytes() ([]byte, error) { bytesLeft -= 32 offset++ } - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -225,18 +255,18 @@ func (store *Storage) GetBytes() ([]byte, error) { return ret, nil } -func (store *Storage) GetBytesSize() (uint64, error) { - return store.GetUint64ByUint64(0) +func (s *Storage) GetBytesSize() (uint64, error) { + return s.GetUint64ByUint64(0) } -func (store *Storage) ClearBytes() error { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) ClearBytes() error { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return err } offset := uint64(1) for bytesLeft > 0 { - err := store.ClearByUint64(offset) + err := s.ClearByUint64(offset) if err != nil { return err } @@ -247,30 +277,51 @@ func (store *Storage) ClearBytes() error { bytesLeft -= 32 } } - return store.ClearByUint64(0) + return s.ClearByUint64(0) } -func (store *Storage) Burner() burn.Burner { - return store.burner // not public because these should never be changed once set +func (s *Storage) Burner() burn.Burner { + return s.burner // not public because these should never be changed once set } -func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { +func (s *Storage) Keccak(data ...[]byte) ([]byte, error) { byteCount := 0 for _, part := range data { byteCount += len(part) } cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount)) - if err := store.burner.Burn(cost); err != nil { + if err := s.burner.Burn(cost); err != nil { return nil, err } return crypto.Keccak256(data...), nil } -func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { - bytes, err := store.Keccak(data...) +func (s *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { + bytes, err := s.Keccak(data...) 
	return common.BytesToHash(bytes), err
 }
 
+// Returns the crypto.Keccak256 result for the given data.
+// If available, the result is taken from the hash cache;
+// otherwise crypto.Keccak256 is executed and its result is added to the cache and returned.
+// note: the method doesn't burn gas, as it's only intended for generating storage subspace keys and mapping slot addresses
+// note: the returned slice is not thread-safe
+func (s *Storage) cachedKeccak(data ...[]byte) []byte {
+	if s.hashCache == nil {
+		return crypto.Keccak256(data...)
+	}
+	keyString := string(bytes.Join(data, []byte{}))
+	if hash, wasCached := s.hashCache.Get(keyString); wasCached {
+		return hash
+	}
+	hash := crypto.Keccak256(data...)
+	evicted := s.hashCache.Add(keyString, hash)
+	if evicted && cacheFullLogged.CompareAndSwap(false, true) {
+		log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.")
+	}
+	return hash
+}
+
 type StorageSlot struct {
 	account common.Address
 	db      vm.StateDB
@@ -278,8 +329,8 @@ type StorageSlot struct {
 	burner burn.Burner
 }
 
-func (store *Storage) NewSlot(offset uint64) StorageSlot {
-	return StorageSlot{store.account, store.db, mapAddress(store.storageKey, util.UintToHash(offset)), store.burner}
+func (s *Storage) NewSlot(offset uint64) StorageSlot {
+	return StorageSlot{s.account, s.db, s.mapAddress(util.UintToHash(offset)), s.burner}
 }
 
 func (ss *StorageSlot) Get() (common.Hash, error) {
@@ -318,8 +369,8 @@ type StorageBackedInt64 struct {
 	StorageSlot
 }
 
-func (store *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 {
-	return StorageBackedInt64{store.NewSlot(offset)}
+func (s *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 {
+	return StorageBackedInt64{s.NewSlot(offset)}
 }
 
 func (sbu *StorageBackedInt64) Get() (int64, error) {
@@ -339,8 +390,8 @@ type StorageBackedBips struct {
 	backing StorageBackedInt64
 }
 
-func (store *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips {
-	return StorageBackedBips{StorageBackedInt64{store.NewSlot(offset)}}
+func (s *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips {
+	return StorageBackedBips{StorageBackedInt64{s.NewSlot(offset)}}
 }
 
 func (sbu *StorageBackedBips) Get() (arbmath.Bips, error) {
@@ -356,8 +407,8 @@ type StorageBackedUint64 struct {
 	StorageSlot
 }
 
-func (store *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 {
-	return StorageBackedUint64{store.NewSlot(offset)}
+func (s *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 {
+	return StorageBackedUint64{s.NewSlot(offset)}
 }
 
 func (sbu *StorageBackedUint64) Get() (uint64, error) {
@@ -444,8 +495,8 @@ type StorageBackedBigUint struct {
 	StorageSlot
 }
 
-func (store *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint {
-	return StorageBackedBigUint{store.NewSlot(offset)}
+func (s *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint {
+	return StorageBackedBigUint{s.NewSlot(offset)}
 }
 
 func (sbbu *StorageBackedBigUint) Get() (*big.Int, error) {
@@ -483,8 +534,8 @@ type StorageBackedBigInt struct {
 	StorageSlot
 }
 
-func (store *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt {
-	return StorageBackedBigInt{store.NewSlot(offset)}
+func (s *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt {
+	return StorageBackedBigInt{s.NewSlot(offset)}
 }
 
 func (sbbi *StorageBackedBigInt) Get() (*big.Int, error) {
@@ -540,8 +591,8 @@ type StorageBackedAddress struct {
 	StorageSlot
 }
 
-func (store *Storage)
OpenStorageBackedAddress(offset uint64) StorageBackedAddress { - return StorageBackedAddress{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { + return StorageBackedAddress{s.NewSlot(offset)} } func (sba *StorageBackedAddress) Get() (common.Address, error) { @@ -563,8 +614,8 @@ func init() { NilAddressRepresentation = common.BigToHash(new(big.Int).Lsh(big.NewInt(1), 255)) } -func (store *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { - return StorageBackedAddressOrNil{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { + return StorageBackedAddressOrNil{s.NewSlot(offset)} } func (sba *StorageBackedAddressOrNil) Get() (*common.Address, error) { @@ -588,9 +639,9 @@ type StorageBackedBytes struct { Storage } -func (store *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { +func (s *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id), + *s.OpenSubStorage(id), } } diff --git a/arbos/storage/storage_test.go b/arbos/storage/storage_test.go index 35e6b7c4be..a8d424d14e 100644 --- a/arbos/storage/storage_test.go +++ b/arbos/storage/storage_test.go @@ -1,11 +1,16 @@ package storage import ( + "bytes" + "fmt" "math/big" + "math/rand" + "sync" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -91,3 +96,73 @@ func TestStorageBackedBigInt(t *testing.T) { }) } } + +func TestOpenCachedSubStorage(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var subSpaceIDs [][]byte + for i := 0; i < 20; i++ { + subSpaceIDs = append(subSpaceIDs, []byte{byte(rand.Intn(0xff))}) + } + var expectedKeys [][]byte + for _, subSpaceID := range subSpaceIDs { + expectedKeys = append(expectedKeys, crypto.Keccak256(s.storageKey, subSpaceID)) + } + n := len(subSpaceIDs) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(subSpaceIDs) + subSpaceID, expectedKey := subSpaceIDs[j], expectedKeys[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + ss := s.OpenCachedSubStorage(subSpaceID) + if !bytes.Equal(ss.storageKey, expectedKey) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expectedKey, ss.storageKey) + } + }() + } + close(start) + wg.Wait() + select { + case err := <-errs: + t.Fatal(err) + default: + } +} + +func TestMapAddressCache(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var keys []common.Hash + for i := 0; i < 20; i++ { + keys = append(keys, common.BytesToHash([]byte{byte(rand.Intn(0xff))})) + } + var expectedMapped []common.Hash + for _, key := range keys { + expectedMapped = append(expectedMapped, s.mapAddress(key)) + } + n := len(keys) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(keys) + key, expected := keys[j], expectedMapped[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + mapped := s.mapAddress(key) + if !bytes.Equal(mapped.Bytes(), expected.Bytes()) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expected, mapped) + } + }() + } + close(start) + wg.Wait() + if len(errs) > 0 { + t.Fatal(<-errs) + } +} diff --git 
a/arbos/tx_processor.go b/arbos/tx_processor.go index d0f999d0de..569edb7c63 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -110,11 +110,11 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r evm := p.evm startTracer := func() func() { - if !evm.Config.Debug { + tracer := evm.Config.Tracer + if tracer == nil { return func() {} } evm.IncrementDepth() // fake a call - tracer := evm.Config.Tracer from := p.msg.From tracer.CaptureStart(evm, from, *p.msg.To, false, p.msg.Data, p.msg.GasLimit, p.msg.Value) @@ -245,16 +245,17 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } balance := statedb.GetBalance(tx.From) - basefee := evm.Context.BaseFee + effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) - maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, basefee) - if p.msg.TxRunMode == core.MessageGasEstimationMode && tx.GasFeeCap.BitLen() == 0 { - // In gas estimation mode, we permit a zero gas fee cap. - // This matches behavior with normal tx gas estimation. - maxFeePerGasTooLow = false + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { + // In gas estimation or eth_call mode, we permit a zero gas fee cap. + // This matches behavior with normal tx gas estimation and eth_call. + effectiveBaseFee = common.Big0 } + + maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) + maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, effectiveBaseFee) if arbmath.BigLessThan(balance, maxGasCost) || usergas < params.TxGas || maxFeePerGasTooLow { // User either specified too low of a gas fee cap, didn't have enough balance to pay for gas, // or the specified gas limit is below the minimum transaction gas cost. 
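
The zero-fee-cap rule in the hunk above is worth restating in isolation: outside commit mode (gas estimation, eth_call), a transaction with a zero gas fee cap is priced against a zero base fee instead of being rejected as underpriced. Below is a minimal, self-contained Go sketch of that rule; RunMode, CommitMode, and effectiveBaseFee are illustrative stand-ins for the real core.MessageCommitMode / p.msg.GasFeeCap machinery, not the repository's API.

package main

import (
	"fmt"
	"math/big"
)

// RunMode mirrors the distinction the diff draws between committing a
// transaction and merely simulating it (gas estimation or eth_call).
type RunMode int

const (
	CommitMode RunMode = iota
	GasEstimationMode
	EthCallMode
)

// effectiveBaseFee applies the rule from StartTxHook: outside commit mode,
// a zero gas fee cap is permitted, so the base fee is treated as zero and
// the "fee cap below base fee" check can no longer trip.
func effectiveBaseFee(baseFee, gasFeeCap *big.Int, mode RunMode) *big.Int {
	if mode != CommitMode && gasFeeCap.BitLen() == 0 {
		return big.NewInt(0)
	}
	return baseFee
}

func main() {
	baseFee := big.NewInt(100_000_000) // 0.1 gwei
	zeroCap := big.NewInt(0)

	// Simulation with a zero fee cap: the base fee is zeroed, so the tx passes.
	eff := effectiveBaseFee(baseFee, zeroCap, GasEstimationMode)
	fmt.Println("estimation: feeCapTooLow =", zeroCap.Cmp(eff) < 0)

	// Commit mode keeps the real base fee, so a zero cap is too low.
	eff = effectiveBaseFee(baseFee, zeroCap, CommitMode)
	fmt.Println("commit:     feeCapTooLow =", zeroCap.Cmp(eff) < 0)
}

The same run-mode guard reappears below in ScheduledTxes and GetPaidGasPrice, keeping estimation and eth_call behavior consistent across the retryable path.
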
@@ -268,15 +269,33 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } // pay for the retryable's gas and update the pools - gascost := arbmath.BigMulByUint(basefee, usergas) - if err := transfer(&tx.From, &networkFeeAccount, gascost); err != nil { - // should be impossible because we just checked the tx.From balance - glog.Error("failed to transfer gas cost to network fee account", "err", err) - return true, 0, nil, ticketId.Bytes() + gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) + networkCost := gascost + if p.state.ArbOSVersion() >= 11 { + infraFeeAccount, err := p.state.InfraFeeAccount() + p.state.Restrict(err) + if infraFeeAccount != (common.Address{}) { + minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei() + p.state.Restrict(err) + infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee) + infraCost := arbmath.BigMulByUint(infraFee, usergas) + infraCost = takeFunds(networkCost, infraCost) + if err := transfer(&tx.From, &infraFeeAccount, infraCost); err != nil { + glog.Error("failed to transfer gas cost to infrastructure fee account", "err", err) + return true, 0, nil, ticketId.Bytes() + } + } + } + if arbmath.BigGreaterThan(networkCost, common.Big0) { + if err := transfer(&tx.From, &networkFeeAccount, networkCost); err != nil { + // should be impossible because we just checked the tx.From balance + glog.Error("failed to transfer gas cost to network fee account", "err", err) + return true, 0, nil, ticketId.Bytes() + } } withheldGasFunds := takeFunds(availableRefund, gascost) // gascost is conceptually charged before the gas price refund - gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, basefee), tx.Gas) + gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, effectiveBaseFee), tx.Gas) if gasPriceRefund.Sign() < 0 { // This should only be possible during gas estimation mode gasPriceRefund.SetInt64(0) @@ -292,7 +311,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r retryTxInner, err := retryable.MakeTx( underlyingTx.ChainId(), 0, - basefee, + effectiveBaseFee, usergas, ticketId, tx.FeeRefundAddr, @@ -318,7 +337,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r glog.Error("failed to emit RedeemScheduled event", "err", err) } - if evm.Config.Debug { + if tracer := evm.Config.Tracer; tracer != nil { redeem, err := util.PackArbRetryableTxRedeem(ticketId) if err == nil { tracingInfo.MockCall(redeem, usergas, from, types.ArbRetryableTxAddress, common.Big0) @@ -385,11 +404,18 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err poster = p.evm.Context.Coinbase } - if basefee.Sign() > 0 { + if p.msg.TxRunMode == core.MessageCommitMode { + p.msg.SkipL1Charging = false + } + if basefee.Sign() > 0 && !p.msg.SkipL1Charging { // Since tips go to the network, and not to the poster, we use the basefee. // Note, this only determines the amount of gas bought, not the price per gas. 
- posterCost, calldataUnits := p.state.L1PricingState().PosterDataCost(p.msg, poster) + brotliCompressionLevel, err := p.state.BrotliCompressionLevel() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get brotli compression level: %w", err) + } + posterCost, calldataUnits := p.state.L1PricingState().PosterDataCost(p.msg, poster, brotliCompressionLevel) if calldataUnits > 0 { p.state.Restrict(p.state.L1PricingState().AddToUnitsSinceUpdate(calldataUnits)) } @@ -431,7 +457,6 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { underlyingTx := p.msg.Tx networkFeeAccount, _ := p.state.NetworkFeeAccount() - basefee := p.evm.Context.BaseFee scenario := util.TracingAfterEVM if gasLeft > p.msg.GasLimit { @@ -441,46 +466,71 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) + effectiveBaseFee := inner.GasFeeCap + if p.msg.TxRunMode == core.MessageCommitMode && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { + log.Error( + "ArbitrumRetryTx GasFeeCap doesn't match basefee in commit mode", + "txHash", underlyingTx.Hash(), + "gasFeeCap", inner.GasFeeCap, + "baseFee", p.evm.Context.BaseFee, + ) + // revert to the old behavior to avoid diverging from older nodes + effectiveBaseFee = p.evm.Context.BaseFee + } // undo Geth's refund to the From address - gasRefund := arbmath.BigMulByUint(basefee, gasLeft) + gasRefund := arbmath.BigMulByUint(effectiveBaseFee, gasLeft) err := util.BurnBalance(&inner.From, gasRefund, p.evm, scenario, "undoRefund") if err != nil { log.Error("Uh oh, Geth didn't refund the user", inner.From, gasRefund) } maxRefund := new(big.Int).Set(inner.MaxRefund) - refundNetworkFee := func(amount *big.Int) { - const errLog = "network fee address doesn't have enough funds to give user refund" + refund := func(refundFrom common.Address, amount *big.Int) { + const errLog = "fee address doesn't have enough funds to give user refund" // Refund funds to the fee refund address without overdrafting the L1 deposit. toRefundAddr := takeFunds(maxRefund, amount) - err = util.TransferBalance(&networkFeeAccount, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund") + err = util.TransferBalance(&refundFrom, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund") if err != nil { // Normally the network fee address should be holding any collected fees. - // However, in theory, they could've been transfered out during the redeem attempt. + // However, in theory, they could've been transferred out during the redeem attempt. // If the network fee address doesn't have the necessary balance, log an error and don't give a refund. - log.Error(errLog, "err", err) + log.Error(errLog, "err", err, "feeAddress", refundFrom) } // Any extra refund can't be given to the fee refund address if it didn't come from the L1 deposit. // Instead, give the refund to the retryable from address. - err = util.TransferBalance(&networkFeeAccount, &inner.From, arbmath.BigSub(amount, toRefundAddr), p.evm, scenario, "refund") + err = util.TransferBalance(&refundFrom, &inner.From, arbmath.BigSub(amount, toRefundAddr), p.evm, scenario, "refund") if err != nil { - log.Error(errLog, "err", err) + log.Error(errLog, "err", err, "feeAddress", refundFrom) } } if success { // If successful, refund the submission fee. 
-			refundNetworkFee(inner.SubmissionFeeRefund)
+			refund(networkFeeAccount, inner.SubmissionFeeRefund)
 		} else {
 			// The submission fee is still taken from the L1 deposit earlier, even if it's not refunded.
 			takeFunds(maxRefund, inner.SubmissionFeeRefund)
 		}
 		// Conceptually, the gas charge is taken from the L1 deposit pool if possible.
-		takeFunds(maxRefund, arbmath.BigMulByUint(basefee, gasUsed))
+		takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed))
 		// Refund any unused gas, without overdrafting the L1 deposit.
-		refundNetworkFee(gasRefund)
+		networkRefund := gasRefund
+		if p.state.ArbOSVersion() >= 11 {
+			infraFeeAccount, err := p.state.InfraFeeAccount()
+			p.state.Restrict(err)
+			if infraFeeAccount != (common.Address{}) {
+				minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei()
+				p.state.Restrict(err)
+				// TODO: a MinBaseFeeWei change during retry tx execution may cause the portion of
+				// the refund taken from the infraFeeAccount to be calculated incorrectly. Unless the
+				// balances of the network and infra fee accounts are too low, the total amount sent
+				// to the refund address should still be correct.
+				infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee)
+				infraRefund := arbmath.BigMulByUint(infraFee, gasLeft)
+				infraRefund = takeFunds(networkRefund, infraRefund)
+				refund(infraFeeAccount, infraRefund)
+			}
+		}
+		refund(networkFeeAccount, networkRefund)
 		if success {
 			// we don't want to charge for this
@@ -502,6 +552,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) {
 		return
 	}
+	basefee := p.evm.Context.BaseFee
 	totalCost := arbmath.BigMul(basefee, arbmath.UintToBig(gasUsed)) // total cost = price of gas * gas burnt
 	computeCost := arbmath.BigSub(totalCost, p.PosterFee)            // total cost = network's compute + poster's L1 costs
 	if computeCost.Sign() < 0 {
@@ -518,11 +569,9 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) {
 		infraFeeAccount, err := p.state.InfraFeeAccount()
 		p.state.Restrict(err)
 		if infraFeeAccount != (common.Address{}) {
-			infraFee, err := p.state.L2PricingState().MinBaseFeeWei()
+			minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei()
 			p.state.Restrict(err)
-			if arbmath.BigLessThan(basefee, infraFee) {
-				infraFee = basefee
-			}
+			infraFee := arbmath.BigMin(minBaseFee, basefee)
 			computeGas := arbmath.SaturatingUSub(gasUsed, p.posterGas)
 			infraComputeCost := arbmath.BigMulByUint(infraFee, computeGas)
 			util.MintBalance(&infraFeeAccount, infraComputeCost, p.evm, scenario, purpose)
@@ -565,9 +614,15 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) {
 func (p *TxProcessor) ScheduledTxes() types.Transactions {
 	scheduled := types.Transactions{}
 	time := p.evm.Context.Time
-	basefee := p.evm.Context.BaseFee
+	effectiveBaseFee := p.evm.Context.BaseFee
 	chainID := p.evm.ChainConfig().ChainID
+	if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 {
+		// In gas estimation or eth_call mode, we permit a zero gas fee cap.
+		// This matches behavior with normal tx gas estimation and eth_call.
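+		// (This branch is unreachable in commit mode, so pricing scheduled
+		// redeems at a zero basefee cannot affect consensus state.)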
+ effectiveBaseFee = common.Big0 + } + logs := p.evm.StateDB.GetCurrentTxLogs() for _, log := range logs { if log.Address != ArbRetryableTxAddress || log.Topics[0] != RedeemScheduledEventID { @@ -586,7 +641,7 @@ func (p *TxProcessor) ScheduledTxes() types.Transactions { redeem, _ := retryable.MakeTx( chainID, event.SequenceNum, - basefee, + effectiveBaseFee, event.DonatedGas, event.TicketId, event.GasDonor, @@ -644,7 +699,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { if version != 9 { gasPrice = p.evm.Context.BaseFee if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.Sign() == 0 { - gasPrice.SetInt64(0) // gasprice zero behavior + gasPrice = common.Big0 } } return gasPrice diff --git a/arbos/util/tracing.go b/arbos/util/tracing.go index b91df3b403..e4cde0f42b 100644 --- a/arbos/util/tracing.go +++ b/arbos/util/tracing.go @@ -36,7 +36,7 @@ func (a addressHolder) Address() common.Address { } func NewTracingInfo(evm *vm.EVM, from, to common.Address, scenario TracingScenario) *TracingInfo { - if evm.Config.Tracer == nil || !evm.Config.Debug { + if evm.Config.Tracer == nil { return nil } return &TracingInfo{ diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index 6f05c2e5e1..3a81181200 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -15,7 +15,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" ) -// TransferBalance represents a balance change occuring aside from a call. +// TransferBalance represents a balance change occurring aside from a call. // While most uses will be transfers, setting `from` or `to` to nil will mint or burn funds, respectively. func TransferBalance( from, to *common.Address, @@ -37,11 +37,9 @@ func TransferBalance( if to != nil { evm.StateDB.AddBalance(*to, amount) } - if evm.Config.Debug { - tracer := evm.Config.Tracer - + if tracer := evm.Config.Tracer; tracer != nil { if evm.Depth() != 0 && scenario != TracingDuringEVM { - // A non-zero depth implies this transfer is occuring inside EVM execution + // A non-zero depth implies this transfer is occurring inside EVM execution log.Error("Tracing scenario mismatch", "scenario", scenario, "depth", evm.Depth()) return errors.New("tracing scenario mismatch") } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 80d40322c9..3995bcf308 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/zeroheavy" ) @@ -128,9 +129,16 @@ func RecoverPayloadFromDasBatch( batchNum uint64, sequencerMsg []byte, dasReader DataAvailabilityReader, - preimages map[common.Hash][]byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, keysetValidationMode KeysetValidationMode, ) ([]byte, error) { + var keccakPreimages map[common.Hash][]byte + if preimages != nil { + if preimages[arbutil.Keccak256PreimageType] == nil { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + keccakPreimages = preimages[arbutil.Keccak256PreimageType] + } cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) if err != nil { log.Error("Failed to deserialize DAS message", "err", err) @@ -138,7 +146,7 @@ func RecoverPayloadFromDasBatch( } version := cert.Version recordPreimage := func(key common.Hash, value []byte) { - preimages[key] = value + keccakPreimages[key] = value } if 
version >= 2 {
@@ -179,7 +187,7 @@ func RecoverPayloadFromDasBatch(
 		log.Error("Couldn't get keyset", "err", err)
 		return nil, err
 	}
-	if preimages != nil {
+	if keccakPreimages != nil {
 		dastree.RecordHash(recordPreimage, keysetPreimage)
 	}
@@ -211,11 +219,11 @@ func RecoverPayloadFromDasBatch(
 		return nil, err
 	}
-	if preimages != nil {
+	if keccakPreimages != nil {
 		if version == 0 {
 			treeLeaf := dastree.FlatHashToTreeLeaf(dataHash)
-			preimages[dataHash] = payload
-			preimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf
+			keccakPreimages[dataHash] = payload
+			keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf
 		} else {
 			dastree.RecordHash(recordPreimage, payload)
 		}
diff --git a/arbutil/hash.go b/arbutil/hash.go
new file mode 100644
index 0000000000..c6e91c8ebf
--- /dev/null
+++ b/arbutil/hash.go
@@ -0,0 +1,26 @@
+package arbutil
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+)
+
+// PaddedKeccak256 pads each argument to 32 bytes, concatenates them, and
+// returns the keccak256 hash of the result.
+func PaddedKeccak256(args ...[]byte) []byte {
+	var data []byte
+	for _, arg := range args {
+		data = append(data, common.BytesToHash(arg).Bytes()...)
+	}
+	return crypto.Keccak256(data)
+}
+
+// SumBytes sums two byte slices and returns the result.
+// If the sum overflows 32 bytes, it returns the last 32 bytes of the result.
+func SumBytes(a, b []byte) []byte {
+	A := big.NewInt(0).SetBytes(a)
+	B := big.NewInt(0).SetBytes(b)
+	return common.BytesToHash((A.Add(A, B)).Bytes()).Bytes()
+}
diff --git a/arbutil/hash_test.go b/arbutil/hash_test.go
new file mode 100644
index 0000000000..2b93353d08
--- /dev/null
+++ b/arbutil/hash_test.go
@@ -0,0 +1,83 @@
+package arbutil
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestSlotAddress(t *testing.T) {
+	for _, tc := range []struct {
+		name string
+		args [][]byte
+		want []byte
+	}{
+		{
+			name: "isBatchPoster[batchPosterAddr]", // Keccak256(addr, 3)
+			args: [][]byte{
+				common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address
+				{3},
+			},
+			want: common.HexToHash("0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660").Bytes(),
+		},
+		{
+			name: "allowedContracts[msg.sender]", // Keccak256(msg.sender, 1)
+			args: [][]byte{
+				common.FromHex("0x1c479675ad559DC151F6Ec7ed3FbF8ceE79582B6"), // mainnet sequencer address
+				{1},
+			},
+			want: common.HexToHash("0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62").Bytes(),
+		},
+		{
+			name: "allowedRefundees[refundee]", // Keccak256(refundee, 2)
+			args: [][]byte{
+				common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address
+				{2},
+			},
+			want: common.HexToHash("0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41").Bytes(),
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			got := PaddedKeccak256(tc.args...)
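+			// This mirrors Solidity's mapping-slot derivation:
+			// slot(key) = keccak256(pad32(key) ++ pad32(declaredSlot)).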
+ if !bytes.Equal(got, tc.want) { + t.Errorf("slotAddress(%x) = %x, want %x", tc.args, got, tc.want) + } + }) + } + +} + +func TestSumBytes(t *testing.T) { + for _, tc := range []struct { + desc string + a, b, want []byte + }{ + { + desc: "simple case", + a: []byte{0x0a, 0x0b}, + b: []byte{0x03, 0x04}, + want: common.HexToHash("0x0d0f").Bytes(), + }, + { + desc: "carry over last byte", + a: []byte{0x0a, 0xff}, + b: []byte{0x01}, + want: common.HexToHash("0x0b00").Bytes(), + }, + { + desc: "overflow", + a: common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), + b: []byte{0x01}, + want: common.HexToHash("0x00").Bytes(), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got := SumBytes(tc.a, tc.b) + if diff := cmp.Diff(got, tc.want); diff != "" { + t.Errorf("SumBytes(%x, %x) = %x want: %x", tc.a, tc.b, got, tc.want) + } + }) + } +} diff --git a/arbutil/preimage_type.go b/arbutil/preimage_type.go new file mode 100644 index 0000000000..7eaf60c010 --- /dev/null +++ b/arbutil/preimage_type.go @@ -0,0 +1,13 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbutil + +type PreimageType uint8 + +// These values must be kept in sync with `arbitrator/arbutil/src/types.rs`, +// and the if statement in `contracts/src/osp/OneStepProverHostIo.sol` (search for "UNKNOWN_PREIMAGE_TYPE"). +const ( + Keccak256PreimageType PreimageType = iota + Sha2_256PreimageType +) diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index ec6bb5a380..12d494a230 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" ) type L1Interface interface { @@ -88,7 +88,7 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio } _, err = SendTxAsCall(ctx, client, tx, from, txRes.BlockNumber, true) if err == nil { - return fmt.Errorf("%w for tx hash %v", core.ErrGasLimitReached, tx.Hash()) + return fmt.Errorf("%w for tx hash %v", vm.ErrOutOfGas, tx.Hash()) } return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index f78ef2aa9f..e94daa463c 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -68,13 +68,14 @@ type Config struct { RequireChainId bool `koanf:"require-chain-id" reload:"hot"` RequireFeedVersion bool `koanf:"require-feed-version" reload:"hot"` Timeout time.Duration `koanf:"timeout" reload:"hot"` - URLs []string `koanf:"url"` - Verifier signature.VerifierConfig `koanf:"verify"` + URL []string `koanf:"url"` + SecondaryURL []string `koanf:"secondary-url"` + Verify signature.VerifierConfig `koanf:"verify"` EnableCompression bool `koanf:"enable-compression" reload:"hot"` } func (c *Config) Enable() bool { - return len(c.URLs) > 0 && c.URLs[0] != "" + return len(c.URL) > 0 && c.URL[0] != "" } type ConfigFetcher func() *Config @@ -85,7 +86,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".require-chain-id", DefaultConfig.RequireChainId, "require chain id to be present on connect") f.Bool(prefix+".require-feed-version", DefaultConfig.RequireFeedVersion, "require feed version to be present on connect") 
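+	// The url and secondary-url flags below configure feed failover: primary
+	// feeds are always connected, while secondary feeds are started one at a
+	// time only when the primaries go quiet (see broadcastclients).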
f.Duration(prefix+".timeout", DefaultConfig.Timeout, "duration to wait before timing out connection to sequencer feed") - f.StringSlice(prefix+".url", DefaultConfig.URLs, "URL of sequencer feed source") + f.StringSlice(prefix+".url", DefaultConfig.URL, "list of primary URLs of sequencer feed source") + f.StringSlice(prefix+".secondary-url", DefaultConfig.SecondaryURL, "list of secondary URLs of sequencer feed source. Would be started in the order they appear in the list when primary feeds fails") signature.FeedVerifierConfigAddOptions(prefix+".verify", f) f.Bool(prefix+".enable-compression", DefaultConfig.EnableCompression, "enable per message deflate compression support") } @@ -95,8 +97,9 @@ var DefaultConfig = Config{ ReconnectMaximumBackoff: time.Second * 64, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{}, + SecondaryURL: []string{}, Timeout: 20 * time.Second, EnableCompression: true, } @@ -106,8 +109,9 @@ var DefaultTestConfig = Config{ ReconnectMaximumBackoff: 0, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{""}, + SecondaryURL: []string{}, Timeout: 200 * time.Millisecond, EnableCompression: true, } @@ -153,10 +157,10 @@ func NewBroadcastClient( txStreamer TransactionStreamerInterface, confirmedSequencerNumberListener chan arbutil.MessageIndex, fatalErrChan chan error, - bpVerifier contracts.BatchPosterVerifierInterface, + addrVerifier contracts.AddressVerifierInterface, adjustCount func(int32), ) (*BroadcastClient, error) { - sigVerifier, err := signature.NewVerifier(&config().Verifier, bpVerifier) + sigVerifier, err := signature.NewVerifier(&config().Verify, addrVerifier) if err != nil { return nil, err } @@ -480,7 +484,7 @@ func (bc *BroadcastClient) StopAndWait() { } func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *broadcaster.BroadcastFeedMessage) error { - if bc.config().Verifier.Dangerous.AcceptMissing && bc.sigVerifier == nil { + if bc.config().Verify.Dangerous.AcceptMissing && bc.sigVerifier == nil { // Verifier disabled return nil } diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 871d9d8d8a..fa743d4229 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -200,14 +200,14 @@ func (ts *dummyTransactionStreamer) AddBroadcastMessages(feedMessages []*broadca func newTestBroadcastClient(config Config, listenerAddress net.Addr, chainId uint64, currentMessageCount arbutil.MessageIndex, txStreamer TransactionStreamerInterface, confirmedSequenceNumberListener chan arbutil.MessageIndex, feedErrChan chan error, validAddr *common.Address) (*BroadcastClient, error) { port := listenerAddress.(*net.TCPAddr).Port - var bpv contracts.BatchPosterVerifierInterface + var av contracts.AddressVerifierInterface if validAddr != nil { - config.Verifier.AcceptSequencer = true - bpv = contracts.NewMockBatchPosterVerifier(*validAddr) + config.Verify.AcceptSequencer = true + av = contracts.NewMockAddressVerifier(*validAddr) } else { - config.Verifier.AcceptSequencer = false + config.Verify.AcceptSequencer = false } - return NewBroadcastClient(func() *Config { return &config }, fmt.Sprintf("ws://127.0.0.1:%d/", port), chainId, currentMessageCount, txStreamer, confirmedSequenceNumberListener, feedErrChan, bpv, 
func(_ int32) {}) + return NewBroadcastClient(func() *Config { return &config }, fmt.Sprintf("ws://127.0.0.1:%d/", port), chainId, currentMessageCount, txStreamer, confirmedSequenceNumberListener, feedErrChan, av, func(_ int32) {}) } func startMakeBroadcastClient(ctx context.Context, t *testing.T, clientConfig Config, addr net.Addr, index int, expectedCount int, chainId uint64, wg *sync.WaitGroup, sequencerAddr *common.Address) { diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index baf7cf6394..551dcdb462 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -6,21 +6,53 @@ package broadcastclients import ( "context" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/util/contracts" + "github.com/offchainlabs/nitro/util/stopwaiter" ) +const ROUTER_QUEUE_SIZE = 1024 +const RECENT_FEED_INITIAL_MAP_SIZE = 1024 +const RECENT_FEED_ITEM_TTL = time.Second * 10 +const MAX_FEED_INACTIVE_TIME = time.Second * 5 +const PRIMARY_FEED_UPTIME = time.Minute * 10 + +type Router struct { + stopwaiter.StopWaiter + messageChan chan broadcaster.BroadcastFeedMessage + confirmedSequenceNumberChan chan arbutil.MessageIndex + + forwardTxStreamer broadcastclient.TransactionStreamerInterface + forwardConfirmationChan chan arbutil.MessageIndex +} + +func (r *Router) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { + for _, feedMessage := range feedMessages { + r.messageChan <- *feedMessage + } + return nil +} + type BroadcastClients struct { - clients []*broadcastclient.BroadcastClient + primaryClients []*broadcastclient.BroadcastClient + secondaryClients []*broadcastclient.BroadcastClient + secondaryURL []string + + primaryRouter *Router + secondaryRouter *Router // Use atomic access connected int32 } +var makeClient func(string, *Router) (*broadcastclient.BroadcastClient, error) + func NewBroadcastClients( configFetcher broadcastclient.ConfigFetcher, l2ChainId uint64, @@ -28,37 +60,54 @@ func NewBroadcastClients( txStreamer broadcastclient.TransactionStreamerInterface, confirmedSequenceNumberListener chan arbutil.MessageIndex, fatalErrChan chan error, - bpVerifier contracts.BatchPosterVerifierInterface, + addrVerifier contracts.AddressVerifierInterface, ) (*BroadcastClients, error) { config := configFetcher() - urlCount := len(config.URLs) - if urlCount <= 0 { + if len(config.URL) == 0 && len(config.SecondaryURL) == 0 { return nil, nil } - - clients := BroadcastClients{} - clients.clients = make([]*broadcastclient.BroadcastClient, 0, urlCount) - var lastClientErr error - for _, address := range config.URLs { - client, err := broadcastclient.NewBroadcastClient( + newStandardRouter := func() *Router { + return &Router{ + messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), + confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), + forwardTxStreamer: txStreamer, + forwardConfirmationChan: confirmedSequenceNumberListener, + } + } + clients := BroadcastClients{ + primaryRouter: newStandardRouter(), + secondaryRouter: newStandardRouter(), + primaryClients: make([]*broadcastclient.BroadcastClient, 0, len(config.URL)), + secondaryClients: make([]*broadcastclient.BroadcastClient, 0, len(config.SecondaryURL)), + secondaryURL: config.SecondaryURL, + } + makeClient = func(url string, router 
*Router) (*broadcastclient.BroadcastClient, error) {
+		return broadcastclient.NewBroadcastClient(
 			configFetcher,
-			address,
+			url,
 			l2ChainId,
 			currentMessageCount,
-			txStreamer,
-			confirmedSequenceNumberListener,
+			router,
+			router.confirmedSequenceNumberChan,
 			fatalErrChan,
-			bpVerifier,
+			addrVerifier,
 			func(delta int32) { clients.adjustCount(delta) },
 		)
+	}
+
+	var lastClientErr error
+	for _, address := range config.URL {
+		client, err := makeClient(address, clients.primaryRouter)
 		if err != nil {
 			lastClientErr = err
 			log.Warn("init broadcast client failed", "address", address)
+			continue
 		}
-		clients.clients = append(clients.clients, client)
+		clients.primaryClients = append(clients.primaryClients, client)
 	}
-	if len(clients.clients) == 0 {
-		log.Error("no connected feed on startup, last error: %w", lastClientErr)
+	if len(clients.primaryClients) == 0 {
+		log.Error("no connected feed on startup", "err", lastClientErr)
+		return nil, nil
 	}
 	return &clients, nil
@@ -72,12 +121,128 @@ func (bcs *BroadcastClients) adjustCount(delta int32) {
 }
 func (bcs *BroadcastClients) Start(ctx context.Context) {
-	for _, client := range bcs.clients {
+	bcs.primaryRouter.StopWaiter.Start(ctx, bcs.primaryRouter)
+	bcs.secondaryRouter.StopWaiter.Start(ctx, bcs.secondaryRouter)
+
+	for _, client := range bcs.primaryClients {
 		client.Start(ctx)
 	}
+
+	var lastConfirmed arbutil.MessageIndex
+	recentFeedItemsNew := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE)
+	recentFeedItemsOld := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE)
+	bcs.primaryRouter.LaunchThread(func(ctx context.Context) {
+		recentFeedItemsCleanup := time.NewTicker(RECENT_FEED_ITEM_TTL)
+		startSecondaryFeedTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME)
+		stopSecondaryFeedTimer := time.NewTicker(PRIMARY_FEED_UPTIME)
+		primaryFeedIsDownTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME)
+		defer recentFeedItemsCleanup.Stop()
+		defer startSecondaryFeedTimer.Stop()
+		defer stopSecondaryFeedTimer.Stop()
+		defer primaryFeedIsDownTimer.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+
+			// Primary feeds
+			case msg := <-bcs.primaryRouter.messageChan:
+				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok {
+					continue
+				}
+				if _, ok := recentFeedItemsOld[msg.SequenceNumber]; ok {
+					continue
+				}
+				recentFeedItemsNew[msg.SequenceNumber] = time.Now()
+				if err := bcs.primaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil {
+					log.Error("Error routing message from Primary Sequencer Feeds", "err", err)
+				}
+			case cs := <-bcs.primaryRouter.confirmedSequenceNumberChan:
+				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				if cs == lastConfirmed {
+					continue
+				}
+				lastConfirmed = cs
+				bcs.primaryRouter.forwardConfirmationChan <- cs
+
+			// Secondary Feeds
+			case msg := <-bcs.secondaryRouter.messageChan:
+				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok {
+					continue
+				}
+				if _, ok := recentFeedItemsOld[msg.SequenceNumber]; ok {
+					continue
+				}
+				recentFeedItemsNew[msg.SequenceNumber] = time.Now()
+				if err := bcs.secondaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil {
+					log.Error("Error routing message from Secondary Sequencer Feeds", "err", err)
+				}
+			case cs := <-bcs.secondaryRouter.confirmedSequenceNumberChan:
+				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
+				if cs == lastConfirmed {
+					continue
+				}
+				lastConfirmed = cs
+				bcs.secondaryRouter.forwardConfirmationChan <- cs
+
+			// Cycle buckets to get rid of old entries
+			case <-recentFeedItemsCleanup.C:
+				recentFeedItemsOld = recentFeedItemsNew
+				recentFeedItemsNew = make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE)
+
+			// No messages from either the primary or secondary feeds for ~5 seconds: start a new secondary feed
+			case <-startSecondaryFeedTimer.C:
+				bcs.startSecondaryFeed(ctx)
+
+			// No messages from the primary feeds for ~5 seconds: reset the timer responsible for stopping a secondary
+			case <-primaryFeedIsDownTimer.C:
+				stopSecondaryFeedTimer.Reset(PRIMARY_FEED_UPTIME)
+
+			// Primary feeds have been up and running for PRIMARY_FEED_UPTIME (10 minutes) without a failure:
+			// stop the most recently started secondary feed
+			case <-stopSecondaryFeedTimer.C:
+				bcs.stopSecondaryFeed()
+			}
+		}
+	})
 }
+
+func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) {
+	pos := len(bcs.secondaryClients)
+	if pos < len(bcs.secondaryURL) {
+		url := bcs.secondaryURL[pos]
+		client, err := makeClient(url, bcs.secondaryRouter)
+		if err != nil {
+			log.Warn("init broadcast secondary client failed", "address", url)
+			bcs.secondaryURL = append(bcs.secondaryURL[:pos], bcs.secondaryURL[pos+1:]...)
+			return
+		}
+		bcs.secondaryClients = append(bcs.secondaryClients, client)
+		client.Start(ctx)
+		log.Info("secondary feed started", "url", url)
+	} else if len(bcs.secondaryURL) > 0 {
+		log.Warn("failed to start a new secondary feed: all available secondary feeds were already started")
+	}
+}
+
+func (bcs *BroadcastClients) stopSecondaryFeed() {
+	pos := len(bcs.secondaryClients)
+	if pos > 0 {
+		pos -= 1
+		bcs.secondaryClients[pos].StopAndWait()
+		bcs.secondaryClients = bcs.secondaryClients[:pos]
+		log.Info("disconnected secondary feed", "url", bcs.secondaryURL[pos])
+	}
+}
+
 func (bcs *BroadcastClients) StopAndWait() {
-	for _, client := range bcs.clients {
+	for _, client := range bcs.primaryClients {
+		client.StopAndWait()
+	}
+	for _, client := range bcs.secondaryClients {
 		client.StopAndWait()
 	}
 }
diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go
index bde80c93d1..c3f4c62ce0 100644
--- a/broadcaster/broadcaster.go
+++ b/broadcaster/broadcaster.go
@@ -61,7 +61,7 @@ type ConfirmedSequenceNumberMessage struct {
 }
 func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId uint64, feedErrChan chan error, dataSigner signature.DataSignerFunc) *Broadcaster {
-	catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup })
+	catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }, func() int { return config().MaxCatchup })
 	return &Broadcaster{
 		server:        wsbroadcastserver.NewWSBroadcastServer(config, catchupBuffer, chainId, feedErrChan),
 		catchupBuffer: catchupBuffer,
diff --git a/broadcaster/sequencenumbercatchupbuffer.go b/broadcaster/sequencenumbercatchupbuffer.go
index 7664f1b8da..bdd3e60c5b 100644
--- a/broadcaster/sequencenumbercatchupbuffer.go
+++ b/broadcaster/sequencenumbercatchupbuffer.go
@@ -29,11 +29,13 @@ type SequenceNumberCatchupBuffer struct {
 	messages     []*BroadcastFeedMessage
 	messageCount int32
 	limitCatchup func() bool
+	maxCatchup   func() int
 }
-func NewSequenceNumberCatchupBuffer(limitCatchup func() bool) *SequenceNumberCatchupBuffer {
+func NewSequenceNumberCatchupBuffer(limitCatchup func() bool, maxCatchup func() int)
*SequenceNumberCatchupBuffer { return &SequenceNumberCatchupBuffer{ limitCatchup: limitCatchup, + maxCatchup: maxCatchup, } } @@ -98,6 +100,15 @@ func (b *SequenceNumberCatchupBuffer) OnRegisterClient(clientConnection *wsbroad return nil, bmCount, time.Since(start) } +// Takes as input an index into the messages array, not a message index +func (b *SequenceNumberCatchupBuffer) pruneBufferToIndex(idx int) { + b.messages = b.messages[idx:] + if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { + // Too much spare capacity, copy to fresh slice to reset memory usage + b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) + } +} + func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber arbutil.MessageIndex) { if len(b.messages) == 0 { return @@ -126,11 +137,7 @@ func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber ar return } - b.messages = b.messages[confirmedIndex+1:] - if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { - // Too much spare capacity, copy to fresh slice to reset memory usage - b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) - } + b.pruneBufferToIndex(int(confirmedIndex) + 1) } func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { @@ -147,6 +154,12 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { confirmedSequenceNumberGauge.Update(int64(confirmMsg.SequenceNumber)) } + maxCatchup := b.maxCatchup() + if maxCatchup == 0 { + b.messages = nil + return nil + } + for _, newMsg := range broadcastMessage.Messages { if len(b.messages) == 0 { // Add to empty list @@ -167,6 +180,10 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { } } + if maxCatchup >= 0 && len(b.messages) > maxCatchup { + b.pruneBufferToIndex(len(b.messages) - maxCatchup) + } + return nil } diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go index 40fae9875f..fc6655057e 100644 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ b/broadcaster/sequencenumbercatchupbuffer_test.go @@ -22,6 +22,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestGetEmptyCacheMessages(t *testing.T) { @@ -29,6 +30,7 @@ func TestGetEmptyCacheMessages(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -60,6 +62,7 @@ func TestGetCacheMessages(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -110,6 +113,7 @@ func TestDeleteConfirmedNil(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } buffer.deleteConfirmed(0) @@ -124,6 +128,7 @@ func TestDeleteConfirmInvalidOrder(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm before cache @@ -139,6 +144,7 @@ func TestDeleteConfirmed(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than 
cache @@ -154,6 +160,7 @@ func TestDeleteFreeMem(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than cache @@ -169,6 +176,7 @@ func TestBroadcastBadMessage(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } var foo int @@ -187,6 +195,7 @@ func TestBroadcastPastSeqNum(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -208,6 +217,8 @@ func TestBroadcastFutureSeqNum(t *testing.T) { buffer := SequenceNumberCatchupBuffer{ messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -223,3 +234,38 @@ func TestBroadcastFutureSeqNum(t *testing.T) { } } + +func TestMaxCatchupBufferSize(t *testing.T) { + limit := 5 + buffer := SequenceNumberCatchupBuffer{ + messages: nil, + messageCount: 0, + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return limit }, + } + + firstMessage := 10 + for i := firstMessage; i <= 20; i += 2 { + bm := BroadcastMessage{ + Messages: []*BroadcastFeedMessage{ + { + SequenceNumber: arbutil.MessageIndex(i), + }, + { + SequenceNumber: arbutil.MessageIndex(i + 1), + }, + }, + } + err := buffer.OnDoBroadcast(bm) + Require(t, err) + haveMessages := buffer.getCacheMessages(0) + expectedCount := arbmath.MinInt(i+len(bm.Messages)-firstMessage, limit) + if len(haveMessages.Messages) != expectedCount { + t.Errorf("after broadcasting messages %v and %v, expected to have %v messages but got %v", i, i+1, expectedCount, len(haveMessages.Messages)) + } + expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(firstMessage, i+len(bm.Messages)-limit)) + if haveMessages.Messages[0].SequenceNumber != expectedFirstMessage { + t.Errorf("after broadcasting messages %v and %v, expected the first message to be %v but got %v", i, i+1, expectedFirstMessage, haveMessages.Messages[0].SequenceNumber) + } + } +} diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 01b60e9c05..051ccd03c5 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -1,10 +1,10 @@ [ { - "chain-id": 42161, "chain-name": "arb1", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc", - "feed-url": "wss://arb1.arbitrum.io/feed", + "feed-url": "wss://arb1-feed.arbitrum.io/feed", "has-genesis-state": true, "chain-config": { @@ -50,11 +50,11 @@ } }, { - "chain-id": 42170, "chain-name": "nova", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://nova.arbitrum.io/rpc", - "feed-url": "wss://nova.arbitrum.io/feed", + "feed-url": "wss://nova-feed.arbitrum.io/feed", "das-index-url": "https://nova.arbitrum.io/das-servers", "chain-config": { @@ -100,9 +100,9 @@ } }, { - "chain-id": 421613, "chain-name": "goerli-rollup", "parent-chain-id": 5, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc", "feed-url": "wss://goerli-rollup.arbitrum.io/feed", "chain-config": @@ -149,7 +149,6 @@ } }, { - 
"chain-id": 412346, "chain-name": "arb-dev-test", "chain-config": { @@ -185,7 +184,6 @@ } }, { - "chain-id": 412347, "chain-name": "anytrust-dev-test", "chain-config": { @@ -219,5 +217,55 @@ "GenesisBlockNum": 0 } } + }, + { + "chain-id": 421614, + "parent-chain-id": 11155111, + "parent-chain-is-arbitrum": false, + "chain-name": "sepolia-rollup", + "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc", + "feed-url": "wss://sepolia-rollup.arbitrum.io/feed", + "chain-config": + { + "chainId": 421614, + "homesteadBlock": 0, + "daoForkBlock": null, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "clique": + { + "period": 0, + "epoch": 0 + }, + "arbitrum": + { + "EnableArbOS": true, + "AllowDebugPrecompiles": false, + "DataAvailabilityCommittee": false, + "InitialArbOSVersion": 10, + "InitialChainOwner": "0x71B61c2E250AFa05dFc36304D6c91501bE0965D8", + "GenesisBlockNum": 0 + } + }, + "rollup": + { + "bridge": "0x38f918D0E9F1b721EDaA41302E399fa1B79333a9", + "inbox": "0xaAe29B0366299461418F5324a79Afc425BE5ae21", + "sequencer-inbox": "0x6c97864CE4bEf387dE0b3310A44230f7E3F1be0D", + "rollup": "0xd80810638dbDF9081b72C1B33c65375e807281C8", + "validator-utils": "0x1f6860C3cac255fFFa72B7410b1183c3a0D261e0", + "validator-wallet-creator": "0x894fC71fA0A666352824EC954B401573C861D664", + "deployed-at": 4139226 + } } -] \ No newline at end of file +] diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 46e7ada966..cc13321513 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -18,9 +18,9 @@ import ( var DefaultChainInfo []byte type ChainInfo struct { - ChainId uint64 `json:"chain-id"` - ChainName string `json:"chain-name"` - ParentChainId uint64 `json:"parent-chain-id"` + ChainName string `json:"chain-name"` + ParentChainId uint64 `json:"parent-chain-id"` + ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"` // This is the forwarding target to submit transactions to, called the sequencer URL for clarity SequencerUrl string `json:"sequencer-url"` FeedUrl string `json:"feed-url"` @@ -94,7 +94,7 @@ func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*C return nil, err } for _, chainInfo := range chainsInfo { - if (chainId == 0 || chainInfo.ChainId == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { + if (chainId == 0 || chainInfo.ChainConfig.ChainID.Uint64() == chainId) && (chainName == "" || chainInfo.ChainName == chainName) { return &chainInfo, nil } } @@ -106,6 +106,8 @@ type RollupAddresses struct { Inbox common.Address `json:"inbox"` SequencerInbox common.Address `json:"sequencer-inbox"` Rollup common.Address `json:"rollup"` + NativeToken common.Address `json:"native-token"` + UpgradeExecutor common.Address `json:"upgrade-executor"` ValidatorUtils common.Address `json:"validator-utils"` ValidatorWalletCreator common.Address `json:"validator-wallet-creator"` DeployedAt uint64 `json:"deployed-at"` diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 54b6176f96..505957f45e 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -12,7 +12,7 @@ import ( ) type L1Config struct { - ChainID uint64 `koanf:"id"` + ID uint64 `koanf:"id"` Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` Wallet 
genericconf.WalletConfig `koanf:"wallet"` } @@ -25,21 +25,21 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ } var L1ConfigDefault = L1Config{ - ChainID: 0, + ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, } var DefaultL1WalletConfig = genericconf.WalletConfig{ Pathname: "wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, } func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L1ConfigDefault.ChainID, "if set other than 0, will be used to validate database and L1 connection") + f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) } @@ -53,35 +53,35 @@ func (c *L1Config) Validate() error { } type L2Config struct { - ChainID uint64 `koanf:"id"` - ChainName string `koanf:"name"` - ChainInfoFiles []string `koanf:"info-files"` - ChainInfoJson string `koanf:"info-json"` - DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` - ChainInfoIpfsUrl string `koanf:"info-ipfs-url"` - ChainInfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` + ID uint64 `koanf:"id"` + Name string `koanf:"name"` + InfoFiles []string `koanf:"info-files"` + InfoJson string `koanf:"info-json"` + DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` + InfoIpfsUrl string `koanf:"info-ipfs-url"` + InfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` } var L2ConfigDefault = L2Config{ - ChainID: 0, - ChainName: "", - ChainInfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go - ChainInfoJson: "", - DevWallet: genericconf.WalletConfigDefault, - ChainInfoIpfsUrl: "", - ChainInfoIpfsDownloadPath: "/tmp/", + ID: 0, + Name: "", + InfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go + InfoJson: "", + DevWallet: genericconf.WalletConfigDefault, + InfoIpfsUrl: "", + InfoIpfsDownloadPath: "/tmp/", } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainID, "L2 chain ID (determines Arbitrum network)") - f.String(prefix+".name", L2ConfigDefault.ChainName, "L2 chain name (determines Arbitrum network)") - f.StringSlice(prefix+".info-files", L2ConfigDefault.ChainInfoFiles, "L2 chain info json files") - f.String(prefix+".info-json", L2ConfigDefault.ChainInfoJson, "L2 chain info in json string format") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID (determines Arbitrum network)") + f.String(prefix+".name", L2ConfigDefault.Name, "L2 chain name (determines Arbitrum network)") + f.StringSlice(prefix+".info-files", L2ConfigDefault.InfoFiles, "L2 chain info json files") + f.String(prefix+".info-json", L2ConfigDefault.InfoJson, "L2 chain info in json string format") // Dev wallet does not exist unless specified genericconf.WalletConfigAddOptions(prefix+".dev-wallet", f, "") - f.String(prefix+".info-ipfs-url", L2ConfigDefault.ChainInfoIpfsUrl, "url to download chain info file") - f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.ChainInfoIpfsDownloadPath, "path to save temp 
downloaded file") + f.String(prefix+".info-ipfs-url", L2ConfigDefault.InfoIpfsUrl, "url to download chain info file") + f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.InfoIpfsDownloadPath, "path to save temp downloaded file") } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index a06a9facea..b049375d66 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -15,22 +15,28 @@ import ( type PersistentConfig struct { GlobalConfig string `koanf:"global-config"` Chain string `koanf:"chain"` + LogDir string `koanf:"log-dir"` Handles int `koanf:"handles"` Ancient string `koanf:"ancient"` + DBEngine string `koanf:"db-engine"` } var PersistentConfigDefault = PersistentConfig{ GlobalConfig: ".arbitrum", Chain: "", + LogDir: "", Handles: 512, Ancient: "", + DBEngine: "leveldb", } func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".global-config", PersistentConfigDefault.GlobalConfig, "directory to store global config") f.String(prefix+".chain", PersistentConfigDefault.Chain, "directory to store chain state") + f.String(prefix+".log-dir", PersistentConfigDefault.LogDir, "directory to store log file") f.Int(prefix+".handles", PersistentConfigDefault.Handles, "number of file descriptor handles to use for the database") f.String(prefix+".ancient", PersistentConfigDefault.Ancient, "directory of ancient where the chain freezer can be opened") + f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use ('leveldb' or 'pebble')") } func (c *PersistentConfig) ResolveDirectoryNames() error { @@ -60,6 +66,19 @@ func (c *PersistentConfig) ResolveDirectoryNames() error { return fmt.Errorf("database in --persistent.chain (%s) directory, try specifying parent directory", c.Chain) } + // Make Log directory relative to persistent storage directory if not already absolute + if !filepath.IsAbs(c.LogDir) { + c.LogDir = path.Join(c.Chain, c.LogDir) + } + if c.LogDir != c.Chain { + err = os.MkdirAll(c.LogDir, os.ModePerm) + if err != nil { + return fmt.Errorf("unable to create Log directory: %w", err) + } + if DatabaseInDirectory(c.LogDir) { + return fmt.Errorf("database in --persistent.log-dir (%s) directory, try specifying parent directory", c.LogDir) + } + } return nil } @@ -69,3 +88,11 @@ func DatabaseInDirectory(path string) bool { return err == nil } + +func (c *PersistentConfig) Validate() error { + // we are validating .db-engine here to avoid unintended behaviour as empty string value also has meaning in geth's node.Config.DBEngine + if c.DBEngine != "leveldb" && c.DBEngine != "pebble" { + return fmt.Errorf(`invalid .db-engine choice: %q, allowed "leveldb" or "pebble"`, c.DBEngine) + } + return nil +} diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 7b6b504e40..07481651b2 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -17,6 +17,7 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" @@ -24,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -38,13 +40,16 @@ type DAServerConfig struct { RESTPort uint64 `koanf:"rest-port"` RESTServerTimeouts 
genericconf.HTTPServerTimeoutConfig `koanf:"rest-server-timeouts"`
-	DAConf das.DataAvailabilityConfig `koanf:"data-availability"`
+	DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"`
-	ConfConfig genericconf.ConfConfig `koanf:"conf"`
-	LogLevel   int                    `koanf:"log-level"`
+	Conf     genericconf.ConfConfig `koanf:"conf"`
+	LogLevel int                    `koanf:"log-level"`
+	LogType  string                 `koanf:"log-type"`
 	Metrics       bool                            `koanf:"metrics"`
 	MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"`
+	PProf    bool              `koanf:"pprof"`
+	PprofCfg genericconf.PProf `koanf:"pprof-cfg"`
 }
 var DefaultDAServerConfig = DAServerConfig{
@@ -56,11 +61,14 @@ var DefaultDAServerConfig = DAServerConfig{
 	RESTAddr:           "localhost",
 	RESTPort:           9877,
 	RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault,
-	DAConf:             das.DefaultDataAvailabilityConfig,
-	ConfConfig:         genericconf.ConfConfigDefault,
+	DataAvailability:   das.DefaultDataAvailabilityConfig,
+	Conf:               genericconf.ConfConfigDefault,
+	LogLevel:           int(log.LvlInfo),
+	LogType:            "plaintext",
 	Metrics:            false,
 	MetricsServer:      genericconf.MetricsServerConfigDefault,
-	LogLevel:           3,
+	PProf:              false,
+	PprofCfg:           genericconf.PProfDefault,
 }
 func main() {
@@ -89,7 +97,12 @@ func parseDAServer(args []string) (*DAServerConfig, error) {
 	f.Bool("metrics", DefaultDAServerConfig.Metrics, "enable metrics")
 	genericconf.MetricsServerAddOptions("metrics-server", f)
+	f.Bool("pprof", DefaultDAServerConfig.PProf, "enable pprof")
+	genericconf.PProfAddOptions("pprof-cfg", f)
+	f.Int("log-level", int(log.LvlInfo), "log level; 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE")
+	f.String("log-type", DefaultDAServerConfig.LogType, "log type (plaintext or json)")
+
 	das.DataAvailabilityConfigAddDaserverOptions("data-availability", f)
 	genericconf.ConfConfigAddOptions("conf", f)
@@ -102,7 +115,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) {
 	if err := confighelpers.EndCommonParse(k, &serverConfig); err != nil {
 		return nil, err
 	}
-	if serverConfig.ConfConfig.Dump {
+	if serverConfig.Conf.Dump {
 		err = confighelpers.DumpConfig(k, map[string]interface{}{
 			"data-availability.key.priv-key": "",
 		})
@@ -135,6 +148,28 @@ func (c *L1ReaderCloser) String() string {
 	return "l1 reader closer"
 }
+// Checks the metrics and pprof flags and runs the servers if enabled.
+// Note: they are kept separate so each can be enabled or disabled on its own;
+// the only requirement is that they can't run on the same address and port.
+func startMetrics(cfg *DAServerConfig) error {
+	mAddr := fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port)
+	pAddr := fmt.Sprintf("%v:%v", cfg.PprofCfg.Addr, cfg.PprofCfg.Port)
+	if cfg.Metrics && !metrics.Enabled {
+		return fmt.Errorf("metrics must be enabled via command line by adding --metrics, json config has no effect")
+	}
+	if cfg.Metrics && cfg.PProf && mAddr == pAddr {
+		return fmt.Errorf("metrics and pprof cannot be enabled on the same address:port: %s", mAddr)
+	}
+	if cfg.Metrics {
+		go metrics.CollectProcessMetrics(cfg.MetricsServer.UpdateInterval)
+		exp.Setup(fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port))
+	}
+	if cfg.PProf {
+		genericconf.StartPprof(pAddr)
+	}
+	return nil
+}
+
 func startup() error {
 	// Some different defaults to DAS config in a node.
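+	// (Unlike a full node, a standalone daserver exists to serve DA requests,
+	// so the DAS role defaults to enabled here.)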
das.DefaultDataAvailabilityConfig.Enable = true @@ -147,20 +182,17 @@ func startup() error { confighelpers.PrintErrorAndExit(errors.New("please specify at least one of --enable-rest or --enable-rpc"), printSampleUsage) } - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + logFormat, err := genericconf.ParseLogType(serverConfig.LogType) + if err != nil { + flag.Usage() + panic(fmt.Sprintf("Error parsing log type: %v", err)) + } + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, logFormat)) glogger.Verbosity(log.Lvl(serverConfig.LogLevel)) log.Root().SetHandler(glogger) - if serverConfig.Metrics { - if len(serverConfig.MetricsServer.Addr) == 0 { - fmt.Printf("Metrics is enabled, but missing --metrics-server.addr") - return nil - } - - go metrics.CollectProcessMetrics(serverConfig.MetricsServer.UpdateInterval) - - address := fmt.Sprintf("%v:%v", serverConfig.MetricsServer.Addr, serverConfig.MetricsServer.Port) - exp.Setup(address) + if err := startMetrics(serverConfig); err != nil { + return err } sigint := make(chan os.Signal, 1) @@ -170,22 +202,23 @@ func startup() error { defer cancel() var l1Reader *headerreader.HeaderReader - if serverConfig.DAConf.L1NodeURL != "" && serverConfig.DAConf.L1NodeURL != "none" { - l1Client, err := das.GetL1Client(ctx, serverConfig.DAConf.L1ConnectionAttempts, serverConfig.DAConf.L1NodeURL) + if serverConfig.DataAvailability.ParentChainNodeURL != "" && serverConfig.DataAvailability.ParentChainNodeURL != "none" { + l1Client, err := das.GetL1Client(ctx, serverConfig.DataAvailability.ParentChainConnectionAttempts, serverConfig.DataAvailability.ParentChainNodeURL) if err != nil { return err } - l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }) // TODO: config + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) // TODO: config if err != nil { return err } } var seqInboxAddress *common.Address - if serverConfig.DAConf.SequencerInboxAddress == "none" { + if serverConfig.DataAvailability.SequencerInboxAddress == "none" { seqInboxAddress = nil - } else if len(serverConfig.DAConf.SequencerInboxAddress) > 0 { - seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DAConf.SequencerInboxAddress) + } else if len(serverConfig.DataAvailability.SequencerInboxAddress) > 0 { + seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DataAvailability.SequencerInboxAddress) if err != nil { return err } @@ -196,7 +229,7 @@ func startup() error { return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address, or 'none'") } - daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DAConf, l1Reader, seqInboxAddress) + daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DataAvailability, l1Reader, seqInboxAddress) if err != nil { return err } @@ -206,7 +239,7 @@ func startup() error { dasLifecycleManager.Register(&L1ReaderCloser{l1Reader}) } - vcsRevision, vcsTime := confighelpers.GetVersion() + vcsRevision, _, vcsTime := confighelpers.GetVersion() var rpcServer *http.Server if serverConfig.EnableRPC { log.Info("Starting HTTP-RPC server", "addr", serverConfig.RPCAddr, "port", serverConfig.RPCPort, "revision", vcsRevision, "vcs.time", vcsTime) diff --git 
a/cmd/datool/datool.go b/cmd/datool/datool.go
index 522e021ee1..d20a5b52cd 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -16,6 +16,7 @@ import (
 	"strings"
 	"time"
+	koanfjson "github.com/knadh/koanf/parsers/json"
 	flag "github.com/spf13/pflag"
 	"github.com/ethereum/go-ethereum/common"
@@ -34,7 +35,7 @@ import (
 func main() {
 	args := os.Args
 	if len(args) < 2 {
-		panic("Usage: datool [client|keygen|generatehash] ...")
+		panic("Usage: datool [client|keygen|generatehash|dumpkeyset] ...")
 	}
 	var err error
@@ -45,6 +46,8 @@ func main() {
 		err = startKeyGen(args[2:])
 	case "generatehash":
 		err = generateHash(args[2])
+	case "dumpkeyset":
+		err = dumpKeyset(args[2:])
 	default:
-		panic(fmt.Sprintf("Unknown tool '%s' specified, valid tools are 'client', 'keygen', 'generatehash'", args[1]))
+		panic(fmt.Sprintf("Unknown tool '%s' specified, valid tools are 'client', 'keygen', 'generatehash', 'dumpkeyset'", args[1]))
 	}
@@ -81,14 +84,13 @@ func startClient(args []string) error {
 // datool client rpc store
 type ClientStoreConfig struct {
-	URL                   string                 `koanf:"url"`
-	Message               string                 `koanf:"message"`
-	RandomMessageSize     int                    `koanf:"random-message-size"`
-	DASRetentionPeriod    time.Duration          `koanf:"das-retention-period"`
-	SigningKey            string                 `koanf:"signing-key"`
-	SigningWallet         string                 `koanf:"signing-wallet"`
-	SigningWalletPassword string                 `koanf:"signing-wallet-password"`
-	ConfConfig            genericconf.ConfConfig `koanf:"conf"`
+	URL                   string        `koanf:"url"`
+	Message               string        `koanf:"message"`
+	RandomMessageSize     int           `koanf:"random-message-size"`
+	DASRetentionPeriod    time.Duration `koanf:"das-retention-period"`
+	SigningKey            string        `koanf:"signing-key"`
+	SigningWallet         string        `koanf:"signing-wallet"`
+	SigningWalletPassword string        `koanf:"signing-wallet-password"`
 }
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -148,7 +150,7 @@ func startClientStore(args []string) error {
 	} else if config.SigningWallet != "" {
 		walletConf := &genericconf.WalletConfig{
 			Pathname:      config.SigningWallet,
-			PasswordImpl:  config.SigningWalletPassword,
+			Password:      config.SigningWalletPassword,
 			PrivateKey:    "",
 			Account:       "",
 			OnlyCreateKey: false,
@@ -193,9 +195,8 @@ func startClientStore(args []string) error {
 // datool client rest getbyhash
 type RESTClientGetByHashConfig struct {
-	URL        string                 `koanf:"url"`
-	DataHash   string                 `koanf:"data-hash"`
-	ConfConfig genericconf.ConfConfig `koanf:"conf"`
+	URL      string `koanf:"url"`
+	DataHash string `koanf:"data-hash"`
 }
 func parseRESTClientGetByHashConfig(args []string) (*RESTClientGetByHashConfig, error) {
@@ -254,10 +255,11 @@ func startRESTClientGetByHash(args []string) error {
 // das keygen
 type KeyGenConfig struct {
-	Dir        string
-	ConfConfig genericconf.ConfConfig `koanf:"conf"`
-	ECDSAMode  bool                   `koanf:"ecdsa"`
-	WalletMode bool                   `koanf:"wallet"`
+	Dir string
+	// ECDSA mode.
+	ECDSA bool `koanf:"ecdsa"`
+	// Wallet mode.
+	Wallet bool `koanf:"wallet"`
 }
 func parseKeyGenConfig(args []string) (*KeyGenConfig, error) {
@@ -285,18 +287,18 @@ func startKeyGen(args []string) error {
 		return err
 	}
-	if !config.ECDSAMode {
+	if !config.ECDSA {
 		_, _, err = das.GenerateAndStoreKeys(config.Dir)
 		if err != nil {
 			return err
 		}
 		return nil
-	} else if !config.WalletMode {
+	} else if !config.Wallet {
 		return das.GenerateAndStoreECDSAKeys(config.Dir)
 	} else {
 		walletConf := &genericconf.WalletConfig{
 			Pathname:      config.Dir,
-			PasswordImpl:  genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password
+			Password:      genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password
 			PrivateKey:    "",
 			Account:       "",
 			OnlyCreateKey: true,
@@ -313,3 +315,68 @@ func generateHash(message string) error {
 	fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(dastree.HashBytes([]byte(message))))
 	return nil
 }
+
+func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) {
+	f := flag.NewFlagSet("dump keyset", flag.ContinueOnError)
+
+	das.AggregatorConfigAddOptions("keyset", f)
+	genericconf.ConfConfigAddOptions("conf", f)
+
+	k, err := confighelpers.BeginCommonParse(f, args)
+	if err != nil {
+		return nil, err
+	}
+
+	var config DumpKeysetConfig
+	if err := confighelpers.EndCommonParse(k, &config); err != nil {
+		return nil, err
+	}
+
+	if config.Conf.Dump {
+		c, err := k.Marshal(koanfjson.Parser())
+		if err != nil {
+			return nil, fmt.Errorf("unable to marshal config file to JSON: %w", err)
+		}
+
+		fmt.Println(string(c))
+		os.Exit(0)
+	}
+
+	if config.Keyset.AssumedHonest == 0 {
+		return nil, errors.New("--keyset.assumed-honest must be set")
+	}
+	if config.Keyset.Backends == "" {
+		return nil, errors.New("--keyset.backends must be set")
+	}
+
+	return &config, nil
+}
+
+// datool dumpkeyset
+
+type DumpKeysetConfig struct {
+	Keyset das.AggregatorConfig   `koanf:"keyset"`
+	Conf   genericconf.ConfConfig `koanf:"conf"`
+}
+
+func dumpKeyset(args []string) error {
+	config, err := parseDumpKeyset(args)
+	if err != nil {
+		return err
+	}
+
+	services, err := das.ParseServices(config.Keyset)
+	if err != nil {
+		return err
+	}
+
+	keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.Keyset.AssumedHonest))
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Keyset: %s\n", hexutil.Encode(keysetBytes))
+	fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:]))
+
+	return err
+}
diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go
index 91775ced25..0b72038908 100644
--- a/cmd/deploy/deploy.go
+++ b/cmd/deploy/deploy.go
@@ -14,10 +14,12 @@ import (
 	"github.com/offchainlabs/nitro/cmd/chaininfo"
 	"github.com/offchainlabs/nitro/cmd/genericconf"
+	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
 	"github.com/offchainlabs/nitro/util/headerreader"
 	"github.com/offchainlabs/nitro/validator/server_common"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
@@ -38,10 +40,13 @@ func main() {
 	deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)")
 	ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address")
 	sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address")
+	nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency")
+
maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum data size of a batch or a cross-chain message (default = 90% of Geth's 128KB tx size limit)") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") wasmmoduleroot := flag.String("wasmmoduleroot", "", "WASM module root hash") wasmrootpath := flag.String("wasmrootpath", "", "path to machine folders") l1passphrase := flag.String("l1passphrase", "passphrase", "l1 private key file passphrase") + l1privatekey := flag.String("l1privatekey", "", "l1 private key") outfile := flag.String("l1deployment", "deploy.json", "deployment output json file") l1ChainIdUint := flag.Uint64("l1chainid", 1337, "L1 chain ID") l2ChainConfig := flag.String("l2chainconfig", "l2_chain_config.json", "L2 chain config json file") @@ -52,6 +57,7 @@ func main() { prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) + maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint) if *prod { if *wasmmoduleroot == "" { @@ -63,9 +69,10 @@ func main() { } wallet := genericconf.WalletConfig{ - Pathname: *l1keystore, - Account: *deployAccount, - PasswordImpl: *l1passphrase, + Pathname: *l1keystore, + Account: *deployAccount, + Password: *l1passphrase, + PrivateKey: *l1privatekey, } l1TransactionOpts, _, err := util.OpenWallet("l1", &wallet, l1ChainId) if err != nil { @@ -125,14 +132,24 @@ func main() { panic(fmt.Errorf("failed to deserialize chain config: %w", err)) } + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }, arbSys) + if err != nil { + panic(fmt.Errorf("failed to create header reader: %w", err)) + } + l1Reader.Start(ctx) + defer l1Reader.StopAndWait() + + nativeToken := common.HexToAddress(*nativeTokenAddressString) deployedAddresses, err := arbnode.DeployOnL1( ctx, - l1client, + l1Reader, l1TransactionOpts, sequencerAddress, *authorizevalidators, - func() *headerreader.Config { return &headerReaderConfig }, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), + nativeToken, + maxDataSize, ) if err != nil { flag.Usage() @@ -146,13 +163,14 @@ func main() { if err := os.WriteFile(*outfile, deployData, 0600); err != nil { panic(err) } + parentChainIsArbitrum := l1Reader.IsParentChainArbitrum() chainsInfo := []chaininfo.ChainInfo{ { - ChainId: chainConfig.ChainID.Uint64(), - ChainName: *l2ChainName, - ParentChainId: l1ChainId.Uint64(), - ChainConfig: &chainConfig, - RollupAddresses: deployedAddresses, + ChainName: *l2ChainName, + ParentChainId: l1ChainId.Uint64(), + ParentChainIsArbitrum: &parentChainIsArbitrum, + ChainConfig: &chainConfig, + RollupAddresses: deployedAddresses, }, } chainsInfoJson, err := json.Marshal(chainsInfo) diff --git a/cmd/genericconf/config.go b/cmd/genericconf/config.go index 8e75b61772..c3282fe1af 100644 --- a/cmd/genericconf/config.go +++ b/cmd/genericconf/config.go @@ -33,7 +33,7 @@ func ConfConfigAddOptions(prefix string, f *flag.FlagSet) { var ConfConfigDefault = ConfConfig{ Dump: false, EnvPrefix: "", - File: nil, + File: []string{}, S3: DefaultS3Config, String: "", ReloadInterval: 0, diff --git a/cmd/genericconf/getversion18.go b/cmd/genericconf/getversion18.go index 2c183f6879..4aabae91ef 100644 --- a/cmd/genericconf/getversion18.go +++ 
b/cmd/genericconf/getversion18.go @@ -7,7 +7,7 @@ package genericconf import "runtime/debug" -func GetVersion(definedVersion string, definedTime string, definedModified string) (string, string) { +func GetVersion(definedVersion string, definedTime string, definedModified string) (string, string, string) { vcsVersion := "development" vcsTime := "development" vcsModified := "false" @@ -43,5 +43,10 @@ func GetVersion(definedVersion string, definedTime string, definedModified strin vcsVersion = vcsVersion + "-modified" } - return vcsVersion, vcsTime + strippedVersion := vcsVersion + if len(strippedVersion) > 0 && strippedVersion[0] == 'v' { + strippedVersion = strippedVersion[1:] + } + + return vcsVersion, strippedVersion, vcsTime } diff --git a/cmd/genericconf/pprof.go b/cmd/genericconf/pprof.go index 8f756bbf45..9fd3a6f2a4 100644 --- a/cmd/genericconf/pprof.go +++ b/cmd/genericconf/pprof.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" + // The blank pprof import registers its HTTP handlers on http.DefaultServeMux. _ "net/http/pprof" // #nosec G108 "github.com/ethereum/go-ethereum/log" @@ -16,8 +17,7 @@ func StartPprof(address string) { log.Info("Starting metrics server with pprof", "addr", fmt.Sprintf("http://%s/debug/metrics", address)) log.Info("Pprof endpoint", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) go func() { - // #nosec G114 - if err := http.ListenAndServe(address, http.DefaultServeMux); err != nil { + if err := http.ListenAndServe(address, http.DefaultServeMux); /* #nosec G114 */ err != nil { log.Error("Failure in running pprof server", "err", err) } }() diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 45c99eee63..34176f84c9 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -28,7 +28,7 @@ var HTTPConfigDefault = HTTPConfig{ Port: 8547, API: append(node.DefaultConfig.HTTPModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.HTTPPathPrefix, - CORSDomain: node.DefaultConfig.HTTPCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.HTTPVirtualHosts, ServerTimeouts: HTTPServerTimeoutConfigDefault, } @@ -93,7 +93,7 @@ var WSConfigDefault = WSConfig{ Port: 8548, API: append(node.DefaultConfig.WSModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.WSPathPrefix, - Origins: node.DefaultConfig.WSOrigins, + Origins: []string{}, ExposeAll: node.DefaultConfig.WSExposeAll, } @@ -139,7 +139,7 @@ type GraphQLConfig struct { var GraphQLConfigDefault = GraphQLConfig{ Enable: false, - CORSDomain: node.DefaultConfig.GraphQLCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.GraphQLVirtualHosts, } @@ -167,11 +167,8 @@ func (a AuthRPCConfig) Apply(stackConf *node.Config) { stackConf.AuthPort = a.Port stackConf.AuthVirtualHosts = []string{} // dont allow http access stackConf.JWTSecret = a.JwtSecret - // a few settings are not available as stanard config, but we can change the default. sigh..
- node.DefaultAuthOrigins = make([]string, len(a.Origins)) - copy(node.DefaultAuthOrigins, a.Origins) - node.DefaultAuthModules = make([]string, len(a.API)) - copy(node.DefaultAuthModules, a.API) + stackConf.AuthModules = a.API + stackConf.AuthOrigins = a.Origins } var AuthRPCConfigDefault = AuthRPCConfig{ @@ -193,20 +190,32 @@ func AuthRPCConfigAddOptions(prefix string, f *flag.FlagSet) { type MetricsServerConfig struct { Addr string `koanf:"addr"` Port int `koanf:"port"` - Pprof bool `koanf:"pprof"` UpdateInterval time.Duration `koanf:"update-interval"` } var MetricsServerConfigDefault = MetricsServerConfig{ Addr: "127.0.0.1", Port: 6070, - Pprof: false, UpdateInterval: 3 * time.Second, } +type PProf struct { + Addr string `koanf:"addr"` + Port int `koanf:"port"` +} + +var PProfDefault = PProf{ + Addr: "127.0.0.1", + Port: 6071, +} + func MetricsServerAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".addr", MetricsServerConfigDefault.Addr, "metrics server address") f.Int(prefix+".port", MetricsServerConfigDefault.Port, "metrics server port") - f.Bool(prefix+".pprof", MetricsServerConfigDefault.Pprof, "enable profiling for Go") f.Duration(prefix+".update-interval", MetricsServerConfigDefault.UpdateInterval, "metrics server update interval") } + +func PProfAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".addr", PProfDefault.Addr, "pprof server address") + f.Int(prefix+".port", PProfDefault.Port, "pprof server port") +} diff --git a/cmd/genericconf/wallet.go b/cmd/genericconf/wallet.go index 6e6f30e0c5..e05452e3b3 100644 --- a/cmd/genericconf/wallet.go +++ b/cmd/genericconf/wallet.go @@ -14,22 +14,22 @@ const PASSWORD_NOT_SET = "PASSWORD_NOT_SET" type WalletConfig struct { Pathname string `koanf:"pathname"` - PasswordImpl string `koanf:"password"` + Password string `koanf:"password"` PrivateKey string `koanf:"private-key"` Account string `koanf:"account"` OnlyCreateKey bool `koanf:"only-create-key"` } -func (w *WalletConfig) Password() *string { - if w.PasswordImpl == PASSWORD_NOT_SET { +func (w *WalletConfig) Pwd() *string { + if w.Password == PASSWORD_NOT_SET { return nil } - return &w.PasswordImpl + return &w.Password } var WalletConfigDefault = WalletConfig{ Pathname: "", - PasswordImpl: PASSWORD_NOT_SET, + Password: PASSWORD_NOT_SET, PrivateKey: "", Account: "", OnlyCreateKey: false, @@ -37,7 +37,7 @@ var WalletConfigDefault = WalletConfig{ func WalletConfigAddOptions(prefix string, f *flag.FlagSet, defaultPathname string) { f.String(prefix+".pathname", defaultPathname, "pathname for wallet") - f.String(prefix+".password", WalletConfigDefault.PasswordImpl, "wallet passphrase") + f.String(prefix+".password", WalletConfigDefault.Password, "wallet passphrase") f.String(prefix+".private-key", WalletConfigDefault.PrivateKey, "private key for wallet") f.String(prefix+".account", WalletConfigDefault.Account, "account to use (default is first account in keystore)") f.Bool(prefix+".only-create-key", WalletConfigDefault.OnlyCreateKey, "if true, creates new key then exits") diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 5ab1521f96..cf10787d6d 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -27,9 +27,11 @@ type ValidationNodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` Metrics bool `koanf:"metrics"` MetricsServer 
genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` Workdir string `koanf:"workdir" reload:"hot"` } @@ -64,9 +66,11 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ HTTP: HTTPConfigDefault, WS: WSConfigDefault, IPC: IPCConfigDefault, - AuthRPC: genericconf.AuthRPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, + PProf: false, + PprofCfg: genericconf.PProfDefault, Workdir: "", } @@ -83,6 +87,8 @@ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.AuthRPCConfigAddOptions("auth", f) f.Bool("metrics", ValidationNodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) + f.Bool("pprof", ValidationNodeConfigDefault.PProf, "enable pprof") + genericconf.PProfAddOptions("pprof-cfg", f) f.String("workdir", ValidationNodeConfigDefault.Workdir, "path used for purpose of resolving relative paths (ia. jwt secret file, log files), if empty then current working directory will be used.") } diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index c3aba6404a..33a5e034d9 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -32,6 +32,28 @@ func main() { os.Exit(mainImpl()) } +// startMetrics checks the metrics and pprof flags and starts the corresponding servers if enabled. +// Note: they are kept separate so each can be enabled or disabled on its own; the only +// requirement is that they cannot run on the same address and port. +func startMetrics(cfg *ValidationNodeConfig) error { + mAddr := fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port) + pAddr := fmt.Sprintf("%v:%v", cfg.PprofCfg.Addr, cfg.PprofCfg.Port) + if cfg.Metrics && !metrics.Enabled { + return fmt.Errorf("metrics must be enabled via command line by adding --metrics, json config has no effect") + } + if cfg.Metrics && cfg.PProf && mAddr == pAddr { + return fmt.Errorf("metrics and pprof cannot be enabled on the same address:port: %s", mAddr) + } + if cfg.Metrics { + go metrics.CollectProcessMetrics(cfg.MetricsServer.UpdateInterval) + exp.Setup(fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port)) + } + if cfg.PProf { + genericconf.StartPprof(pAddr) + } + return nil +} + // Returns the exit code func mainImpl() int { ctx, cancelFunc := context.WithCancel(context.Background()) @@ -46,13 +68,13 @@ func mainImpl() int { stackConf.DataDir = "" // ephemeral nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true stackConf.P2P.NoDiscovery = true - vcsRevision, vcsTime := confighelpers.GetVersion() - stackConf.Version = vcsRevision + vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() + stackConf.Version = strippedRevision pathResolver := func(workdir string) func(string) string { if workdir == "" { @@ -69,13 +91,13 @@ func mainImpl() int { } } - err = genericconf.InitLog(nodeConfig.LogType, log.Lvl(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Workdir)) + err = genericconf.InitLog(nodeConfig.LogType, log.Lvl(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 } if stackConf.JWTSecret == "" && stackConf.AuthAddr != "" { - filename := 
pathResolver(nodeConfig.Workdir)("jwtsecret") + filename := pathResolver(nodeConfig.Persistent.GlobalConfig)("jwtsecret") if err := genericconf.TryCreatingJWTSecret(filename); err != nil { log.Error("Failed to prepare jwt secret file", "err", err) return 1 @@ -88,28 +110,19 @@ func mainImpl() int { liveNodeConfig := genericconf.NewLiveConfig[*ValidationNodeConfig](args, nodeConfig, ParseNode) liveNodeConfig.SetOnReloadHook(func(oldCfg *ValidationNodeConfig, newCfg *ValidationNodeConfig) error { - return genericconf.InitLog(newCfg.LogType, log.Lvl(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(newCfg.Workdir)) + return genericconf.InitLog(newCfg.LogType, log.Lvl(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) }) + + valnode.EnsureValidationExposedViaAuthRPC(&stackConf) + stack, err := node.New(&stackConf) if err != nil { flag.Usage() log.Crit("failed to initialize geth stack", "err", err) } - if nodeConfig.Metrics { - go metrics.CollectProcessMetrics(nodeConfig.MetricsServer.UpdateInterval) - - if nodeConfig.MetricsServer.Addr != "" { - address := fmt.Sprintf("%v:%v", nodeConfig.MetricsServer.Addr, nodeConfig.MetricsServer.Port) - if nodeConfig.MetricsServer.Pprof { - genericconf.StartPprof(address) - } else { - exp.Setup(address) - } - } - } else if nodeConfig.MetricsServer.Pprof { - flag.Usage() - log.Error("--metrics must be enabled in order to use pprof with the metrics server") + if err := startMetrics(nodeConfig); err != nil { + log.Error("Error starting metrics", "error", err) return 1 } diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index b26d595b88..ea04d4eb1f 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -15,10 +15,29 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" + + "github.com/r3labs/diff/v3" + flag "github.com/spf13/pflag" ) +func TestEmptyCliConfig(t *testing.T) { + f := flag.NewFlagSet("", flag.ContinueOnError) + NodeConfigAddOptions(f) + k, err := confighelpers.BeginCommonParse(f, []string{}) + Require(t, err) + var emptyCliNodeConfig NodeConfig + err = confighelpers.EndCommonParse(k, &emptyCliNodeConfig) + Require(t, err) + if !reflect.DeepEqual(emptyCliNodeConfig, NodeConfigDefault) { + changelog, err := diff.Diff(emptyCliNodeConfig, NodeConfigDefault) + Require(t, err) + Fail(t, "empty cli config differs from expected default", changelog) + } +} + func TestSeqConfig(t *testing.T) { args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") _, _, _, err := ParseNode(context.Background(), args) @@ -69,7 +88,7 @@ func TestReloads(t *testing.T) { config := NodeConfigDefault update := NodeConfigDefault - update.Node.BatchPoster.BatchPollDelay++ + update.Node.BatchPoster.MaxSize++ check(reflect.ValueOf(config), false, "config") Require(t, config.CanReload(&config)) @@ -86,7 +105,7 @@ func TestReloads(t *testing.T) { // check that non-reloadable fields fail assignment update.Metrics = !update.Metrics testUnsafe() - update.L2.ChainID++ + update.ParentChain.ID++ testUnsafe() update.Node.Staker.Enable = 
!update.Node.Staker.Enable testUnsafe() @@ -114,8 +133,8 @@ func TestLiveNodeConfig(t *testing.T) { // check updating the config update := config.ShallowClone() expected := config.ShallowClone() - update.Node.BatchPoster.BatchPollDelay += 2 * time.Millisecond - expected.Node.BatchPoster.BatchPollDelay += 2 * time.Millisecond + update.Node.BatchPoster.MaxSize += 100 + expected.Node.BatchPoster.MaxSize += 100 Require(t, liveConfig.Set(update)) if !reflect.DeepEqual(liveConfig.Get(), expected) { Fail(t, "failed to set config") @@ -123,7 +142,7 @@ func TestLiveNodeConfig(t *testing.T) { // check that an invalid reload gets rejected update = config.ShallowClone() - update.L2.ChainID++ + update.ParentChain.ID++ if liveConfig.Set(update) == nil { Fail(t, "failed to reject invalid update") } @@ -150,19 +169,19 @@ func TestLiveNodeConfig(t *testing.T) { // change the config file expected = config.ShallowClone() - expected.Node.BatchPoster.BatchPollDelay += time.Millisecond - jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"poll-delay\":\"%s\"}}, \"chain\":{\"id\":421613}}", expected.Node.BatchPoster.BatchPollDelay.String()) + expected.Node.BatchPoster.MaxSize += 100 + jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"max-size\":\"%d\"}}, \"chain\":{\"id\":421613}}", expected.Node.BatchPoster.MaxSize) Require(t, WriteToConfigFile(configFile, jsonConfig)) // trigger LiveConfig reload Require(t, syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)) if !PollLiveConfigUntilEqual(liveConfig, expected) { - Fail(t, "failed to update config", config.Node.BatchPoster.BatchPollDelay, update.Node.BatchPoster.BatchPollDelay) + Fail(t, "failed to update config", config.Node.BatchPoster.MaxSize, update.Node.BatchPoster.MaxSize) } // change chain.id in the config file (currently non-reloadable) - jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"poll-delay\":\"%s\"}}, \"chain\":{\"id\":421703}}", expected.Node.BatchPoster.BatchPollDelay.String()) + jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"max-size\":\"%d\"}}, \"chain\":{\"id\":421703}}", expected.Node.BatchPoster.MaxSize) Require(t, WriteToConfigFile(configFile, jsonConfig)) // trigger LiveConfig reload diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 6a534b743b..f874b5d71e 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -10,6 +10,7 @@ import ( "fmt" "math/big" "os" + "reflect" "regexp" "runtime" "strings" @@ -33,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" @@ -41,7 +43,7 @@ import ( "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/statetransfer" - flag "github.com/spf13/pflag" + "github.com/spf13/pflag" ) type InitConfig struct { @@ -50,7 +52,7 @@ type InitConfig struct { DownloadPath string `koanf:"download-path"` DownloadPoll time.Duration `koanf:"download-poll"` DevInit bool `koanf:"dev-init"` - DevInitAddr string `koanf:"dev-init-address"` + DevInitAddress string `koanf:"dev-init-address"` DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` Empty bool `koanf:"empty"` AccountsPerSync uint `koanf:"accounts-per-sync"` @@ -58,7 +60,7 @@ type InitConfig struct { ThenQuit bool `koanf:"then-quit"` Prune string `koanf:"prune"` PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMsg int64 
`koanf:"reset-to-message"` + ResetToMessage int64 `koanf:"reset-to-message"` } var InitConfigDefault = InitConfig{ @@ -67,31 +69,31 @@ var InitConfigDefault = InitConfig{ DownloadPath: "/tmp/", DownloadPoll: time.Minute, DevInit: false, - DevInitAddr: "", + DevInitAddress: "", DevInitBlockNum: 0, ImportFile: "", AccountsPerSync: 100000, ThenQuit: false, Prune: "", PruneBloomSize: 2048, - ResetToMsg: -1, + ResetToMessage: -1, } -func InitConfigAddOptions(prefix string, f *flag.FlagSet) { +func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".force", InitConfigDefault.Force, "if true: in case database exists init code will be reexecuted and genesis block compared to database") f.String(prefix+".url", InitConfigDefault.Url, "url to download initializtion data - will poll if download fails") f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") - f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddr, "Address of dev-account. Leave empty to use the dev-wallet.") + f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.") f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.") - f.Bool(prefix+".empty", InitConfigDefault.DevInit, "init with empty state") + f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state") f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") - f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMsg, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages") } func downloadInit(ctx context.Context, initConfig *InitConfig) (string, error) { @@ -295,7 +297,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, err } if initConfig.Prune == "validator" { - if l1Client == nil { + if l1Client == nil || reflect.ValueOf(l1Client).IsNil() { return nil, errors.New("an L1 connection is required for validator pruning") } callOpts := bind.CallOpts{ @@ -329,7 +331,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node log.Warn("missing latest confirmed block", "hash", confirmedHash) } - validatorDb := rawdb.NewTable(arbDb, arbnode.BlockValidatorPrefix) + validatorDb := rawdb.NewTable(arbDb, storage.BlockValidatorPrefix) lastValidated, err := staker.ReadLastValidatedInfo(validatorDb) if err != nil { return nil, err @@ -514,7 +516,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo NextBlockNumber: config.Init.DevInitBlockNum, Accounts: []statetransfer.AccountInitializationInfo{ { - Addr: common.HexToAddress(config.Init.DevInitAddr), + Addr: common.HexToAddress(config.Init.DevInitAddress), EthBalance: new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(1000)), Nonce: 0, }, @@ -550,15 +552,15 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := config.L2.ChainInfoFiles - if config.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.L2.ChainInfoIpfsUrl, config.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFiles := config.Chain.InfoFiles + if config.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting l2 chain info file from ipfs", "err", err) } combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) } - chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.L2.ChainID), config.L2.ChainName, genesisBlockNr, combinedL2ChainInfoFiles, config.L2.ChainInfoJson) + chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err } @@ -583,7 +585,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo cacheConfig.SnapshotWait = true } var parsedInitMessage *arbostypes.ParsedInitMessage - if config.Node.L1Reader.Enable { + if config.Node.ParentChainReader.Enable { delayedBridge, err := arbnode.NewDelayedBridge(l1Client, rollupAddrs.Bridge, rollupAddrs.DeployedAt) if err != nil { return chainDb, nil, fmt.Errorf("failed creating delayed bridge while attempting to get serialized chain config from init message: %w", err) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 62c76874c0..0edbc98ce4 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -12,11 +12,13 @@ import ( "math/big" "os" "os/signal" + "path/filepath" "reflect" "strings" "syscall" "time" + "github.com/cockroachdb/pebble" "github.com/knadh/koanf" "github.com/knadh/koanf/providers/confmap" flag "github.com/spf13/pflag" @@ -25,7 +27,9 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" + 
"github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" _ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/native" @@ -47,7 +51,9 @@ import ( "github.com/offchainlabs/nitro/execution/execclient" "github.com/offchainlabs/nitro/execution/gethexec" _ "github.com/offchainlabs/nitro/execution/nodeInterface" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" @@ -56,7 +62,10 @@ import ( ) func printSampleUsage(name string) { - fmt.Printf("Sample usage: %s --help \n", name) + fmt.Printf("Sample usage: %s [OPTIONS] \n\n", name) + fmt.Printf("Options:\n") + fmt.Printf(" --help\n") + fmt.Printf(" --dev: Start a default L2-only dev chain\n") } func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.WalletConfig) (common.Address, error) { @@ -87,21 +96,21 @@ func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.W account.Address = common.HexToAddress(walletConf.Account) account, err = myKeystore.Find(account) } else { - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } if devPrivKey == nil { return common.Address{}, errors.New("l2 private key not set") } - account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Password()) + account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Pwd()) } if err != nil { return common.Address{}, err } - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } - err = myKeystore.Unlock(account, *walletConf.Password()) + err = myKeystore.Unlock(account, *walletConf.Pwd()) if err != nil { return common.Address{}, err } @@ -113,7 +122,7 @@ func closeDb(db io.Closer, name string) { if db != nil { err := db.Close() // unfortunately the freezer db means we can't just use errors.Is - if err != nil && !strings.Contains(err.Error(), leveldb.ErrClosed.Error()) { + if err != nil && !strings.Contains(err.Error(), leveldb.ErrClosed.Error()) && !strings.Contains(err.Error(), pebble.ErrClosed.Error()) { log.Warn("failed to close database on shutdown", "db", name, "err", err) } } @@ -123,6 +132,28 @@ func main() { os.Exit(mainImpl()) } +// Checks metrics and PProf flag, runs them if enabled. +// Note: they are separate so one can enable/disable them as they wish, the only +// requirement is that they can't run on the same address and port. 
+func startMetrics(cfg *NodeConfig) error { + mAddr := fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port) + pAddr := fmt.Sprintf("%v:%v", cfg.PprofCfg.Addr, cfg.PprofCfg.Port) + if cfg.Metrics && !metrics.Enabled { + return fmt.Errorf("metrics must be enabled via command line by adding --metrics, json config has no effect") + } + if cfg.Metrics && cfg.PProf && mAddr == pAddr { + return fmt.Errorf("metrics and pprof cannot be enabled on the same address:port: %s", mAddr) + } + if cfg.Metrics { + go metrics.CollectProcessMetrics(cfg.MetricsServer.UpdateInterval) + exp.Setup(fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port)) + } + if cfg.PProf { + genericconf.StartPprof(pAddr) + } + return nil +} + // Returns the exit code func mainImpl() int { ctx, cancelFunc := context.WithCancel(context.Background()) @@ -135,9 +166,10 @@ func mainImpl() int { } stackConf := node.DefaultConfig stackConf.DataDir = nodeConfig.Persistent.Chain + stackConf.DBEngine = nodeConfig.Persistent.DBEngine nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) nodeConfig.GraphQL.Apply(&stackConf) if nodeConfig.WS.ExposeAll { @@ -146,50 +178,54 @@ func mainImpl() int { stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true stackConf.P2P.NoDiscovery = true - vcsRevision, vcsTime := confighelpers.GetVersion() - stackConf.Version = vcsRevision + vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() + stackConf.Version = strippedRevision + + pathResolver := func(workdir string) func(string) string { + if workdir == "" { + workdir, err = os.Getwd() + if err != nil { + log.Warn("Failed to get workdir", "err", err) + } + } + return func(path string) string { + if filepath.IsAbs(path) { + return path + } + return filepath.Join(workdir, path) + } + } if stackConf.JWTSecret == "" && stackConf.AuthAddr != "" { - filename := stackConf.ResolvePath("jwtsecret") + filename := pathResolver(nodeConfig.Persistent.GlobalConfig)("jwtsecret") if err := genericconf.TryCreatingJWTSecret(filename); err != nil { log.Error("Failed to prepare jwt secret file", "err", err) return 1 } stackConf.JWTSecret = filename } - - err = genericconf.InitLog(nodeConfig.LogType, log.Lvl(nodeConfig.LogLevel), &nodeConfig.FileLogging, stackConf.ResolvePath) + err = genericconf.InitLog(nodeConfig.LogType, log.Lvl(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 } - if nodeConfig.Execution.Archive { - log.Warn("--node.archive has been deprecated. 
Please use --node.caching.archive instead.") - nodeConfig.Execution.Caching.Archive = true - } log.Info("Running Arbitrum nitro node", "revision", vcsRevision, "vcs.time", vcsTime) if nodeConfig.Node.Dangerous.NoL1Listener { - nodeConfig.Node.L1Reader.Enable = false + nodeConfig.Node.ParentChainReader.Enable = false nodeConfig.Node.BatchPoster.Enable = false nodeConfig.Node.DelayedSequencer.Enable = false } else { - nodeConfig.Node.L1Reader.Enable = true + nodeConfig.Node.ParentChainReader.Enable = true } - if nodeConfig.Execution.Sequencer.Enable { - if nodeConfig.Execution.ForwardingTarget() != "" { - flag.Usage() - log.Crit("forwarding-target cannot be set when sequencer is enabled") - } - if nodeConfig.Node.L1Reader.Enable && nodeConfig.Node.InboxReader.HardReorg { - flag.Usage() - log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") - } - } else if nodeConfig.Execution.ForwardingTargetImpl == "" { + if nodeConfig.Execution.Sequencer.Enable && nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { flag.Usage() - log.Crit("forwarding-target unset, and not sequencer (can set to \"null\" to disable forwarding)") + log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") + } + if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer { + log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } var l1TransactionOpts *bind.TransactOpts @@ -203,21 +239,24 @@ func mainImpl() int { defaultL1WalletConfig := conf.DefaultL1WalletConfig defaultL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.Staker.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.Staker.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultValidatorL1WalletConfig := staker.DefaultValidatorL1WalletConfig defaultValidatorL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.BatchPoster.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.BatchPoster.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultBatchPosterL1WalletConfig := arbnode.DefaultBatchPosterL1WalletConfig defaultBatchPosterL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - if nodeConfig.Node.Staker.L1Wallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.L1Wallet == defaultBatchPosterL1WalletConfig { + if nodeConfig.Node.Staker.ParentChainWallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.ParentChainWallet == defaultBatchPosterL1WalletConfig { if sequencerNeedsKey || validatorNeedsKey || l1Wallet.OnlyCreateKey { - l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() log.Crit("error opening parent chain wallet", "path", l1Wallet.Pathname, "account", l1Wallet.Account, "err", err) } + if l1Wallet.OnlyCreateKey { + return 0 + } l1TransactionOptsBatchPoster = l1TransactionOpts l1TransactionOptsValidator = l1TransactionOpts } @@ -225,25 +264,31 @@ func mainImpl() int { if *l1Wallet != defaultL1WalletConfig { log.Crit("--parent-chain.wallet cannot be set if either --node.staker.l1-wallet or --node.batch-poster.l1-wallet are set") } 
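// A hedged sketch of the only-create-key flow added here and just below (the
// flag name follows WalletConfigAddOptions; exact prefixes may differ):
//
//	nitro --node.batch-poster.l1-wallet.only-create-key
//
// now creates the batch poster's key in the keystore and exits with code 0
// instead of continuing to start the node.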
- if sequencerNeedsKey || nodeConfig.Node.BatchPoster.L1Wallet.OnlyCreateKey { - l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() - log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.L1Wallet.Pathname, "account", nodeConfig.Node.BatchPoster.L1Wallet.Account, "err", err) + log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) + } + if nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + return 0 } } - if validatorNeedsKey || nodeConfig.Node.Staker.L1Wallet.OnlyCreateKey { - l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if validatorNeedsKey || nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() - log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.L1Wallet.Pathname, "account", nodeConfig.Node.Staker.L1Wallet.Account, "err", err) + log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) + } + if nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + return 0 } } } - combinedL2ChainInfoFile := nodeConfig.L2.ChainInfoFiles - if nodeConfig.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.L2.ChainInfoIpfsUrl, nodeConfig.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles + if nodeConfig.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting chain info file from ipfs", "err", err) } @@ -251,7 +296,7 @@ func mainImpl() int { } if nodeConfig.Node.Staker.Enable { - if !nodeConfig.Node.L1Reader.Enable { + if !nodeConfig.Node.ParentChainReader.Enable { flag.Usage() log.Crit("validator must have the parent chain reader enabled") } @@ -264,6 +309,13 @@ func mainImpl() int { } } + if nodeConfig.Execution.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if nodeConfig.Execution.Caching.Archive { + nodeConfig.Execution.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + nodeConfig.Execution.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } liveNodeConfig := genericconf.NewLiveConfig[*NodeConfig](args, nodeConfig, func(ctx context.Context, args []string) (*NodeConfig, error) { nodeConfig, _, _, err := ParseNode(ctx, args) return nodeConfig, err @@ -271,8 +323,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client - if nodeConfig.Node.L1Reader.Enable { - 
confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().L1.Connection } + if nodeConfig.Node.ParentChainReader.Enable { + confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) err := rpcClient.Start(ctx) if err != nil { @@ -283,13 +335,13 @@ func mainImpl() int { if err != nil { log.Crit("couldn't read L1 chainid", "err", err) } - if l1ChainId.Uint64() != nodeConfig.L1.ChainID { - log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.L1.ChainID) + if l1ChainId.Uint64() != nodeConfig.ParentChain.ID { + log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.ParentChain.ID) } - log.Info("connected to l1 chain", "l1url", nodeConfig.L1.Connection.URL, "l1chainid", nodeConfig.L1.ChainID) + log.Info("connected to l1 chain", "l1url", nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ID) - rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses", "err", err) } @@ -300,18 +352,18 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.L1Reader }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) - } // Just create validator smart wallet if needed then exit - deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses config", "err", err) } - addr, err := staker.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1TransactionOptsValidator, l1Reader, true) + addr, err := validatorwallet.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1TransactionOptsValidator, l1Reader, true) if err != nil { log.Crit("error creating validator wallet contract", "error", err, "address", l1TransactionOptsValidator.From.Hex()) } @@ -324,8 +376,16 @@ func mainImpl() int { nodeConfig.Execution.TxLookupLimit = 0 } - resourcemanager.Init(&nodeConfig.Node.ResourceManagement) + if err := resourcemanager.Init(&nodeConfig.Node.ResourceMgmt); err != nil { + flag.Usage() + log.Crit("Failed to start resource management module", "err", err) + } + var sameProcessValidationNodeEnabled bool + if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { + sameProcessValidationNodeEnabled = true + valnode.EnsureValidationExposedViaAuthRPC(&stackConf) + } stack, err 
:= node.New(&stackConf) if err != nil { flag.Usage() @@ -338,16 +398,27 @@ func mainImpl() int { log.Crit("error opening L2 dev wallet", "err", err) } if devAddr != (common.Address{}) { - nodeConfig.Init.DevInitAddr = devAddr.String() + nodeConfig.Init.DevInitAddress = devAddr.String() } } - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.L2.ChainID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), l1Client, rollupAddrs) - defer closeDb(chainDb, "chainDb") + if err := startMetrics(nodeConfig); err != nil { + log.Error("Error starting metrics", "error", err) + return 1 + } + + var deferFuncs []func() + defer func() { + for i := range deferFuncs { + deferFuncs[i]() + } + }() + + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), l1Client, rollupAddrs) if l2BlockChain != nil { - // Calling Stop on the blockchain multiple times does nothing - defer l2BlockChain.Stop() + deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } + deferFuncs = append(deferFuncs, func() { closeDb(chainDb, "chainDb") }) if err != nil { flag.Usage() log.Error("error initializing database", "err", err) @@ -355,13 +426,13 @@ func mainImpl() int { } arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "", false) - defer closeDb(arbDb, "arbDb") + deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { log.Error("failed to open database", "err", err) return 1 } - if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMsg < 0 { + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -371,27 +442,10 @@ func mainImpl() int { return 1 } - if nodeConfig.Metrics { - go metrics.CollectProcessMetrics(nodeConfig.MetricsServer.UpdateInterval) - - if nodeConfig.MetricsServer.Addr != "" { - address := fmt.Sprintf("%v:%v", nodeConfig.MetricsServer.Addr, nodeConfig.MetricsServer.Port) - if nodeConfig.MetricsServer.Pprof { - genericconf.StartPprof(address) - } else { - exp.Setup(address) - } - } - } else if nodeConfig.MetricsServer.Pprof { - flag.Usage() - log.Error("--metrics must be enabled in order to use pprof with the metrics server") - return 1 - } - fatalErrChan := make(chan error, 10) var valNode *valnode.ValidationNode - if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { + if sameProcessValidationNodeEnabled { valNode, err = valnode.CreateValidationNode( func() *valnode.Config { return &liveNodeConfig.Get().Validation }, stack, @@ -437,7 +491,7 @@ func mainImpl() int { return 1 } liveNodeConfig.SetOnReloadHook(func(oldCfg *NodeConfig, newCfg *NodeConfig) error { - if err := genericconf.InitLog(newCfg.LogType, log.Lvl(newCfg.LogLevel), &newCfg.FileLogging, stackConf.ResolvePath); err != nil { + if err := genericconf.InitLog(newCfg.LogType, log.Lvl(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { return fmt.Errorf("failed to re-init logging: %w", err) } return currentNode.OnConfigReload(&oldCfg.Node, &newCfg.Node) @@ -483,7 +537,8 @@ func mainImpl() int { if err != nil { fatalErrChan <- fmt.Errorf("error starting node: %w", err) } - defer currentNode.StopAndWait() + // remove previous deferFuncs, StopAndWait closes database and blockchain. 
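+		// The funcs collected above (blockchain stop, chainDb/arbDb close) are dropped
+		// here on purpose: StopAndWait shuts those down itself, so keeping the earlier
+		// entries would close the same resources twice.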
+ deferFuncs = []func(){func() { currentNode.StopAndWait() }} } if err == nil { err = execNode.Start(ctx) @@ -499,8 +554,8 @@ func mainImpl() int { exitCode := 0 - if err == nil && nodeConfig.Init.ResetToMsg > 0 { - err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMsg)) + if err == nil && nodeConfig.Init.ResetToMessage > 0 { + err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMessage)) if err != nil { fatalErrChan <- fmt.Errorf("error reseting message: %w", err) exitCode = 1 @@ -532,8 +587,8 @@ type NodeConfig struct { Node arbnode.Config `koanf:"node" reload:"hot"` Execution gethexec.Config `koanf:"execution" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - L1 conf.L1Config `koanf:"parent-chain" reload:"hot"` - L2 conf.L2Config `koanf:"chain"` + ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` LogLevel int `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` @@ -541,10 +596,12 @@ type NodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` GraphQL genericconf.GraphQLConfig `koanf:"graphql"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` Init InitConfig `koanf:"init"` Rpc genericconf.RpcConfig `koanf:"rpc"` } @@ -552,16 +609,25 @@ type NodeConfig struct { var NodeConfigDefault = NodeConfig{ Conf: genericconf.ConfConfigDefault, Node: arbnode.ConfigDefault, - L1: conf.L1ConfigDefault, - L2: conf.L2ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, Persistent: conf.PersistentConfigDefault, HTTP: genericconf.HTTPConfigDefault, WS: genericconf.WSConfigDefault, IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, + Init: InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, + PProf: false, + PprofCfg: genericconf.PProfDefault, } func NodeConfigAddOptions(f *flag.FlagSet) { @@ -582,6 +648,9 @@ func NodeConfigAddOptions(f *flag.FlagSet) { genericconf.GraphQLConfigAddOptions("graphql", f) f.Bool("metrics", NodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) + f.Bool("pprof", NodeConfigDefault.PProf, "enable pprof") + genericconf.PProfAddOptions("pprof-cfg", f) + InitConfigAddOptions("init", f) genericconf.RpcConfigAddOptions("rpc", f) } @@ -591,8 +660,8 @@ func (c *NodeConfig) ResolveDirectoryNames() error { if err != nil { return err } - c.L1.ResolveDirectoryNames(c.Persistent.Chain) - c.L2.ResolveDirectoryNames(c.Persistent.Chain) + c.ParentChain.ResolveDirectoryNames(c.Persistent.Chain) + c.Chain.ResolveDirectoryNames(c.Persistent.Chain) return nil } @@ -636,10 +705,13 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { - if err := c.L1.Validate(); err != nil { + if err := 
c.ParentChain.Validate(); err != nil { return err } - return c.Node.Validate() + if err := c.Node.Validate(); err != nil { + return err + } + return c.Persistent.Validate() } func (c *NodeConfig) GetReloadInterval() time.Duration { @@ -710,12 +782,12 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa } // Don't pass around wallet contents with normal configuration - l1Wallet := nodeConfig.L1.Wallet - l2DevWallet := nodeConfig.L2.DevWallet - nodeConfig.L1.Wallet = genericconf.WalletConfigDefault - nodeConfig.L2.DevWallet = genericconf.WalletConfigDefault + l1Wallet := nodeConfig.ParentChain.Wallet + l2DevWallet := nodeConfig.Chain.DevWallet + nodeConfig.ParentChain.Wallet = genericconf.WalletConfigDefault + nodeConfig.Chain.DevWallet = genericconf.WalletConfigDefault - if nodeConfig.Execution.Archive { + if nodeConfig.Execution.Caching.Archive { nodeConfig.Node.MessagePruner.Enable = false } err = nodeConfig.Validate() @@ -739,9 +811,19 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if err != nil { return false, err } + var parentChainIsArbitrum bool + if chainInfo.ParentChainIsArbitrum != nil { + parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum + } else { + log.Warn("Chain information parentChainIsArbitrum field missing, in the future this will be required", "chainId", chainInfo.ChainConfig.ChainID, "parentChainId", chainInfo.ParentChainId) + _, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "") + if err == nil { + parentChainIsArbitrum = true + } + } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, - "chain.id": chainInfo.ChainId, + "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), "parent-chain.id": chainInfo.ParentChainId, } if chainInfo.SequencerUrl != "" { @@ -758,6 +840,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if !chainInfo.HasGenesisState { chainDefaults["init.empty"] = true } + if parentChainIsArbitrum { + l2MaxTxSize := gethexec.DefaultSequencerConfig.MaxTxDataSize + bufferSpace := 5000 + if l2MaxTxSize < bufferSpace*2 { + return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace) + } + safeBatchSize := l2MaxTxSize - bufferSpace + chainDefaults["node.batch-poster.max-size"] = safeBatchSize + chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - bufferSpace + } err = k.Load(confmap.Provider(chainDefaults, "."), nil) if err != nil { return false, err diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 9f5669454f..b25aadf57b 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -37,11 +37,33 @@ func printSampleUsage(progname string) { fmt.Printf("Sample usage: %s --node.feed.input.url= --chain.id= \n", progname) } +// startMetrics checks the metrics and pprof flags and starts the corresponding servers if enabled. +// Note: they are kept separate so each can be enabled or disabled on its own; the only +// requirement is that they cannot run on the same address and port. 
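+// A hedged example using the genericconf defaults shown earlier (metrics on
+// 127.0.0.1:6070, pprof on 127.0.0.1:6071), and assuming the relay registers
+// the same --metrics/--pprof flags as the node binaries:
+//
+//	relay --node.feed.input.url=<url> --chain.id=<id> --metrics --pprof
+//	relay ... --metrics --pprof --pprof-cfg.port 6070   // rejected: same address:port as metrics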
+func startMetrics(cfg *relay.Config) error { + mAddr := fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port) + pAddr := fmt.Sprintf("%v:%v", cfg.PprofCfg.Addr, cfg.PprofCfg.Port) + if cfg.Metrics && !metrics.Enabled { + return fmt.Errorf("metrics must be enabled via command line by adding --metrics, json config has no effect") + } + if cfg.Metrics && cfg.PProf && mAddr == pAddr { + return fmt.Errorf("metrics and pprof cannot be enabled on the same address:port: %s", mAddr) + } + if cfg.Metrics { + go metrics.CollectProcessMetrics(cfg.MetricsServer.UpdateInterval) + exp.Setup(fmt.Sprintf("%v:%v", cfg.MetricsServer.Addr, cfg.MetricsServer.Port)) + } + if cfg.PProf { + genericconf.StartPprof(pAddr) + } + return nil +} + func startup() error { ctx := context.Background() relayConfig, err := relay.ParseRelay(ctx, os.Args[1:]) - if err != nil || len(relayConfig.Node.Feed.Input.URLs) == 0 || relayConfig.Node.Feed.Input.URLs[0] == "" || relayConfig.L2.ChainId == 0 { + if err != nil || len(relayConfig.Node.Feed.Input.URL) == 0 || relayConfig.Node.Feed.Input.URL[0] == "" || relayConfig.Chain.ID == 0 { confighelpers.PrintErrorAndExit(err, printSampleUsage) } @@ -54,7 +76,7 @@ func startup() error { glogger.Verbosity(log.Lvl(relayConfig.LogLevel)) log.Root().SetHandler(glogger) - vcsRevision, vcsTime := confighelpers.GetVersion() + vcsRevision, _, vcsTime := confighelpers.GetVersion() log.Info("Running Arbitrum nitro relay", "revision", vcsRevision, "vcs.time", vcsTime) defer log.Info("Cleanly shutting down relay") @@ -68,16 +90,13 @@ func startup() error { if err != nil { return err } - err = newRelay.Start(ctx) - if err != nil { + + if err := startMetrics(relayConfig); err != nil { return err } - if relayConfig.Metrics && relayConfig.MetricsServer.Addr != "" { - go metrics.CollectProcessMetrics(relayConfig.MetricsServer.UpdateInterval) - - address := fmt.Sprintf("%v:%v", relayConfig.MetricsServer.Addr, relayConfig.MetricsServer.Port) - exp.Setup(address) + if err := newRelay.Start(ctx); err != nil { + return err } select { diff --git a/cmd/replay/db.go b/cmd/replay/db.go index 9a065789cf..7147c48f75 100644 --- a/cmd/replay/db.go +++ b/cmd/replay/db.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/wavmio" ) @@ -34,7 +35,7 @@ func (db PreimageDb) Get(key []byte) ([]byte, error) { } else { return nil, fmt.Errorf("preimage DB attempted to access non-hash key %v", hex.EncodeToString(key)) } - return wavmio.ResolvePreImage(hash) + return wavmio.ResolveTypedPreimage(arbutil.Keccak256PreimageType, hash) } func (db PreimageDb) Put(key []byte, value []byte) error { diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 501562d265..2fb13ceed8 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/gethhook" @@ -33,7 +34,7 @@ import ( ) func getBlockHeaderByHash(hash common.Hash) *types.Header { - enc, err := wavmio.ResolvePreImage(hash) + enc, err := wavmio.ResolveTypedPreimage(arbutil.Keccak256PreimageType, hash) if err != nil { panic(fmt.Errorf("Error resolving preimage: %w", err)) } @@ -102,7 +103,10 @@ type PreimageDASReader struct 
{ } func (dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { - return dastree.Content(hash, wavmio.ResolvePreImage) + oracle := func(hash common.Hash) ([]byte, error) { + return wavmio.ResolveTypedPreimage(arbutil.Keccak256PreimageType, hash) + } + return dastree.Content(hash, oracle) } func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go new file mode 100644 index 0000000000..782ab3801b --- /dev/null +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -0,0 +1,28 @@ +package rediscoordinator + +import ( + "context" + "errors" + "strings" + + "github.com/go-redis/redis/v8" + "github.com/offchainlabs/nitro/util/redisutil" +) + +// RedisCoordinator builds upon RedisCoordinator of redisutil with additional functionality +type RedisCoordinator struct { + *redisutil.RedisCoordinator +} + +// UpdatePriorities updates the priority list of sequencers +func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + prioritiesString := strings.Join(priorities, ",") + err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return err + } + return nil +} diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go new file mode 100644 index 0000000000..07bc26af2c --- /dev/null +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -0,0 +1,318 @@ +package main + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/enescakir/emoji" + "github.com/ethereum/go-ethereum/log" + "github.com/gdamore/tcell/v2" + "github.com/offchainlabs/nitro/cmd/seq-coordinator-manager/rediscoordinator" + "github.com/offchainlabs/nitro/util/redisutil" + "github.com/rivo/tview" +) + +// Tview +var pages = tview.NewPages() +var app = tview.NewApplication() + +// Lists +var prioritySeqList = tview.NewList().ShowSecondaryText(false) +var nonPrioritySeqList = tview.NewList().ShowSecondaryText(false) + +// Forms +var addSeqForm = tview.NewForm() +var priorityForm = tview.NewForm() +var nonPriorityForm = tview.NewForm() + +// Sequencer coordinator management UI data store +type manager struct { + redisCoordinator *rediscoordinator.RedisCoordinator + prioritiesSet map[string]bool + livelinessSet map[string]bool + priorityList []string + nonPriorityList []string +} + +func main() { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + args := os.Args[1:] + if len(args) != 1 { + fmt.Fprintf(os.Stderr, "Usage: seq-coordinator-manager [redis-url]\n") + os.Exit(1) + } + redisURL := args[0] + redisutilCoordinator, err := redisutil.NewRedisCoordinator(redisURL) + if err != nil { + panic(err) + } + + seqManager := &manager{ + redisCoordinator: &rediscoordinator.RedisCoordinator{ + RedisCoordinator: redisutilCoordinator, + }, + prioritiesSet: make(map[string]bool), + livelinessSet: make(map[string]bool), + } + + seqManager.refreshAllLists(ctx) + seqManager.populateLists(ctx) + + prioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + nonPriorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n) + for i := 0; i < n; i++ { + priorities[i] = strconv.Itoa(i) 
+ } + + target := index + priorityForm.Clear(true) + priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + priorityForm.AddButton("Update", func() { + if target != index { + seqManager.updatePriorityList(ctx, index, target) + } + priorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Cancel", func() { + priorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Remove", func() { + url := seqManager.priorityList[index] + delete(seqManager.prioritiesSet, url) + seqManager.updatePriorityList(ctx, index, 0) + seqManager.priorityList = seqManager.priorityList[1:] + + priorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.SetFocus(0) + app.SetFocus(priorityForm) + }) + + nonPrioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + priorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n+1) + for i := 0; i < n+1; i++ { + priorities[i] = strconv.Itoa(i) + } + + target := index + nonPriorityForm.Clear(true) + nonPriorityForm.AddDropDown("Set priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + nonPriorityForm.AddButton("Update", func() { + key := seqManager.nonPriorityList[index] + seqManager.priorityList = append(seqManager.priorityList, key) + seqManager.prioritiesSet[key] = true + + index = len(seqManager.priorityList) - 1 + seqManager.updatePriorityList(ctx, index, target) + + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + if len(seqManager.nonPriorityList) > 0 { + app.SetFocus(nonPrioritySeqList) + } else { + app.SetFocus(prioritySeqList) + } + }) + nonPriorityForm.AddButton("Cancel", func() { + nonPriorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(nonPrioritySeqList) + }) + nonPriorityForm.SetFocus(0) + app.SetFocus(nonPriorityForm) + }) + + // UI design + flex := tview.NewFlex() + priorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Priority List-----") + nonPriorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Not in priority list but online-----") + instructions := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("(r) to refresh\n(s) to save all changes\n(c) to switch between lists\n(a) to add sequencer\n(q) to quit\n(tab) to navigate") + + flex.SetDirection(tview.FlexRow). + AddItem(priorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(prioritySeqList, 0, 2, true). + AddItem(priorityForm, 0, 3, true), 0, 12, true). + AddItem(nonPriorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(nonPrioritySeqList, 0, 2, true). + AddItem(nonPriorityForm, 0, 3, true), 0, 12, true). 
+		AddItem(instructions, 0, 3, false).SetBorder(true)
+
+	flex.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
+		if event.Rune() == 'r' { // refresh
+			seqManager.refreshAllLists(ctx)
+			priorityForm.Clear(true)
+			nonPriorityForm.Clear(true)
+			seqManager.populateLists(ctx)
+			pages.SwitchToPage("Menu")
+			app.SetFocus(prioritySeqList)
+		} else if event.Rune() == 's' { // save all changes
+			seqManager.pushUpdates(ctx)
+			priorityForm.Clear(true)
+			nonPriorityForm.Clear(true)
+			seqManager.populateLists(ctx)
+			pages.SwitchToPage("Menu")
+			app.SetFocus(prioritySeqList)
+		} else if event.Rune() == 'a' { // add sequencer
+			addSeqForm.Clear(true)
+			seqManager.addSeqPriorityForm(ctx)
+			pages.SwitchToPage("Add Sequencer")
+		} else if event.Rune() == 'c' { // switch between lists
+			if prioritySeqList.HasFocus() || priorityForm.HasFocus() {
+				priorityForm.Clear(true)
+				app.SetFocus(nonPrioritySeqList)
+			} else {
+				nonPriorityForm.Clear(true)
+				app.SetFocus(prioritySeqList)
+			}
+		} else if event.Rune() == 'q' { // quit
+			app.Stop()
+		}
+		return event
+	})
+
+	pages.AddPage("Menu", flex, true, true)
+	pages.AddPage("Add Sequencer", addSeqForm, true, false)
+
+	if err := app.SetRoot(pages, true).EnableMouse(true).Run(); err != nil {
+		panic(err)
+	}
+}
+
+// updatePriorityList moves the sequencer at `index` to `target` in the priority list
+func (sm *manager) updatePriorityList(ctx context.Context, index int, target int) {
+	for i := index - 1; i >= target; i-- {
+		sm.priorityList[i], sm.priorityList[i+1] = sm.priorityList[i+1], sm.priorityList[i]
+	}
+	for i := index + 1; i <= target; i++ {
+		sm.priorityList[i], sm.priorityList[i-1] = sm.priorityList[i-1], sm.priorityList[i]
+	}
+
+	urlList := []string{}
+	for url := range sm.livelinessSet {
+		if _, ok := sm.prioritiesSet[url]; !ok {
+			urlList = append(urlList, url)
+		}
+	}
+	sm.nonPriorityList = urlList
+}
+
+// populateLists populates the sequencers in the priority list and the sequencers that are online but not in it
+func (sm *manager) populateLists(ctx context.Context) {
+	prioritySeqList.Clear()
+	chosen, err := sm.redisCoordinator.CurrentChosenSequencer(ctx)
+	if err != nil {
+		panic(err)
+	}
+	for index, seqURL := range sm.priorityList {
+		sec := ""
+		if seqURL == chosen {
+			sec = fmt.Sprintf(" %vchosen", emoji.LeftArrow)
+		}
+		status := fmt.Sprintf("(%d) %v ", index, emoji.RedCircle)
+		if _, ok := sm.livelinessSet[seqURL]; ok {
+			status = fmt.Sprintf("(%d) %v ", index, emoji.GreenCircle)
+		}
+		prioritySeqList.AddItem(status+seqURL+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple)
+	}
+
+	nonPrioritySeqList.Clear()
+	status := fmt.Sprintf("(-) %v ", emoji.GreenCircle)
+	for _, seqURL := range sm.nonPriorityList {
+		nonPrioritySeqList.AddItem(status+seqURL, "", rune(0), nil)
+	}
+}
+
+// addSeqPriorityForm returns a form with fields to add a new sequencer to the priority list
+func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form {
+	URL := ""
+	addSeqForm.AddInputField("Sequencer URL", "", 0, nil, func(url string) {
+		URL = url
+	})
+	addSeqForm.AddButton("Cancel", func() {
+		priorityForm.Clear(true)
+		sm.populateLists(ctx)
+		pages.SwitchToPage("Menu")
+	})
+	addSeqForm.AddButton("Add", func() {
+		// check that the URL is valid, i.e. it doesn't already exist in the priority list
+		if _, ok := sm.prioritiesSet[URL]; !ok && URL != "" {
+			sm.prioritiesSet[URL] = true
+			sm.priorityList = append(sm.priorityList, URL)
+		}
+		sm.populateLists(ctx)
+		pages.SwitchToPage("Menu")
+	})
+	return addSeqForm
+}
+
+// pushUpdates pushes the local changes to the Redis server
+func (sm *manager) pushUpdates(ctx
context.Context) { + err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) + if err != nil { + log.Warn("Failed to push local changes to the priority list") + } + sm.refreshAllLists(ctx) +} + +// refreshAllLists gets the current status of all the lists displayed in the UI +func (sm *manager) refreshAllLists(ctx context.Context) { + priorityList, err := sm.redisCoordinator.GetPriorities(ctx) + if err != nil { + panic(err) + } + sm.priorityList = priorityList + sm.prioritiesSet = getMapfromlist(priorityList) + + livelinessList, err := sm.redisCoordinator.GetLiveliness(ctx) + if err != nil { + panic(err) + } + sm.livelinessSet = getMapfromlist(livelinessList) + + urlList := []string{} + for url := range sm.livelinessSet { + if _, ok := sm.prioritiesSet[url]; !ok { + urlList = append(urlList, url) + } + } + sm.nonPriorityList = urlList +} + +func getMapfromlist(list []string) map[string]bool { + mapping := make(map[string]bool) + for _, url := range list { + mapping[url] = true + } + return mapping +} diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 18a2b10f2f..85a8f4adef 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -118,12 +118,12 @@ func loadS3Variables(k *koanf.Koanf) error { var ErrVersion = errors.New("configuration: version requested") -func GetVersion() (string, string) { +func GetVersion() (string, string, string) { return genericconf.GetVersion(version, datetime, modified) } func PrintErrorAndExit(err error, usage func(string)) { - vcsRevision, vcsTime := GetVersion() + vcsRevision, _, vcsTime := GetVersion() fmt.Printf("Version: %v, time: %v\n", vcsRevision, vcsTime) if err != nil && errors.Is(err, ErrVersion) { // Already printed version, just exit @@ -138,10 +138,32 @@ func PrintErrorAndExit(err error, usage func(string)) { } } +func devFlagArgs() []string { + args := []string{ + "--init.dev-init", + "--init.dev-init-address", "0x3f1Eae7D46d88F08fc2F8ed27FCb2AB183EB2d0E", + "--node.dangerous.no-l1-listener", + "--node.parent-chain-reader.enable=false", + "--parent-chain.id=1337", + "--chain.id=412346", + "--persistent.chain", "/tmp/dev-test", + "--node.sequencer", + "--node.dangerous.no-sequencer-coordinator", + "--node.staker.enable=false", + "--init.empty=false", + "--http.port", "8547", + "--http.addr", "127.0.0.1", + } + return args +} + func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) { for _, arg := range args { if arg == "--version" || arg == "-v" { return nil, ErrVersion + } else if arg == "--dev" { + args = devFlagArgs() + break } } if err := f.Parse(args); err != nil { diff --git a/cmd/util/keystore.go b/cmd/util/keystore.go index cf00973295..52a18a42b5 100644 --- a/cmd/util/keystore.go +++ b/cmd/util/keystore.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/signature" @@ -51,6 +52,10 @@ func OpenWallet(description string, walletConfig *genericconf.WalletConfig, chai if err != nil { return nil, nil, err } + if walletConfig.OnlyCreateKey { + log.Info(fmt.Sprintf("Wallet key created with address %s, backup wallet (%s) and remove --%s.wallet.only-create-key to run normally", account.Address.Hex(), walletConfig.Pathname, description)) + return nil, nil, nil + } var txOpts *bind.TransactOpts if 
chainId != nil { @@ -74,7 +79,7 @@ func openKeystore(ks *keystore.KeyStore, description string, walletConfig *gener if !creatingNew && walletConfig.OnlyCreateKey { return nil, fmt.Errorf("wallet key already created, backup key (%s) and remove --%s.wallet.only-create-key to run normally", walletConfig.Pathname, description) } - passOpt := walletConfig.Password() + passOpt := walletConfig.Pwd() var password string if passOpt != nil { password = *passOpt @@ -91,46 +96,37 @@ func openKeystore(ks *keystore.KeyStore, description string, walletConfig *gener } } - var account accounts.Account if creatingNew { - var err error - account, err = ks.NewAccount(password) - if err != nil { - return &accounts.Account{}, err + a, err := ks.NewAccount(password) + return &a, err + } + + var account accounts.Account + if walletConfig.Account == "" { + if len(ks.Accounts()) > 1 { + names := make([]string, 0, len(ks.Accounts())) + for _, acct := range ks.Accounts() { + names = append(names, acct.Address.Hex()) + } + return nil, fmt.Errorf("too many existing accounts, choose one: %s", strings.Join(names, ",")) } + account = ks.Accounts()[0] } else { - if walletConfig.Account == "" { - if len(ks.Accounts()) > 1 { - names := make([]string, 0, len(ks.Accounts())) - for _, acct := range ks.Accounts() { - names = append(names, acct.Address.Hex()) - } - return nil, fmt.Errorf("too many existing accounts, choose one: %s", strings.Join(names, ",")) - } - account = ks.Accounts()[0] - } else { - address := common.HexToAddress(walletConfig.Account) - var emptyAddress common.Address - if address == emptyAddress { - return nil, fmt.Errorf("supplied address is invalid: %s", walletConfig.Account) - } - var err error - account, err = ks.Find(accounts.Account{Address: address}) - if err != nil { - return nil, err - } + address := common.HexToAddress(walletConfig.Account) + var emptyAddress common.Address + if address == emptyAddress { + return nil, fmt.Errorf("supplied address is invalid: %s", walletConfig.Account) + } + var err error + account, err = ks.Find(accounts.Account{Address: address}) + if err != nil { + return nil, err } } - if creatingNew { - return nil, fmt.Errorf("wallet key created with address %s, backup wallet (%s) and remove --%s.wallet.only-create-key to run normally", account.Address.Hex(), walletConfig.Pathname, description) - } - - err := ks.Unlock(account, password) - if err != nil { + if err := ks.Unlock(account, password); err != nil { return nil, err } - return &account, nil } diff --git a/cmd/util/keystore_test.go b/cmd/util/keystore_test.go index 7752825291..1ee579de28 100644 --- a/cmd/util/keystore_test.go +++ b/cmd/util/keystore_test.go @@ -25,10 +25,11 @@ func openTestKeystore(description string, walletConfig *genericconf.WalletConfig } func createWallet(t *testing.T, pathname string) { + t.Helper() walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { @@ -36,13 +37,8 @@ func createWallet(t *testing.T, pathname string) { return "", nil } - _, _, err := openTestKeystore("test", &walletConf, testPass) - if err == nil { - t.Fatalf("should have failed") - } - keyCreatedError := "wallet key created" - if !strings.Contains(err.Error(), keyCreatedError) { - t.Fatalf("incorrect failure: %v, should have been %s", err, keyCreatedError) + if _, _, err := openTestKeystore("test", &walletConf, testPass); err != nil { + 
t.Fatalf("openTestKeystore() unexpected error: %v", err) } if testPassCalled { t.Error("password prompted for when it should not have been") @@ -73,7 +69,7 @@ func TestExistingKeystoreNoCreate(t *testing.T) { walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { @@ -110,13 +106,8 @@ func TestNewKeystorePromptPasswordTerminal(t *testing.T) { return password, nil } - _, _, err := openTestKeystore("test", &walletConf, getPass) - if err == nil { - t.Fatalf("should have failed") - } - keyCreatedError := "wallet key created" - if !strings.Contains(err.Error(), keyCreatedError) { - t.Fatalf("incorrect failure: %v, should have been %s", err, keyCreatedError) + if _, _, err := openTestKeystore("test", &walletConf, getPass); err != nil { + t.Fatalf("openTestKeystore() unexpected error: %v", err) } if !testPassCalled { t.Error("password not prompted for") @@ -167,13 +158,8 @@ func TestExistingKeystoreAccountName(t *testing.T) { return password, nil } - _, _, err := openTestKeystore("test", &walletConf, testPass) - if err == nil { - t.Fatalf("should have failed") - } - keyCreatedError := "wallet key created" - if !strings.Contains(err.Error(), keyCreatedError) { - t.Fatalf("incorrect failure: %v, should have been %s", err, keyCreatedError) + if _, _, err := openTestKeystore("test", &walletConf, testPass); err != nil { + t.Fatalf("openTestKeystore() unexpected error: %v", err) } if !testPassCalled { t.Error("password not prompted for") @@ -206,6 +192,7 @@ func TestExistingKeystoreAccountName(t *testing.T) { t.Fatal("should have failed") } invalidAddressError := "address is invalid" + keyCreatedError := "wallet key created" if !strings.Contains(err.Error(), invalidAddressError) { t.Fatalf("incorrect failure: %v, should have been %s", err, keyCreatedError) } diff --git a/consensus/consensusapi/consensusapi.go b/consensus/consensusapi/consensusapi.go index 19cdfe92e5..25f12e6026 100644 --- a/consensus/consensusapi/consensusapi.go +++ b/consensus/consensusapi/consensusapi.go @@ -20,8 +20,8 @@ func (a *ConsensusAPI) FetchBatch(ctx context.Context, batchNum uint64) ([]byte, return a.consensus.FetchBatch(batchNum).Await(ctx) } -func (a *ConsensusAPI) FindL1BatchForMessage(ctx context.Context, message arbutil.MessageIndex) (uint64, error) { - return a.consensus.FindL1BatchForMessage(message).Await(ctx) +func (a *ConsensusAPI) FindInboxBatchContainingMessage(ctx context.Context, message arbutil.MessageIndex) (uint64, error) { + return a.consensus.FindInboxBatchContainingMessage(message).Await(ctx) } func (a *ConsensusAPI) GetBatchParentChainBlock(ctx context.Context, seqNum uint64) (uint64, error) { diff --git a/consensus/consensusclient/consensusclient.go b/consensus/consensusclient/consensusclient.go index 0c7559a9a7..64f2b01486 100644 --- a/consensus/consensusclient/consensusclient.go +++ b/consensus/consensusclient/consensusclient.go @@ -53,10 +53,10 @@ func (c *Client) FetchBatch(batchNum uint64) containers.PromiseInterface[[]byte] }) } -func (c *Client) FindL1BatchForMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] { +func (c *Client) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] { return stopwaiter.LaunchPromiseThread[uint64](c, func(ctx context.Context) (uint64, error) { var res uint64 - err := c.client.CallContext(ctx, &res, 
consensus.RPCNamespace+"_findL1BatchForMessage", message) + err := c.client.CallContext(ctx, &res, consensus.RPCNamespace+"_findInboxBatchContainingMessage", message) if err != nil { return 0, convertError(err) } diff --git a/consensus/consensusserver/consensusserver.go b/consensus/consensusserver/consensusserver.go index 19cdfe92e5..25f12e6026 100644 --- a/consensus/consensusserver/consensusserver.go +++ b/consensus/consensusserver/consensusserver.go @@ -20,8 +20,8 @@ func (a *ConsensusAPI) FetchBatch(ctx context.Context, batchNum uint64) ([]byte, return a.consensus.FetchBatch(batchNum).Await(ctx) } -func (a *ConsensusAPI) FindL1BatchForMessage(ctx context.Context, message arbutil.MessageIndex) (uint64, error) { - return a.consensus.FindL1BatchForMessage(message).Await(ctx) +func (a *ConsensusAPI) FindInboxBatchContainingMessage(ctx context.Context, message arbutil.MessageIndex) (uint64, error) { + return a.consensus.FindInboxBatchContainingMessage(message).Await(ctx) } func (a *ConsensusAPI) GetBatchParentChainBlock(ctx context.Context, seqNum uint64) (uint64, error) { diff --git a/consensus/interface.go b/consensus/interface.go index b789f0891c..bbdfd8e5e7 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -15,7 +15,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // BatchFetcher is required for any execution node type BatchFetcher interface { FetchBatch(batchNum uint64) containers.PromiseInterface[[]byte] - FindL1BatchForMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] + FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[uint64] GetBatchParentChainBlock(seqNum uint64) containers.PromiseInterface[uint64] } diff --git a/contracts b/contracts index 1b10711dc5..695750067b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 1b10711dc5f2eeefebc8c9f07d5c5f580534f703 +Subproject commit 695750067b2b7658556bdf61ec8cf16132d83dd0 diff --git a/das/aggregator.go b/das/aggregator.go index 7c1504d6f1..4b4571eb43 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -9,13 +9,11 @@ import ( "errors" "fmt" "math/bits" - "os" "time" flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -32,13 +30,11 @@ type AggregatorConfig struct { Enable bool `koanf:"enable"` AssumedHonest int `koanf:"assumed-honest"` Backends string `koanf:"backends"` - DumpKeyset bool `koanf:"dump-keyset"` } var DefaultAggregatorConfig = AggregatorConfig{ AssumedHonest: 0, Backends: "", - DumpKeyset: false, } var BatchToDasFailed = errors.New("unable to batch to DAS") @@ -47,7 +43,6 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). 
If there are N backends, K=N+1-H valid responses are required to consider a Store request to be successful.")
 	f.String(prefix+".backends", DefaultAggregatorConfig.Backends, "JSON RPC backend configuration")
-	f.Bool(prefix+".dump-keyset", DefaultAggregatorConfig.DumpKeyset, "Dump the keyset encoded in hexadecimal for the backends string")
 }
 
 type Aggregator struct {
@@ -60,7 +55,7 @@ type Aggregator struct {
 	maxAllowedServiceStoreFailures int
 	keysetHash                     [32]byte
 	keysetBytes                    []byte
-	bpVerifier                     *contracts.BatchPosterVerifier
+	addrVerifier                   *contracts.AddressVerifier
 }
 
 type ServiceDetails struct {
@@ -87,10 +82,10 @@ func NewServiceDetails(service DataAvailabilityServiceWriter, pubKey blsSignatur
 }
 
 func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services []ServiceDetails) (*Aggregator, error) {
-	if config.L1NodeURL == "none" {
+	if config.ParentChainNodeURL == "none" {
 		return NewAggregatorWithSeqInboxCaller(config, services, nil)
 	}
-	l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL)
+	l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL)
 	if err != nil {
 		return nil, err
 	}
@@ -122,51 +117,26 @@ func NewAggregatorWithSeqInboxCaller(
 	services []ServiceDetails,
 	seqInboxCaller *bridgegen.SequencerInboxCaller,
 ) (*Aggregator, error) {
-	var aggSignersMask uint64
-	pubKeys := []blsSignatures.PublicKey{}
-	for _, d := range services {
-		if bits.OnesCount64(d.signersMask) != 1 {
-			return nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask)
-		}
-		aggSignersMask |= d.signersMask
-		pubKeys = append(pubKeys, d.pubKey)
-	}
-	if bits.OnesCount64(aggSignersMask) != len(services) {
-		return nil, errors.New("at least two signers share a mask")
-	}
-	keyset := &arbstate.DataAvailabilityKeyset{
-		AssumedHonest: uint64(config.AggregatorConfig.AssumedHonest),
-		PubKeys:       pubKeys,
-	}
-	ksBuf := bytes.NewBuffer([]byte{})
-	if err := keyset.Serialize(ksBuf); err != nil {
-		return nil, err
-	}
-	keysetHash, err := keyset.Hash()
+	keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest))
 	if err != nil {
 		return nil, err
 	}
-	if config.AggregatorConfig.DumpKeyset {
-		fmt.Printf("Keyset: %s\n", hexutil.Encode(ksBuf.Bytes()))
-		fmt.Printf("KeysetHash: %s\n", hexutil.Encode(keysetHash[:]))
-		os.Exit(0)
-	}
-	var bpVerifier *contracts.BatchPosterVerifier
+	var addrVerifier *contracts.AddressVerifier
 	if seqInboxCaller != nil {
-		bpVerifier = contracts.NewBatchPosterVerifier(seqInboxCaller)
+		addrVerifier = contracts.NewAddressVerifier(seqInboxCaller)
 	}
 
 	return &Aggregator{
-		config:                         config.AggregatorConfig,
+		config:                         config.RPCAggregator,
 		services:                       services,
 		requestTimeout:                 config.RequestTimeout,
-		requiredServicesForStore:       len(services) + 1 - config.AggregatorConfig.AssumedHonest,
-		maxAllowedServiceStoreFailures: config.AggregatorConfig.AssumedHonest - 1,
+		requiredServicesForStore:       len(services) + 1 - config.RPCAggregator.AssumedHonest,
+		maxAllowedServiceStoreFailures: config.RPCAggregator.AssumedHonest - 1,
 		keysetHash:                     keysetHash,
-		keysetBytes:                    ksBuf.Bytes(),
-		bpVerifier:                     bpVerifier,
+		keysetBytes:                    keysetBytes,
+		addrVerifier:                   addrVerifier,
 	}, nil
 }
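As an aside on the assumed-honest arithmetic wired up in the constructor above: with N configured backends and H assumed honest, K = N + 1 - H successful stores are required, so at most H - 1 backends may fail before a cert can no longer be produced. A minimal sketch of those two thresholds (a hypothetical helper for illustration, not part of this change):

    // storeThresholds mirrors the requiredServicesForStore and
    // maxAllowedServiceStoreFailures fields set in NewAggregatorWithSeqInboxCaller:
    // K = N+1-H stores must succeed, and at most H-1 may fail.
    func storeThresholds(numBackends, assumedHonest int) (required, maxFailures int) {
    	return numBackends + 1 - assumedHonest, assumedHonest - 1
    }

For example, with N = 4 backends and H = 2 assumed honest, K = 3 stores must succeed and only a single failure is tolerated.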
@@ -196,16 +166,16 @@ type storeResponse struct {
 // signature is not checked, which is useful for testing.
 func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) {
 	log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig))
-	if a.bpVerifier != nil {
+	if a.addrVerifier != nil {
 		actualSigner, err := DasRecoverSigner(message, timeout, sig)
 		if err != nil {
 			return nil, err
 		}
-		isBatchPoster, err := a.bpVerifier.IsBatchPoster(ctx, actualSigner)
+		isBatchPosterOrSequencer, err := a.addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner)
 		if err != nil {
 			return nil, err
 		}
-		if !isBatchPoster {
+		if !isBatchPosterOrSequencer {
 			return nil, errors.New("store request not properly signed")
 		}
 	}
@@ -320,6 +290,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64,
 				cd.aggSignersMask = aggSignersMask
 				certDetailsChan <- cd
 				returned = true
+				if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet
+					storeFailures+1 > a.maxAllowedServiceStoreFailures {
+					log.Error("das.Aggregator: storing the batch data succeeded for just enough DAS committee members to generate the Data Availability Cert; if one more had failed, the cert could not have been generated. Look for preceding logs with \"Error from backend\"")
+				}
 			} else if storeFailures > a.maxAllowedServiceStoreFailures {
 				cd := certDetails{}
 				cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed)
diff --git a/das/aggregator_test.go b/das/aggregator_test.go
index 1b6c60c675..776af3975b 100644
--- a/das/aggregator_test.go
+++ b/das/aggregator_test.go
@@ -34,10 +34,10 @@ func TestDAS_BasicAggregationLocal(t *testing.T) {
 	config := DataAvailabilityConfig{
 		Enable: true,
-		KeyConfig: KeyConfig{
+		Key: KeyConfig{
 			PrivKey: privKey,
 		},
-		L1NodeURL: "none",
+		ParentChainNodeURL: "none",
 	}
 
 	storageServices = append(storageServices, NewMemoryBackedStorageService(ctx))
@@ -49,7 +49,7 @@
 		backends = append(backends, *details)
 	}
 
-	aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{AggregatorConfig: AggregatorConfig{AssumedHonest: 1}, L1NodeURL: "none"}, backends)
+	aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends)
 	Require(t, err)
 
 	rawMsg := []byte("It's time for you to see the fnords.")
@@ -187,10 +187,10 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) {
 	config := DataAvailabilityConfig{
 		Enable: true,
-		KeyConfig: KeyConfig{
+		Key: KeyConfig{
 			PrivKey: privKey,
 		},
-		L1NodeURL: "none",
+		ParentChainNodeURL: "none",
 	}
 
 	storageServices = append(storageServices, NewMemoryBackedStorageService(ctx))
@@ -205,9 +205,9 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) {
 	aggregator, err := NewAggregator(
 		ctx,
 		DataAvailabilityConfig{
-			AggregatorConfig: AggregatorConfig{AssumedHonest: assumedHonest},
-			L1NodeURL:        "none",
-			RequestTimeout:   time.Millisecond * 2000,
+			RPCAggregator:      AggregatorConfig{AssumedHonest: assumedHonest},
+			ParentChainNodeURL: "none",
+			RequestTimeout:     time.Millisecond * 2000,
 		},
 		backends)
 	Require(t, err)
diff --git a/das/das.go b/das/das.go
index a5d5c8d560..910e511083 100644
--- a/das/das.go
+++ b/das/das.go
@@ -40,22 +40,22 @@ type
DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCacheConfig BigCacheConfig `koanf:"local-cache"` - RedisCacheConfig RedisConfig `koanf:"redis-cache"` + LocalCache BigCacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorageConfig LocalDBStorageConfig `koanf:"local-db-storage"` - LocalFileStorageConfig LocalFileStorageConfig `koanf:"local-file-storage"` - S3StorageServiceConfig S3StorageServiceConfig `koanf:"s3-storage"` - IpfsStorageServiceConfig IpfsStorageServiceConfig `koanf:"ipfs-storage"` - RegularSyncStorageConfig RegularSyncStorageConfig `koanf:"regular-sync-storage"` + LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` + LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` + S3Storage S3StorageServiceConfig `koanf:"s3-storage"` + IpfsStorage IpfsStorageServiceConfig `koanf:"ipfs-storage"` + RegularSyncStorage RegularSyncStorageConfig `koanf:"regular-sync-storage"` - KeyConfig KeyConfig `koanf:"key"` + Key KeyConfig `koanf:"key"` - AggregatorConfig AggregatorConfig `koanf:"rpc-aggregator"` - RestfulClientAggregatorConfig RestfulClientAggregatorConfig `koanf:"rest-aggregator"` + RPCAggregator AggregatorConfig `koanf:"rpc-aggregator"` + RestAggregator RestfulClientAggregatorConfig `koanf:"rest-aggregator"` - L1NodeURL string `koanf:"parent-chain-node-url"` - L1ConnectionAttempts int `koanf:"parent-chain-connection-attempts"` + ParentChainNodeURL string `koanf:"parent-chain-node-url"` + ParentChainConnectionAttempts int `koanf:"parent-chain-connection-attempts"` SequencerInboxAddress string `koanf:"sequencer-inbox-address"` ExtraSignatureCheckingPublicKey string `koanf:"extra-signature-checking-public-key"` @@ -66,9 +66,10 @@ type DataAvailabilityConfig struct { var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RequestTimeout: 5 * time.Second, Enable: false, - RestfulClientAggregatorConfig: DefaultRestfulClientAggregatorConfig, - L1ConnectionAttempts: 15, + RestAggregator: DefaultRestfulClientAggregatorConfig, + ParentChainConnectionAttempts: 15, PanicOnError: false, + IpfsStorage: DefaultIpfsStorageServiceConfig, } func OptionalAddressFromString(s string) (*common.Address, error) { @@ -132,9 +133,9 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { IpfsStorageServiceConfigAddOptions(prefix+".ipfs-storage", f) RestfulClientAggregatorConfigAddOptions(prefix+".rest-aggregator", f) - f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.L1NodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.L1ConnectionAttempts, "layer 1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 address of SequencerInbox contract") + f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for parent chain node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.ParentChainConnectionAttempts, "parent chain RPC connection attempts (spaced out at least 1 second per attempt, 
0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's parent chain configuration is used") + f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "parent chain address of SequencerInbox contract") } func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { diff --git a/das/das_test.go b/das/das_test.go index 7318afac19..416744535b 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -32,18 +32,18 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServicesFirst []*IterableStorageService @@ -124,18 +124,18 @@ func testDASMissingMessage(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServices []*IterableStorageService diff --git a/das/db_storage_service.go b/das/db_storage_service.go index fb89b1cf30..b9af530b9e 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -20,11 +20,11 @@ import ( ) type LocalDBStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalDBStorageConfig = LocalDBStorageConfig{} @@ -33,8 +33,8 @@ func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database") f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageServices, "enable db storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageServices, "enable db storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as 
a sink for regular sync storage") } type DBStorageService struct { diff --git a/das/factory.go b/das/factory.go index 96df5b474d..0e6b292005 100644 --- a/das/factory.go +++ b/das/factory.go @@ -27,59 +27,59 @@ func CreatePersistentStorageService( ) (StorageService, *LifecycleManager, error) { storageServices := make([]StorageService, 0, 10) var lifecycleManager LifecycleManager - if config.LocalDBStorageConfig.Enable { - s, err := NewDBStorageService(ctx, config.LocalDBStorageConfig.DataDir, config.LocalDBStorageConfig.DiscardAfterTimeout) + if config.LocalDBStorage.Enable { + s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout) if err != nil { return nil, nil, err } - if config.LocalDBStorageConfig.SyncFromStorageServices { + if config.LocalDBStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalDBStorageConfig.SyncToStorageServices { + if config.LocalDBStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.LocalFileStorageConfig.Enable { - s, err := NewLocalFileStorageService(config.LocalFileStorageConfig.DataDir) + if config.LocalFileStorage.Enable { + s, err := NewLocalFileStorageService(config.LocalFileStorage.DataDir) if err != nil { return nil, nil, err } - if config.LocalFileStorageConfig.SyncFromStorageServices { + if config.LocalFileStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalFileStorageConfig.SyncToStorageServices { + if config.LocalFileStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.S3StorageServiceConfig.Enable { - s, err := NewS3StorageService(config.S3StorageServiceConfig) + if config.S3Storage.Enable { + s, err := NewS3StorageService(config.S3Storage) if err != nil { return nil, nil, err } lifecycleManager.Register(s) - if config.S3StorageServiceConfig.SyncFromStorageServices { + if config.S3Storage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.S3StorageServiceConfig.SyncToStorageServices { + if config.S3Storage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } storageServices = append(storageServices, s) } - if config.IpfsStorageServiceConfig.Enable { - s, err := NewIpfsStorageService(ctx, config.IpfsStorageServiceConfig) + if config.IpfsStorage.Enable { + s, err := NewIpfsStorageService(ctx, config.IpfsStorage) if err != nil { return nil, nil, err } @@ -114,23 +114,23 @@ func WrapStorageWithCache( // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. 
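The comment above states the key invariant of WrapStorageWithCache: each cache wraps the service built so far, so the last wrapper applied is the first one consulted on reads. Schematically, using the constructors from this diff (a fragment for illustration only, error handling elided):

    // innermost: the persistent storage service from CreatePersistentStorageService
    storageService, _ = NewRedisStorageService(config.RedisCache, storageService)   // middle layer
    storageService, _ = NewBigCacheStorageService(config.LocalCache, storageService) // outermost, tried first
    // a read now tries the local BigCache, then Redis, then the persistent backends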
var err error - if config.RedisCacheConfig.Enable { - storageService, err = NewRedisStorageService(config.RedisCacheConfig, storageService) + if config.RedisCache.Enable { + storageService, err = NewRedisStorageService(config.RedisCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err } - if config.RedisCacheConfig.SyncFromStorageServices { + if config.RedisCache.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(storageService)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) storageService = iterableStorageService } - if config.RedisCacheConfig.SyncToStorageServices { + if config.RedisCache.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, storageService) } } - if config.LocalCacheConfig.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCacheConfig, storageService) + if config.LocalCache.Enable { + storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err @@ -151,11 +151,11 @@ func CreateBatchPosterDAS( } // Check config requirements - if !config.AggregatorConfig.Enable || !config.RestfulClientAggregatorConfig.Enable { + if !config.RPCAggregator.Enable || !config.RestAggregator.Enable { return nil, nil, nil, errors.New("--node.data-availability.rpc-aggregator.enable and rest-aggregator.enable must be set when running a Batch Poster in AnyTrust mode") } - if config.IpfsStorageServiceConfig.Enable { + if config.IpfsStorage.Enable { return nil, nil, nil, errors.New("--node.data-availability.ipfs-storage.enable may not be set when running a Nitro AnyTrust node in Batch Poster mode") } // Done checking config requirements @@ -173,7 +173,7 @@ func CreateBatchPosterDAS( } } - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, err } @@ -200,10 +200,10 @@ func CreateDAComponentsForDaserver( } // Check config requirements - if !config.LocalDBStorageConfig.Enable && - !config.LocalFileStorageConfig.Enable && - !config.S3StorageServiceConfig.Enable && - !config.IpfsStorageServiceConfig.Enable { + if !config.LocalDBStorage.Enable && + !config.LocalFileStorage.Enable && + !config.S3Storage.Enable && + !config.IpfsStorage.Enable { return nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage|ipfs-storage) must be enabled.") } // Done checking config requirements @@ -222,15 +222,15 @@ func CreateDAComponentsForDaserver( // The REST aggregator is used as the fallback if requested data is not present // in the storage service. 
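One detail worth calling out before the fallback wiring below: SyncToStorageConfig's RetentionPeriod defaults to "forever", and the conversion to seconds must pass that sentinel through rather than dividing it down. Only the sentinel branch is visible in this hunk; the sketch below assumes the other branch divides by time.Second (hypothetical helper, for illustration):

    // retentionSeconds converts the configured retention period to whole
    // seconds, passing the "retain forever" sentinel through unchanged.
    func retentionSeconds(retention time.Duration) uint64 {
    	if uint64(retention) == math.MaxUint64 {
    		return math.MaxUint64
    	}
    	return uint64(retention / time.Second)
    }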
- if config.RestfulClientAggregatorConfig.Enable { - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + if config.RestAggregator.Enable { + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, nil, err } restAgg.Start(ctx) dasLifecycleManager.Register(restAgg) - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 @@ -266,7 +266,7 @@ func CreateDAComponentsForDaserver( var daReader DataAvailabilityServiceReader = storageService var daHealthChecker DataAvailabilityServiceHealthChecker = storageService - if config.KeyConfig.KeyDir != "" || config.KeyConfig.PrivKey != "" { + if config.Key.KeyDir != "" || config.Key.PrivKey != "" { var seqInboxCaller *bridgegen.SequencerInboxCaller if seqInboxAddress != nil { seqInbox, err := bridgegen.NewSequencerInbox(*seqInboxAddress, (*l1Reader).Client()) @@ -280,7 +280,7 @@ func CreateDAComponentsForDaserver( seqInboxCaller = nil } - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, nil, nil, nil, err } @@ -296,8 +296,8 @@ func CreateDAComponentsForDaserver( } } - if config.RegularSyncStorageConfig.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { - regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorageConfig) + if config.RegularSyncStorage.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { + regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorage) regularlySyncStorage.Start(ctx) } @@ -322,15 +322,15 @@ func CreateDAReaderForNode( } // Check config requirements - if config.AggregatorConfig.Enable { + if config.RPCAggregator.Enable { return nil, nil, errors.New("node.data-availability.rpc-aggregator is only for Batch Poster mode") } - if !config.RestfulClientAggregatorConfig.Enable && !config.IpfsStorageServiceConfig.Enable { + if !config.RestAggregator.Enable && !config.IpfsStorage.Enable { return nil, nil, fmt.Errorf("--node.data-availability.enable was set but neither of --node.data-availability.(rest-aggregator|ipfs-storage) were enabled. 
When running a Nitro Anytrust node in non-Batch Poster mode, some way to get the batch data is required.") } - if config.RestfulClientAggregatorConfig.SyncToStorageConfig.Eager { + if config.RestAggregator.SyncToStorage.Eager { return nil, nil, errors.New("--node.data-availability.rest-aggregator.sync-to-storage.eager can't be used with a Nitro node, only lazy syncing can be used.") } // Done checking config requirements @@ -341,9 +341,9 @@ func CreateDAReaderForNode( } var daReader DataAvailabilityServiceReader - if config.RestfulClientAggregatorConfig.Enable { + if config.RestAggregator.Enable { var restAgg *SimpleDASReaderAggregator - restAgg, err = NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err = NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, err } @@ -351,7 +351,7 @@ func CreateDAReaderForNode( dasLifecycleManager.Register(restAgg) if storageService != nil { - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 9fd831f480..5fa5306e39 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -22,10 +22,10 @@ import ( ) type LocalFileStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ @@ -35,8 +35,8 @@ var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ func LocalFileStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalFileStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a directory of files, one per batch") f.String(prefix+".data-dir", DefaultLocalFileStorageConfig.DataDir, "local data directory") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageServices, "enable local storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageServices, "enable local storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageService, "enable local storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageService, "enable local storage to be used as a sink for regular sync storage") } type LocalFileStorageService struct { diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index a005c70a44..3449a8e78c 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -24,27 +24,27 @@ import ( ) type RedisConfig struct { - Enable bool `koanf:"enable"` - RedisUrl string `koanf:"redis-url"` - Expiration time.Duration `koanf:"redis-expiration"` - KeyConfig string `koanf:"redis-key-config"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool 
`koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + Url string `koanf:"url"` + Expiration time.Duration `koanf:"expiration"` + KeyConfig string `koanf:"key-config"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultRedisConfig = RedisConfig{ - RedisUrl: "", + Url: "", Expiration: time.Hour, KeyConfig: "", } func RedisConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultRedisConfig.Enable, "enable Redis caching of sequencer batch data") - f.String(prefix+".redis-url", DefaultRedisConfig.RedisUrl, "Redis url") - f.Duration(prefix+".redis-expiration", DefaultRedisConfig.Expiration, "Redis expiration") - f.String(prefix+".redis-key-config", DefaultRedisConfig.KeyConfig, "Redis key config") - f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable Redis to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable Redis to be used as a sink for regular sync storage") + f.String(prefix+".url", DefaultRedisConfig.Url, "Redis url") + f.Duration(prefix+".expiration", DefaultRedisConfig.Expiration, "Redis expiration") + f.String(prefix+".key-config", DefaultRedisConfig.KeyConfig, "Redis key config") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable Redis to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable Redis to be used as a sink for regular sync storage") } type RedisStorageService struct { @@ -55,7 +55,7 @@ type RedisStorageService struct { } func NewRedisStorageService(redisConfig RedisConfig, baseStorageService StorageService) (StorageService, error) { - redisClient, err := redisutil.RedisClientFromURL(redisConfig.RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(redisConfig.Url) if err != nil { return nil, err } diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 2481358cf6..55f3ecd82c 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -23,7 +23,7 @@ func TestRedisStorageService(t *testing.T) { redisService, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, baseStorageService) @@ -75,7 +75,7 @@ func TestRedisStorageService(t *testing.T) { redisServiceWithEmptyBaseStorage, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, emptyBaseStorageService) diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index ff5f4aedb8..134c4229c8 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -4,10 +4,16 @@ package das import ( + "bytes" "context" "encoding/json" + "errors" + "fmt" + "math/bits" "net/url" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -22,7 +28,7 @@ type BackendConfig struct { } func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - 
services, err := setUpServices(config) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -30,7 +36,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggr } func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -38,16 +44,16 @@ func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil. } func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := setUpServices(config) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } return NewAggregatorWithSeqInboxCaller(config, services, seqInboxCaller) } -func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { +func ParseServices(config AggregatorConfig) ([]ServiceDetails, error) { var cs []BackendConfig - err := json.Unmarshal([]byte(config.AggregatorConfig.Backends), &cs) + err := json.Unmarshal([]byte(config.Backends), &cs) if err != nil { return nil, err } @@ -81,3 +87,33 @@ func setUpServices(config DataAvailabilityConfig) ([]ServiceDetails, error) { return services, nil } + +func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([32]byte, []byte, error) { + var aggSignersMask uint64 + pubKeys := []blsSignatures.PublicKey{} + for _, d := range services { + if bits.OnesCount64(d.signersMask) != 1 { + return [32]byte{}, nil, fmt.Errorf("tried to configure backend DAS %v with invalid signersMask %X", d.service, d.signersMask) + } + aggSignersMask |= d.signersMask + pubKeys = append(pubKeys, d.pubKey) + } + if bits.OnesCount64(aggSignersMask) != len(services) { + return [32]byte{}, nil, errors.New("at least two signers share a mask") + } + + keyset := &arbstate.DataAvailabilityKeyset{ + AssumedHonest: uint64(assumedHonest), + PubKeys: pubKeys, + } + ksBuf := bytes.NewBuffer([]byte{}) + if err := keyset.Serialize(ksBuf); err != nil { + return [32]byte{}, nil, err + } + keysetHash, err := keyset.Hash() + if err != nil { + return [32]byte{}, nil, err + } + + return keysetHash, ksBuf.Bytes(), nil +} diff --git a/das/rpc_test.go b/das/rpc_test.go index 6dcb8457c2..044ba597be 100644 --- a/das/rpc_test.go +++ b/das/rpc_test.go @@ -35,15 +35,15 @@ func TestRPC(t *testing.T) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*IterableStorageService @@ -51,7 +51,7 @@ func TestRPC(t *testing.T) { storageService, lifecycleManager, err := CreatePersistentStorageService(ctx, &config, &syncFromStorageServices, &syncToStorageServices) testhelpers.RequireImpl(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() testhelpers.RequireImpl(t, err) localDas, err := NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, "") testhelpers.RequireImpl(t, err) @@ -71,7 +71,7 @@ func TestRPC(t *testing.T) { backendsJsonByte, err := 
json.Marshal([]BackendConfig{beConfig}) testhelpers.RequireImpl(t, err) aggConf := DataAvailabilityConfig{ - AggregatorConfig: AggregatorConfig{ + RPCAggregator: AggregatorConfig{ AssumedHonest: 1, Backends: string(backendsJsonByte), }, diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 18a9ce1475..1a3ae94114 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -34,15 +34,15 @@ type S3Downloader interface { } type S3StorageServiceConfig struct { - Enable bool `koanf:"enable"` - AccessKey string `koanf:"access-key"` - Bucket string `koanf:"bucket"` - ObjectPrefix string `koanf:"object-prefix"` - Region string `koanf:"region"` - SecretKey string `koanf:"secret-key"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + AccessKey string `koanf:"access-key"` + Bucket string `koanf:"bucket"` + ObjectPrefix string `koanf:"object-prefix"` + Region string `koanf:"region"` + SecretKey string `koanf:"secret-key"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultS3StorageServiceConfig = S3StorageServiceConfig{} @@ -55,8 +55,8 @@ func S3ConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".region", DefaultS3StorageServiceConfig.Region, "S3 region") f.String(prefix+".secret-key", DefaultS3StorageServiceConfig.SecretKey, "S3 secret key") f.Bool(prefix+".discard-after-timeout", DefaultS3StorageServiceConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable s3 to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable s3 to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable s3 to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable s3 to be used as a sink for regular sync storage") } type S3StorageService struct { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 1a244ab640..50c4ee9aee 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -78,7 +78,7 @@ type SignAfterStoreDASWriter struct { keysetHash [32]byte keysetBytes []byte storageService StorageService - bpVerifier *contracts.BatchPosterVerifier + addrVerifier *contracts.AddressVerifier // Extra batch poster verifier, for local installations to have their // own way of testing Stores. 
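The bpVerifier to addrVerifier rename above tracks a broadened authorization check: a Store request's recovered signer may now be either the batch poster or the sequencer. Condensed from the Store implementations in this diff (the writer's version follows below; fragment shown for reference):

    actualSigner, err := DasRecoverSigner(message, timeout, sig)
    if err != nil {
    	return nil, err
    }
    ok, err := addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner)
    if err != nil {
    	return nil, err
    }
    if !ok {
    	return nil, errors.New("store request not properly signed")
    }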
@@ -86,14 +86,14 @@ type SignAfterStoreDASWriter struct { } func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConfig, storageService StorageService) (*SignAfterStoreDASWriter, error) { - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, err } - if config.L1NodeURL == "none" { + if config.ParentChainNodeURL == "none" { return NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, config.ExtraSignatureCheckingPublicKey) } - l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL) + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) if err != nil { return nil, err } @@ -136,9 +136,9 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( return nil, err } - var bpVerifier *contracts.BatchPosterVerifier + var addrVerifier *contracts.AddressVerifier if seqInboxCaller != nil { - bpVerifier = contracts.NewBatchPosterVerifier(seqInboxCaller) + addrVerifier = contracts.NewAddressVerifier(seqInboxCaller) } var extraBpVerifier func(message []byte, timeout uint64, sig []byte) bool @@ -173,7 +173,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( keysetHash: ksHash, keysetBytes: ksBuf.Bytes(), storageService: storageService, - bpVerifier: bpVerifier, + addrVerifier: addrVerifier, extraBpVerifier: extraBpVerifier, }, nil } @@ -187,16 +187,16 @@ func (d *SignAfterStoreDASWriter) Store( verified = d.extraBpVerifier(message, timeout, sig) } - if !verified && d.bpVerifier != nil { + if !verified && d.addrVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) if err != nil { return nil, err } - isBatchPoster, err := d.bpVerifier.IsBatchPoster(ctx, actualSigner) + isBatchPosterOrSequencer, err := d.addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner) if err != nil { return nil, err } - if !isBatchPoster { + if !isBatchPosterOrSequencer { return nil, errors.New("store request not properly signed") } } diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index b2499b680a..eb82a33837 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -25,28 +25,28 @@ import ( // RestfulDasClients, so the configuration and factory function are given more // specific names. 
type RestfulClientAggregatorConfig struct { - Enable bool `koanf:"enable"` - Urls []string `koanf:"urls"` - OnlineUrlList string `koanf:"online-url-list"` - OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` - Strategy string `koanf:"strategy"` - StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` - WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` - MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` - SimpleExploreExploitStrategyConfig SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` - SyncToStorageConfig SyncToStorageConfig `koanf:"sync-to-storage"` + Enable bool `koanf:"enable"` + Urls []string `koanf:"urls"` + OnlineUrlList string `koanf:"online-url-list"` + OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` + Strategy string `koanf:"strategy"` + StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` + WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` + MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` + SimpleExploreExploitStrategy SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` + SyncToStorage SyncToStorageConfig `koanf:"sync-to-storage"` } var DefaultRestfulClientAggregatorConfig = RestfulClientAggregatorConfig{ - Urls: []string{}, - OnlineUrlList: "", - OnlineUrlListFetchInterval: 1 * time.Hour, - Strategy: "simple-explore-exploit", - StrategyUpdateInterval: 10 * time.Second, - WaitBeforeTryNext: 2 * time.Second, - MaxPerEndpointStats: 20, - SimpleExploreExploitStrategyConfig: DefaultSimpleExploreExploitStrategyConfig, - SyncToStorageConfig: DefaultSyncToStorageConfig, + Urls: []string{}, + OnlineUrlList: "", + OnlineUrlListFetchInterval: 1 * time.Hour, + Strategy: "simple-explore-exploit", + StrategyUpdateInterval: 10 * time.Second, + WaitBeforeTryNext: 2 * time.Second, + MaxPerEndpointStats: 20, + SimpleExploreExploitStrategy: DefaultSimpleExploreExploitStrategyConfig, + SyncToStorage: DefaultSyncToStorageConfig, } type SimpleExploreExploitStrategyConfig struct { @@ -120,8 +120,8 @@ func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggreg switch strings.ToLower(config.Strategy) { case "simple-explore-exploit": a.strategy = &simpleExploreExploitStrategy{ - exploreIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploreIterations), - exploitIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploitIterations), + exploreIterations: uint32(config.SimpleExploreExploitStrategy.ExploreIterations), + exploitIterations: uint32(config.SimpleExploreExploitStrategy.ExploitIterations), } case "testing-sequential": a.strategy = &testingSequentialStrategy{} diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 8af39d7d3a..91f2e522a7 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -57,32 +57,32 @@ func init() { } type SyncToStorageConfig struct { - CheckAlreadyExists bool `koanf:"check-already-exists"` - Eager bool `koanf:"eager"` - EagerLowerBoundBlock uint64 `koanf:"eager-lower-bound-block"` - RetentionPeriod time.Duration `koanf:"retention-period"` - DelayOnError time.Duration `koanf:"delay-on-error"` - IgnoreWriteErrors bool `koanf:"ignore-write-errors"` - L1BlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` - StateDir string `koanf:"state-dir"` + CheckAlreadyExists bool `koanf:"check-already-exists"` + Eager bool `koanf:"eager"` + EagerLowerBoundBlock uint64 
`koanf:"eager-lower-bound-block"` + RetentionPeriod time.Duration `koanf:"retention-period"` + DelayOnError time.Duration `koanf:"delay-on-error"` + IgnoreWriteErrors bool `koanf:"ignore-write-errors"` + ParentChainBlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` + StateDir string `koanf:"state-dir"` } var DefaultSyncToStorageConfig = SyncToStorageConfig{ - CheckAlreadyExists: true, - Eager: false, - EagerLowerBoundBlock: 0, - RetentionPeriod: time.Duration(math.MaxInt64), - DelayOnError: time.Second, - IgnoreWriteErrors: true, - L1BlocksPerRead: 100, - StateDir: "", + CheckAlreadyExists: true, + Eager: false, + EagerLowerBoundBlock: 0, + RetentionPeriod: time.Duration(math.MaxInt64), + DelayOnError: time.Second, + IgnoreWriteErrors: true, + ParentChainBlocksPerRead: 100, + StateDir: "", } func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".check-already-exists", DefaultSyncToStorageConfig.CheckAlreadyExists, "check if the data already exists in this DAS's storage. Must be disabled for fast sync with an IPFS backend") f.Bool(prefix+".eager", DefaultSyncToStorageConfig.Eager, "eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily") f.Uint64(prefix+".eager-lower-bound-block", DefaultSyncToStorageConfig.EagerLowerBoundBlock, "when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state") - f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.L1BlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") + f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.ParentChainBlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") f.Duration(prefix+".retention-period", DefaultSyncToStorageConfig.RetentionPeriod, "period to retain synced data (defaults to forever)") f.Duration(prefix+".delay-on-error", DefaultSyncToStorageConfig.DelayOnError, "time to wait if encountered an error before retrying") f.Bool(prefix+".ignore-write-errors", DefaultSyncToStorageConfig.IgnoreWriteErrors, "log only on failures to write when syncing; otherwise treat it as an error") @@ -212,22 +212,24 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere binary.BigEndian.PutUint64(header[32:40], deliveredEvent.AfterDelayedMessagesRead.Uint64()) data = append(header, data...) 
- preimages := make(map[common.Hash][]byte) + preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) if _, err = arbstate.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, arbstate.KeysetValidate); err != nil { log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) return err } - for hash, contents := range preimages { - var err error - if s.config.CheckAlreadyExists { - _, err = s.syncTo.GetByHash(ctx, hash) - } - if err == nil || errors.Is(err, ErrNotFound) { - if err := s.syncTo.Put(ctx, contents, storeUntil); err != nil { + for _, preimages := range preimages { + for hash, contents := range preimages { + var err error + if s.config.CheckAlreadyExists { + _, err = s.syncTo.GetByHash(ctx, hash) + } + if err == nil || errors.Is(err, ErrNotFound) { + if err := s.syncTo.Put(ctx, contents, storeUntil); err != nil { + return err + } + } else { return err } - } else { - return err } } seqNumber := deliveredEvent.BatchSequenceNumber @@ -346,9 +348,9 @@ func (s *l1SyncService) readMore(ctx context.Context) error { } } } - if highBlockNr > s.lowBlockNr+s.config.L1BlocksPerRead { + if highBlockNr > s.lowBlockNr+s.config.ParentChainBlocksPerRead { s.catchingUp = true - highBlockNr = s.lowBlockNr + s.config.L1BlocksPerRead + highBlockNr = s.lowBlockNr + s.config.ParentChainBlocksPerRead if finalizedHighBlockNr > highBlockNr { finalizedHighBlockNr = highBlockNr } @@ -370,20 +372,25 @@ func (s *l1SyncService) readMore(ctx context.Context) error { func (s *l1SyncService) mainThread(ctx context.Context) { headerChan, unsubscribe := s.l1Reader.Subscribe(false) defer unsubscribe() + errCount := 0 for { err := s.readMore(ctx) if err != nil { if ctx.Err() != nil { return } - log.Error("error trying to sync from L1", "err", err) + errCount++ + if errCount > 5 { + log.Error("error trying to sync from L1", "err", err) + } select { case <-ctx.Done(): return - case <-time.After(s.config.DelayOnError): + case <-time.After(s.config.DelayOnError * time.Duration(errCount)): } continue } + errCount = 0 if s.catchingUp { // we're behind. Don't wait. 
continue diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 8b7685669a..9e1ee0c30a 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -26,39 +26,45 @@ import ( ) type CachingConfig struct { - Archive bool `koanf:"archive"` - BlockCount uint64 `koanf:"block-count"` - BlockAge time.Duration `koanf:"block-age"` - TrieTimeLimit time.Duration `koanf:"trie-time-limit"` - TrieDirtyCache int `koanf:"trie-dirty-cache"` - TrieCleanCache int `koanf:"trie-clean-cache"` - SnapshotCache int `koanf:"snapshot-cache"` - DatabaseCache int `koanf:"database-cache"` - SnapshotRestoreMaxGas uint64 `koanf:"snapshot-restore-gas-limit"` + Archive bool `koanf:"archive"` + BlockCount uint64 `koanf:"block-count"` + BlockAge time.Duration `koanf:"block-age"` + TrieTimeLimit time.Duration `koanf:"trie-time-limit"` + TrieDirtyCache int `koanf:"trie-dirty-cache"` + TrieCleanCache int `koanf:"trie-clean-cache"` + SnapshotCache int `koanf:"snapshot-cache"` + DatabaseCache int `koanf:"database-cache"` + SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` + MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` + MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".archive", DefaultCachingConfig.Archive, "retain past block state") f.Uint64(prefix+".block-count", DefaultCachingConfig.BlockCount, "minimum number of recent blocks to keep in memory") - f.Duration(prefix+".block-age", DefaultCachingConfig.BlockAge, "minimum age a block must be to be pruned") + f.Duration(prefix+".block-age", DefaultCachingConfig.BlockAge, "minimum age of recent blocks to keep in memory") f.Duration(prefix+".trie-time-limit", DefaultCachingConfig.TrieTimeLimit, "maximum block processing time before trie is written to hard-disk") f.Int(prefix+".trie-dirty-cache", DefaultCachingConfig.TrieDirtyCache, "amount of memory in megabytes to cache state diffs against disk with (larger cache lowers database growth)") f.Int(prefix+".trie-clean-cache", DefaultCachingConfig.TrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with") f.Int(prefix+".snapshot-cache", DefaultCachingConfig.SnapshotCache, "amount of memory in megabytes to cache state snapshots with") f.Int(prefix+".database-cache", DefaultCachingConfig.DatabaseCache, "amount of memory in megabytes to cache database contents with") - f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreMaxGas, "maximum gas rolled back to recover snapshot") + f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") + f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues") + f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues") } var DefaultCachingConfig = CachingConfig{ - Archive: false, - BlockCount: 128, - BlockAge: 30 * time.Minute, - TrieTimeLimit: time.Hour, - TrieDirtyCache: 1024, - TrieCleanCache: 600, - SnapshotCache: 400, - 
DatabaseCache: 2048, - SnapshotRestoreMaxGas: 300_000_000_000, + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 0, + MaxAmountOfGasToSkipStateSaving: 0, } func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { @@ -68,18 +74,20 @@ func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core } return &core.CacheConfig{ - TrieCleanLimit: cachingConfig.TrieCleanCache, - TrieCleanJournal: stack.ResolvePath(baseConf.TrieCleanCacheJournal), - TrieCleanRejournal: baseConf.TrieCleanCacheRejournal, - TrieCleanNoPrefetch: baseConf.NoPrefetch, - TrieDirtyLimit: cachingConfig.TrieDirtyCache, - TrieDirtyDisabled: cachingConfig.Archive, - TrieTimeLimit: cachingConfig.TrieTimeLimit, - TriesInMemory: cachingConfig.BlockCount, - TrieRetention: cachingConfig.BlockAge, - SnapshotLimit: cachingConfig.SnapshotCache, - Preimages: baseConf.Preimages, - SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreMaxGas, + TrieCleanLimit: cachingConfig.TrieCleanCache, + TrieCleanJournal: stack.ResolvePath(baseConf.TrieCleanCacheJournal), + TrieCleanRejournal: baseConf.TrieCleanCacheRejournal, + TrieCleanNoPrefetch: baseConf.NoPrefetch, + TrieDirtyLimit: cachingConfig.TrieDirtyCache, + TrieDirtyDisabled: cachingConfig.Archive, + TrieTimeLimit: cachingConfig.TrieTimeLimit, + TriesInMemory: cachingConfig.BlockCount, + TrieRetention: cachingConfig.BlockAge, + SnapshotLimit: cachingConfig.SnapshotCache, + Preimages: baseConf.Preimages, + SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreGasLimit, + MaxNumberOfBlocksToSkipStateSaving: cachingConfig.MaxNumberOfBlocksToSkipStateSaving, + MaxAmountOfGasToSkipStateSaving: cachingConfig.MaxAmountOfGasToSkipStateSaving, } } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 59bc41716a..95d9d5cbfe 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -74,7 +74,7 @@ func (s *ExecutionEngine) EnableReorgSequencing() { s.reorgSequencing = true } -func (s *ExecutionEngine) SetTransactionStreamer(consensus consensus.FullConsensusClient) error { +func (s *ExecutionEngine) SetConsensus(consensus consensus.FullConsensusClient) error { if s.Started() { return errors.New("trying to set transaction consensus after start") } @@ -622,7 +622,7 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) { s.latestBlockMutex.Lock() block := s.latestBlock s.latestBlockMutex.Unlock() - if block != lastBlock && block != nil { + if block != nil && (lastBlock == nil || block.Hash() != lastBlock.Hash()) { log.Info( "created block", "l2Block", block.Number(), diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index c3aa799c1d..6742447c0b 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -4,11 +4,13 @@ import ( "context" "errors" "fmt" + "reflect" "sync" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethdb" @@ -21,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/consensus/consensusclient" "github.com/offchainlabs/nitro/execution" 
"github.com/offchainlabs/nitro/execution/execapi" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" @@ -60,88 +63,91 @@ func ExecRPCConfigAddOptions(prefix string, f *flag.FlagSet) { } type Config struct { - L1Reader headerreader.Config `koanf:"l1-reader" reload:"hot"` - Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"` - RecordingDB arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` - TxPreChecker TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` - Forwarder ForwarderConfig `koanf:"forwarder"` - ForwardingTargetImpl string `koanf:"forwarding-target"` - Caching CachingConfig `koanf:"caching"` - SyncMonitor SyncMonitorConfig `koanf:"sync-monitor" reload:"hot"` - RPC arbitrum.Config `koanf:"rpc"` - ExecRPC ExecRPCConfig `koanf:"exec-rpc"` - Archive bool `koanf:"archive"` - TxLookupLimit uint64 `koanf:"tx-lookup-limit"` - ConsensesServer rpcclient.ClientConfig `koanf:"consensus-server" reload:"hot"` - Dangerous DangerousConfig `koanf:"dangerous"` -} - -func (c *Config) ForwardingTarget() string { - if c.ForwardingTargetImpl == "null" { - return "" - } - - return c.ForwardingTargetImpl + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"` + RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` + TxPreChecker TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` + Forwarder ForwarderConfig `koanf:"forwarder"` + ForwardingTarget string `koanf:"forwarding-target"` + Caching CachingConfig `koanf:"caching"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor" reload:"hot"` + RPC arbitrum.Config `koanf:"rpc"` + ExecRPC ExecRPCConfig `koanf:"exec-rpc"` + TxLookupLimit uint64 `koanf:"tx-lookup-limit"` + ConsensusServer rpcclient.ClientConfig `koanf:"consensus-server" reload:"hot"` + Dangerous DangerousConfig `koanf:"dangerous"` + + forwardingTarget string } func (c *Config) Validate() error { if err := c.Sequencer.Validate(); err != nil { return err } - if err := c.ConsensesServer.Validate(); err != nil { + if err := c.ConsensusServer.Validate(); err != nil { return err } + if !c.Sequencer.Enable && c.ForwardingTarget == "" { + return errors.New("ForwardingTarget not set and not sequencer (can use \"null\")") + } + if c.ForwardingTarget == "null" { + c.forwardingTarget = "" + } else { + c.forwardingTarget = c.ForwardingTarget + } + if c.forwardingTarget != "" && c.Sequencer.Enable { + return errors.New("ForwardingTarget set and sequencer enabled") + } return nil } func ConfigAddOptions(prefix string, f *flag.FlagSet) { - headerreader.AddOptions(prefix+".l1-reader", f) arbitrum.ConfigAddOptions(prefix+".rpc", f) SequencerConfigAddOptions(prefix+".sequencer", f) + headerreader.AddOptions(prefix+".parent-chain-reader", f) arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) - f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") + f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) 
CachingConfigAddOptions(prefix+".caching", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") - archiveMsg := fmt.Sprintf("retain past block state (deprecated, please use %v.caching.archive)", prefix) - f.Bool(prefix+".archive", ConfigDefault.Archive, archiveMsg) ExecRPCConfigAddOptions(prefix+".exec-rpc", f) - rpcclient.RPCClientAddOptions(prefix+".consensus-server", f, &ConfigDefault.ConsensesServer) + rpcclient.RPCClientAddOptions(prefix+".consensus-server", f, &ConfigDefault.ConsensusServer) DangerousConfigAddOptions(prefix+".dangerous", f) } var ConfigDefault = Config{ - L1Reader: headerreader.DefaultConfig, - RPC: arbitrum.DefaultConfig, - Sequencer: DefaultSequencerConfig, - RecordingDB: arbitrum.DefaultRecordingDatabaseConfig, - ForwardingTargetImpl: "", - TxPreChecker: DefaultTxPreCheckerConfig, - ExecRPC: ExecRPCConfigDefault, - Archive: false, - TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second - Caching: DefaultCachingConfig, - Dangerous: DefaultDangerousConfig, + RPC: arbitrum.DefaultConfig, + Sequencer: DefaultSequencerConfig, + ParentChainReader: headerreader.DefaultConfig, + RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig, + ForwardingTarget: "", + TxPreChecker: DefaultTxPreCheckerConfig, + TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second + ExecRPC: ExecRPCConfigDefault, + Caching: DefaultCachingConfig, + SyncMonitor: DefaultSyncMonitorConfig, + Dangerous: DefaultDangerousConfig, + Forwarder: DefaultNodeForwarderConfig, } type ConfigFetcher func() *Config type ExecutionNode struct { - ChainDB ethdb.Database - Backend *arbitrum.Backend - FilterSystem *filters.FilterSystem - ArbInterface *ArbInterface - ExecEngine *ExecutionEngine - Recorder *BlockRecorder - Sequencer *Sequencer // either nil or same as TxPublisher - TxPublisher TransactionPublisher - ConfigFetcher ConfigFetcher - SyncMonitor *SyncMonitor - L1Reader *headerreader.HeaderReader - ClassicOutbox *ClassicOutboxRetriever - ConsensusClient *consensusclient.Client + ChainDB ethdb.Database + Backend *arbitrum.Backend + FilterSystem *filters.FilterSystem + ArbInterface *ArbInterface + ExecEngine *ExecutionEngine + Recorder *BlockRecorder + Sequencer *Sequencer // either nil or same as TxPublisher + TxPublisher TransactionPublisher + ConfigFetcher ConfigFetcher + SyncMonitor *SyncMonitor + ParentChainReader *headerreader.HeaderReader + ClassicOutbox *ClassicOutboxRetriever + ConsensusClient *consensusclient.Client stopOnce sync.Once } @@ -171,8 +177,8 @@ func CreateExecutionNode( config := configFetcher() var consensusClient *consensusclient.Client - if config.ConsensesServer.URL != "" { - clientFetcher := func() *rpcclient.ClientConfig { return &configFetcher().ConsensesServer } + if config.ConsensusServer.URL != "" { + clientFetcher := func() *rpcclient.ClientConfig { return &configFetcher().ConsensusServer } consensusClient = consensusclient.NewClient(clientFetcher, stack) } @@ -185,38 +191,35 @@ func CreateExecutionNode( if err != nil { return nil, err } - recorder := NewBlockRecorder(&config.RecordingDB, execEngine, chainDB) + recorder := NewBlockRecorder(&config.RecordingDatabase, execEngine, chainDB) var txPublisher TransactionPublisher var sequencer *Sequencer - var l1Reader *headerreader.HeaderReader - if l1client != nil { - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().L1Reader }) + var parentChainReader 
*headerreader.HeaderReader + if l1client != nil && !reflect.ValueOf(l1client).IsNil() { + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys) if err != nil { return nil, err } - } else { + } else if config.Sequencer.Enable { log.Warn("sequencer enabled without l1 client") } - fwTarget := config.ForwardingTarget() if config.Sequencer.Enable { - if fwTarget != "" { - return nil, errors.New("sequencer and forwarding target both set") - } seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer } - sequencer, err = NewSequencer(execEngine, l1Reader, seqConfigFetcher) + sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher) if err != nil { return nil, err } txPublisher = sequencer } else { if config.Forwarder.RedisUrl != "" { - txPublisher = NewRedisTxForwarder(fwTarget, &config.Forwarder) - } else if fwTarget == "" { + txPublisher = NewRedisTxForwarder(config.forwardingTarget, &config.Forwarder) + } else if config.forwardingTarget == "" { txPublisher = NewTxDropper() } else { - txPublisher = NewForwarder(fwTarget, &config.Forwarder) + txPublisher = NewForwarder(config.forwardingTarget, &config.Forwarder) } } @@ -240,31 +243,32 @@ func CreateExecutionNode( syncMon := NewSyncMonitor(execEngine, syncMonFetcher, consensusInterface) var classicOutbox *ClassicOutboxRetriever - classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) - if err != nil { - if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { + + if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { + classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) + if err != nil { log.Warn("Classic Msg Database not found", "err", err) + classicOutbox = nil + } else { + classicOutbox = NewClassicOutboxRetriever(classicMsgDb) } - classicOutbox = nil - } else { - classicOutbox = NewClassicOutboxRetriever(classicMsgDb) } execNode := &ExecutionNode{ - chainDB, - backend, - filterSystem, - arbInterface, - execEngine, - recorder, - sequencer, - txPublisher, - configFetcher, - syncMon, - l1Reader, - classicOutbox, - consensusClient, - sync.Once{}, + ChainDB: chainDB, + Backend: backend, + FilterSystem: filterSystem, + ArbInterface: arbInterface, + ExecEngine: execEngine, + Recorder: recorder, + Sequencer: sequencer, + TxPublisher: txPublisher, + ConfigFetcher: configFetcher, + SyncMonitor: syncMon, + ParentChainReader: parentChainReader, + ClassicOutbox: classicOutbox, + ConsensusClient: consensusClient, + stopOnce: sync.Once{}, } apis := []rpc.API{{ @@ -309,6 +313,7 @@ func CreateExecutionNode( stack.RegisterLifecycle(&ExecNodeLifeCycle{execNode}) return execNode, nil + } func (n *ExecutionNode) Initialize(ctx context.Context) error { @@ -328,6 +333,7 @@ func (n *ExecutionNode) Initialize(ctx context.Context) error { return nil } +// not thread safe func (n *ExecutionNode) Start(ctx context.Context) error { // TODO after separation // err := n.Stack.Start() @@ -345,8 +351,8 @@ func (n *ExecutionNode) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("error starting transaction puiblisher: %w", err) } - if n.L1Reader != nil { - n.L1Reader.Start(ctx) + if n.ParentChainReader != nil { + n.ParentChainReader.Start(ctx) } n.SyncMonitor.Start(ctx) return nil @@ -360,8 +366,8 @@ func (n *ExecutionNode) StopAndWait() { n.TxPublisher.StopAndWait() } n.Recorder.OrderlyShutdown() - if 
n.L1Reader != nil && n.L1Reader.Started() { - n.L1Reader.StopAndWait() + if n.ParentChainReader != nil && n.ParentChainReader.Started() { + n.ParentChainReader.StopAndWait() } if n.ExecEngine.Started() { n.ExecEngine.StopAndWait() @@ -430,12 +436,12 @@ func (n *ExecutionNode) ForwardTo(url string) containers.PromiseInterface[struct if n.Sequencer != nil { return n.Sequencer.ForwardTo(url) } else { - return containers.NewReadyPromise[struct{}](struct{}{}, errors.New("forwardTo not supported - sequencer not acrtive")) + return containers.NewReadyPromise[struct{}](struct{}{}, errors.New("forwardTo not supported - sequencer not active")) } } func (n *ExecutionNode) SetConsensusClient(consensus consensus.FullConsensusClient) error { - if err := n.ExecEngine.SetTransactionStreamer(consensus); err != nil { + if err := n.ExecEngine.SetConsensus(consensus); err != nil { return err } return n.SyncMonitor.SetConsensusInfo(consensus) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 459b6ec85e..19f2765f59 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -85,7 +85,7 @@ type SequencerConfigFetcher func() *SequencerConfig var DefaultSequencerConfig = SequencerConfig{ Enable: false, - MaxBlockSpeed: time.Millisecond * 100, + MaxBlockSpeed: time.Millisecond * 250, MaxRevertGasReject: params.TxGas + 10000, MaxAcceptableTimestampDelta: time.Hour, Forwarder: DefaultSequencerForwarderConfig, @@ -93,6 +93,7 @@ var DefaultSequencerConfig = SequencerConfig{ QueueTimeout: time.Second * 12, NonceCacheSize: 1024, // 95% of the default batch poster limit, leaving 5KB for headers and such + // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, @@ -163,9 +164,9 @@ func newNonceCache(size int) *nonceCache { func (c *nonceCache) matches(header *types.Header) bool { if c.dirty != nil { - // The header is updated as the block is built, - // so instead of checking its hash, we do a pointer comparison. - return c.dirty == header + // Note, even though the contents of the header change, c.dirty points to the + // same header, hence hashes will be the same and this check will pass. + return headerreader.HeadersEqual(c.dirty, header) } return c.block == header.ParentHash } @@ -249,11 +250,16 @@ func (c nonceFailureCache) Contains(err NonceError) bool { } func (c nonceFailureCache) Add(err NonceError, queueItem txQueueItem) { + expiry := queueItem.firstAppearance.Add(c.getExpiry()) + if c.Contains(err) || time.Now().After(expiry) { + queueItem.returnResult(err) + return + } key := addressAndNonce{err.sender, err.txNonce} val := &nonceFailure{ queueItem: queueItem, nonceErr: err, - expiry: queueItem.firstAppearance.Add(c.getExpiry()), + expiry: expiry, revived: false, } evicted := c.LruCache.Add(key, val)
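The hunk below swaps the method-bound ctxWithQueueTimeout for a free helper in which a zero timeout means "no deadline" rather than context.WithTimeout's "already expired". Here is a standalone illustration of that contract; the helper body is copied from the hunk, while main() is only a demonstration.

package main

import (
	"context"
	"fmt"
	"time"
)

// ctxWithTimeout is the helper the hunk introduces: timeout 0 yields a
// context canceled only by its parent (or the returned CancelFunc).
func ctxWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	if timeout == time.Duration(0) {
		return context.WithCancel(ctx)
	}
	return context.WithTimeout(ctx, timeout)
}

func main() {
	ctx, cancel := ctxWithTimeout(context.Background(), 0)
	defer cancel()
	_, hasDeadline := ctx.Deadline()
	fmt.Println("deadline with zero timeout:", hasDeadline) // false: runs until canceled

	ctx2, cancel2 := ctxWithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel2()
	<-ctx2.Done()
	fmt.Println("nonzero timeout:", ctx2.Err()) // context deadline exceeded
}

@@ -348,12 +354,12 @@ func (s *Sequencer) onNonceFailureEvict(_ addressAndNonce, failure *nonceFailure} } -func (s *Sequencer) ctxWithQueueTimeout(inctx context.Context) (context.Context, context.CancelFunc) { - timeout := s.config().QueueTimeout +// ctxWithTimeout is like context.WithTimeout except a timeout of 0 means unlimited instead of instantly expired.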
+func ctxWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { if timeout == time.Duration(0) { - return context.WithCancel(inctx) + return context.WithCancel(ctx) } - return context.WithTimeout(inctx, timeout) + return context.WithTimeout(ctx, timeout) } func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { @@ -384,31 +390,41 @@ func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Tran return types.ErrTxTypeNotSupported } - ctx, cancelFunc := s.ctxWithQueueTimeout(parentCtx) + queueTimeout := s.config().QueueTimeout + queueCtx, cancelFunc := ctxWithTimeout(parentCtx, queueTimeout) defer cancelFunc() + // Just to be safe, make sure we don't run over twice the queue timeout + abortCtx, cancel := ctxWithTimeout(parentCtx, queueTimeout*2) + defer cancel() + resultChan := make(chan error, 1) queueItem := txQueueItem{ tx, options, resultChan, false, - ctx, + queueCtx, time.Now(), } select { case s.txQueue <- queueItem: - case <-ctx.Done(): - return ctx.Err() + case <-queueCtx.Done(): + return queueCtx.Err() } select { case res := <-resultChan: return res - case <-parentCtx.Done(): - // We use parentCtx here and not ctx, because the QueueTimeout only applies to the background queue. + case <-abortCtx.Done(): + // We use abortCtx here and not queueCtx, because the QueueTimeout only applies to the background queue. // We want to give the background queue as much time as possible to make a response. - return parentCtx.Err() + err := abortCtx.Err() + if parentCtx.Err() == nil { + // If we've hit the abort deadline (as opposed to parentCtx being canceled), something went wrong. + log.Warn("Transaction sequencing hit abort deadline", "err", err, "submittedAt", queueItem.firstAppearance, "queueTimeout", queueTimeout, "txHash", tx.Hash()) + } + return err } } @@ -622,7 +638,7 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { return queueItems } nextHeaderNumber := arbmath.BigAdd(latestHeader.Number, common.Big1) - signer := types.MakeSigner(bc.Config(), nextHeaderNumber) + signer := types.MakeSigner(bc.Config(), nextHeaderNumber, latestHeader.Time) outputQueueItems := make([]txQueueItem, 0, len(queueItems)) var nextQueueItem *txQueueItem var queueItemsIdx int @@ -678,11 +694,7 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { continue } // Retry this transaction if its predecessor appears - if s.nonceFailures.Contains(nonceError) { - queueItem.returnResult(err) - } else { - s.nonceFailures.Add(nonceError, queueItem) - } + s.nonceFailures.Add(nonceError, queueItem) continue } else if err != nil { nonceCacheRejectedCounter.Inc(1) @@ -976,11 +988,11 @@ func (s *Sequencer) Start(ctxIn context.Context) error { func (s *Sequencer) StopAndWait() { s.StopWaiter.StopAndWait() - if s.txRetryQueue.Len() == 0 && len(s.txQueue) == 0 { + if s.txRetryQueue.Len() == 0 && len(s.txQueue) == 0 && s.nonceFailures.Len() == 0 { return } // this usually means that coordinator's safe-shutdown-delay is too low - log.Warn("sequencer has queued items while shutting down", "txQueue", len(s.txQueue), "retryQueue", s.txRetryQueue.Len()) + log.Warn("Sequencer has queued items while shutting down", "txQueue", len(s.txQueue), "retryQueue", s.txRetryQueue.Len(), "nonceFailures", s.nonceFailures.Len()) _, forwarder := s.GetPauseAndForwarder() if forwarder != nil { var wg sync.WaitGroup @@ -995,6 +1007,7 @@ func (s *Sequencer) 
StopAndWait() { _, failure, _ := s.nonceFailures.GetOldest() failure.revived = true item = failure.queueItem + source = "nonceFailures" s.nonceFailures.RemoveOldest() } else { select { diff --git a/execution/gethexec/sync_monitor.go b/execution/gethexec/sync_monitor.go index e1c0fa5617..84f367e882 100644 --- a/execution/gethexec/sync_monitor.go +++ b/execution/gethexec/sync_monitor.go @@ -14,7 +14,7 @@ type SyncMonitorConfig struct { ConsensusTimeout time.Duration `koanf:"consensus-timeout" reload:"hot"` } -var DefaultSyncMonitorConfig = &SyncMonitorConfig{ +var DefaultSyncMonitorConfig = SyncMonitorConfig{ ConsensusTimeout: time.Second * 5, } diff --git a/execution/gethexec/test_config.go b/execution/gethexec/test_config.go index 7de3427442..c5d061adf0 100644 --- a/execution/gethexec/test_config.go +++ b/execution/gethexec/test_config.go @@ -12,7 +12,9 @@ func ConfigDefaultNonSequencerTest() *Config { config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig config.ExecRPC = ExecRPCConfigTest - config.ConsensesServer = rpcclient.TestClientConfig + config.ConsensusServer = rpcclient.TestClientConfig + config.ParentChainReader = headerreader.TestConfig + config.ForwardingTarget = "null" err := config.Validate() if err != nil { @@ -24,9 +26,10 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault config.Sequencer = TestSequencerConfig - config.L1Reader = headerreader.TestConfig + config.ParentChainReader = headerreader.TestConfig config.ExecRPC = ExecRPCConfigTest - config.ConsensesServer = rpcclient.TestClientConfig + config.ConsensusServer = rpcclient.TestClientConfig + config.ForwardingTarget = "null" err := config.Validate() if err != nil { diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go index 8745702ace..51ba88fec8 100644 --- a/execution/gethexec/tx_pre_checker.go +++ b/execution/gethexec/tx_pre_checker.go @@ -18,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" flag "github.com/spf13/pflag" ) @@ -115,7 +116,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if tx.Gas() < params.TxGas { return core.ErrIntrinsicGas } - sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number), tx) + sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number, header.Time), tx) if err != nil { return err } @@ -134,7 +135,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty return MakeNonceError(sender, tx.Nonce(), stateNonce) } extraInfo := types.DeserializeHeaderExtraInformation(header) - intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Time, extraInfo.ArbOSFormatVersion)) + intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Number, header.Time, extraInfo.ArbOSFormatVersion)) if err != nil { return err } @@ -144,11 +145,6 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if config.Strictness < TxPreCheckerStrictnessLikelyCompatible { return nil } - balance := statedb.GetBalance(sender) - cost := tx.Cost() - if 
arbmath.BigLessThan(balance, cost) { - return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) - } if options != nil { if err := options.Check(extraInfo.L1BlockNumber, header.Time, statedb); err != nil { conditionalTxRejectedByTxPreCheckerCurrentStateCounter.Inc(1) @@ -170,7 +166,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty oldHeader = previousHeader blocksTraversed++ } - if oldHeader != header { + if !headerreader.HeadersEqual(oldHeader, header) { secondOldStatedb, err := bc.StateAt(oldHeader.Root) if err != nil { return fmt.Errorf("failed to get old state: %w", err) @@ -184,10 +180,19 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty conditionalTxAcceptedByTxPreCheckerOldStateCounter.Inc(1) } } + balance := statedb.GetBalance(sender) + cost := tx.Cost() + if arbmath.BigLessThan(balance, cost) { + return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) + } if config.Strictness >= TxPreCheckerStrictnessFullValidation && tx.Nonce() > stateNonce { return MakeNonceError(sender, tx.Nonce(), stateNonce) } - dataCost, _ := arbos.L1PricingState().GetPosterInfo(tx, l1pricing.BatchPosterAddress) + brotliCompressionLevel, err := arbos.BrotliCompressionLevel() + if err != nil { + return fmt.Errorf("failed to get brotli compression level: %w", err) + } + dataCost, _ := arbos.L1PricingState().GetPosterInfo(tx, l1pricing.BatchPosterAddress, brotliCompressionLevel) dataGas := arbmath.BigDiv(dataCost, header.BaseFee) if tx.Gas() < intrinsic+dataGas.Uint64() { return core.ErrIntrinsicGas diff --git a/execution/interface.go b/execution/interface.go index 955a2c2cb2..e059022b78 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -1,6 +1,7 @@ package execution import ( + "context" "errors" "github.com/ethereum/go-ethereum/common" @@ -56,5 +57,8 @@ type FullExecutionClient interface { ExecutionRecorder ExecutionSequencer + Start(ctx context.Context) error + StopAndWait() + Maintenance() containers.PromiseInterface[struct{}] } diff --git a/execution/json.go b/execution/json.go index 90ff5ed4b4..b707072a81 100644 --- a/execution/json.go +++ b/execution/json.go @@ -10,7 +10,7 @@ import ( type JsonRecordResult struct { Pos arbutil.MessageIndex BlockHash common.Hash - Preimages jsonapi.PreimagesMapJson + Preimages *jsonapi.PreimagesMapJson BatchInfo []validator.BatchInfo } diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go index effa55d035..ed1bcfa94b 100644 --- a/execution/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -6,6 +6,7 @@ package nodeInterface import ( "context" "errors" + "fmt" "math/big" "sort" @@ -64,7 +65,7 @@ func (n NodeInterface) FindBatchContainingBlock(c ctx, evm mech, blockNum uint64 if fetcher == nil { return 0, errors.New("batch fetcher not set") } - batch, err := fetcher.FindL1BatchForMessage(msgIndex).Await(node.ExecEngine.GetContext()) + batch, err := fetcher.FindInboxBatchContainingMessage(msgIndex).Await(node.ExecEngine.GetContext()) return batch, err } @@ -94,10 +95,10 @@ func (n NodeInterface) GetL1Confirmations(c ctx, evm mech, blockHash bytes32) (u if err != nil { return 0, err } - if node.L1Reader == nil { + if node.ParentChainReader == nil { return 0, nil } - latestHeader, err := node.L1Reader.LastHeaderWithError() + latestHeader, err := node.ParentChainReader.LastHeaderWithError() if err != nil { return 0, err } @@ -470,7 
+471,11 @@ func (n NodeInterface) GasEstimateL1Component( // Compute the fee paid for L1 in L2 terms // See in GasChargingHook that this does not induce truncation error // - feeForL1, _ := pricing.PosterDataCost(msg, l1pricing.BatchPosterAddress) + brotliCompressionLevel, err := c.State.BrotliCompressionLevel() + if err != nil { + return 0, nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) + } + feeForL1, _ := pricing.PosterDataCost(msg, l1pricing.BatchPosterAddress, brotliCompressionLevel) feeForL1 = arbmath.BigMulByBips(feeForL1, arbos.GasEstimationL1PricePadding) gasForL1 := arbmath.BigDiv(feeForL1, baseFee).Uint64() return gasForL1, baseFee, l1BaseFeeEstimate, nil @@ -508,7 +513,11 @@ func (n NodeInterface) GasEstimateComponents( if err != nil { return 0, 0, nil, nil, err } - feeForL1, _ := pricing.PosterDataCost(msg, l1pricing.BatchPosterAddress) + brotliCompressionLevel, err := c.State.BrotliCompressionLevel() + if err != nil { + return 0, 0, nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) + } + feeForL1, _ := pricing.PosterDataCost(msg, l1pricing.BatchPosterAddress, brotliCompressionLevel) baseFee, err := c.State.L2PricingState().BaseFeeWei() if err != nil { @@ -567,3 +576,80 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h calldataForL1 = data return } + +// BlockL1Num fetches the L1 block number of a given L2 block number. +// c ctx and evm mech arguments are not used but supplied to match the precompile function type in NodeInterface contract +func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) { + blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum)) + if err != nil { + return 0, err + } + if blockHeader == nil { + return 0, fmt.Errorf("nil header for l2 block: %d", l2BlockNum) + } + blockL1Num := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber + return blockL1Num, nil +} + +func (n NodeInterface) matchL2BlockNumWithL1(c ctx, evm mech, l2BlockNum uint64, l1BlockNum uint64) error { + blockL1Num, err := n.BlockL1Num(c, evm, l2BlockNum) + if err != nil { + return fmt.Errorf("failed to get the L1 block number of the L2 block: %v. Error: %w", l2BlockNum, err) + } + if blockL1Num != l1BlockNum { + return fmt.Errorf("no L2 block was found with the given L1 block number.
Found L2 block: %v with L1 block number: %v, given L1 block number: %v", l2BlockNum, blockL1Num, l1BlockNum) + } + return nil +} + +// L2BlockRangeForL1 finds the first and last L2 block numbers that have the given L1 block number +func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (uint64, uint64, error) { + currentBlockNum := n.backend.CurrentBlock().Number.Uint64() + genesis := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum + + storedMids := map[uint64]uint64{} + firstL2BlockForL1 := func(target uint64) (uint64, error) { + low, high := genesis, currentBlockNum + highBlockL1Num, err := n.BlockL1Num(c, evm, high) + if err != nil { + return 0, err + } + if highBlockL1Num < target { + return high + 1, nil + } + for low < high { + mid := arbmath.SaturatingUAdd(low, high) / 2 + if _, ok := storedMids[mid]; !ok { + midBlockL1Num, err := n.BlockL1Num(c, evm, mid) + if err != nil { + return 0, err + } + storedMids[mid] = midBlockL1Num + } + if storedMids[mid] < target { + low = mid + 1 + } else { + high = mid + } + } + return high, nil + } + + firstBlock, err := firstL2BlockForL1(l1BlockNum) + if err != nil { + return 0, 0, fmt.Errorf("failed to get the first L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) + } + lastBlock, err := firstL2BlockForL1(l1BlockNum + 1) + if err != nil { + return 0, 0, fmt.Errorf("failed to get the last L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) + } + + if err := n.matchL2BlockNumWithL1(c, evm, firstBlock, l1BlockNum); err != nil { + return 0, 0, err + } + lastBlock -= 1 + if err = n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { + return 0, 0, err + } + return firstBlock, lastBlock, nil +} diff --git a/execution/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go index 186fbd0557..3a863e31b5 100644 --- a/execution/nodeInterface/virtual-contracts.go +++ b/execution/nodeInterface/virtual-contracts.go @@ -53,6 +53,7 @@ func init() { statedb *state.StateDB, header *types.Header, backend core.NodeInterfaceBackendAPI, + blockCtx *vm.BlockContext, ) (*core.Message, *ExecutionResult, error) { to := msg.To arbosVersion := arbosState.ArbOSVersion(statedb) // check ArbOS has been installed @@ -87,10 +88,7 @@ func init() { return msg, nil, nil } - evm, vmError, err := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}) - if err != nil { - return msg, nil, err - } + evm, vmError := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}, blockCtx) go func() { <-ctx.Done() evm.Cancel() @@ -137,7 +135,12 @@ func init() { return } - posterCost, _ := state.L1PricingState().PosterDataCost(msg, l1pricing.BatchPosterAddress) + brotliCompressionLevel, err := state.BrotliCompressionLevel() + if err != nil { + log.Error("failed to get brotli compression level", "err", err) + return + } + posterCost, _ := state.L1PricingState().PosterDataCost(msg, l1pricing.BatchPosterAddress, brotliCompressionLevel) posterCostInL2Gas := arbos.GetPosterGas(state, header.BaseFee, msg.TxRunMode, posterCost) *gascap = arbmath.SaturatingUAdd(*gascap, posterCostInL2Gas) } diff --git a/gethhook/geth_test.go b/gethhook/geth_test.go index 5c1b6afe8c..6274a54119 100644 --- a/gethhook/geth_test.go +++ b/gethhook/geth_test.go @@ -40,7 +40,6 @@ var testChainConfig = ¶ms.ChainConfig{ DAOForkBlock: nil, DAOForkSupport: true, EIP150Block: big.NewInt(0), - EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), diff 
--git a/go-ethereum b/go-ethereum index 994bfde0fb..859182f2fa 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 994bfde0fb18aad9e8c79c2b95478ee485cfa3a7 +Subproject commit 859182f2fa2d33c03fba5e29e1e750d3f49525fe diff --git a/go.mod b/go.mod index ed42790063..4ade1ce7e9 100644 --- a/go.mod +++ b/go.mod @@ -16,21 +16,29 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/enescakir/emoji v1.0.0 github.com/ethereum/go-ethereum v1.10.26 - github.com/hashicorp/golang-lru/v2 v2.0.1 - github.com/ipfs/go-cid v0.3.2 + github.com/fatih/structtag v1.2.0 + github.com/gdamore/tcell/v2 v2.6.0 + github.com/google/go-cmp v0.5.9 + github.com/hashicorp/golang-lru/v2 v2.0.2 + github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-libipfs v0.6.2 github.com/ipfs/interface-go-ipfs-core v0.11.0 github.com/ipfs/kubo v0.19.1 github.com/knadh/koanf v1.4.0 - github.com/libp2p/go-libp2p v0.26.4 - github.com/multiformats/go-multiaddr v0.8.0 + github.com/libp2p/go-libp2p v0.27.8 + github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 + github.com/r3labs/diff/v3 v3.0.1 + github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/term v0.5.0 + golang.org/x/term v0.13.0 + golang.org/x/tools v0.7.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -40,7 +48,7 @@ require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/DataDog/zstd v1.5.2 // indirect - github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect @@ -67,9 +75,8 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect @@ -91,21 +98,22 @@ require ( github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect + github.com/gdamore/encoding v1.0.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 
v4.3.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect @@ -113,7 +121,6 @@ require ( github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.1.1 // indirect @@ -165,16 +172,16 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-doh-resolver v0.4.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-kad-dht v0.21.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect github.com/libp2p/go-libp2p-pubsub v0.9.0 // indirect @@ -189,9 +196,10 @@ require ( github.com/libp2p/go-reuseport v0.2.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.0 // indirect @@ -203,11 +211,11 @@ require ( github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.8.1 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/onsi/ginkgo/v2 v2.9.2 // indirect github.com/opencontainers/runtime-spec 
v1.0.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect @@ -216,22 +224,24 @@ require ( github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.2.1 // indirect - github.com/quic-go/qtls-go1-20 v0.1.1 // indirect + github.com/quic-go/qtls-go1-19 v0.3.3 // indirect + github.com/quic-go/qtls-go1-20 v0.2.3 // indirect github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect + github.com/rivo/uniseg v0.4.3 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/testify v1.8.2 // indirect github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect @@ -252,18 +262,19 @@ require ( go.opentelemetry.io/otel/trace v1.7.0 // indirect go.opentelemetry.io/proto/otlp v0.16.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/dig v1.15.0 // indirect - go.uber.org/fx v1.18.2 // indirect - go.uber.org/multierr v1.9.0 // indirect + go.uber.org/dig v1.16.1 // indirect + go.uber.org/fx v1.19.2 // indirect + go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/tools v0.3.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.10.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/grpc v1.46.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect @@ -283,24 +294,23 @@ require ( github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/go-ole/go-ole v1.2.1 // 
indirect github.com/go-redis/redis/v8 v8.11.4 github.com/go-stack/stack v1.8.1 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.0 - github.com/huin/goupnp v1.0.3 // indirect + github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c + github.com/huin/goupnp v1.1.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect @@ -311,11 +321,11 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - golang.org/x/crypto v0.6.0 - golang.org/x/net v0.7.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index 463e34899f..11ed0a2cde 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= @@ -231,8 +232,8 @@ github.com/codeclysm/extract/v3 v3.0.2 h1:sB4LcE3Php7LkhZwN0n2p8GCwZe92PEQutdbGU github.com/codeclysm/extract/v3 v3.0.2/go.mod h1:NKsw+hqua9H+Rlwy/w/3Qgt9jDonYEgB6wJu+25eOKw= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups 
v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -304,7 +305,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= @@ -312,6 +312,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/ github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -328,6 +330,8 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -350,6 +354,10 @@ github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZ github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite 
v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.6.0 h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg= +github.com/gdamore/tcell/v2 v2.6.0/go.mod h1:be9omFATkdr0D9qewWW3d+MEvl5dha+Etb5y65J2H8Y= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -397,8 +405,9 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= @@ -434,8 +443,9 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -469,13 +479,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf 
v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -495,6 +507,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -510,15 +523,15 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= 
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -589,8 +602,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= -github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= +github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -601,17 +614,15 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= +github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -647,8 +658,8 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= @@ -897,21 +908,21 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= +github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -955,10 +966,10 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.26.4 h1:VA9ChjN0n1BwwfU/dqx4Zj9ezXtIxGk8FyJPwFONqxs= -github.com/libp2p/go-libp2p v0.26.4/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI= +github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= @@ -1142,6 +1153,8 @@ github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1171,11 +1184,12 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= 
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -1189,8 +1203,8 @@ github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1257,8 +1271,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= +github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1278,11 +1292,11 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ= -github.com/multiformats/go-multicodec v0.7.0/go.mod 
h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1336,8 +1350,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= -github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1345,7 +1359,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1417,8 +1431,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1430,14 +1444,16 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs 
v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A= -github.com/quic-go/qtls-go1-19 v0.2.1/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3wSwQk= -github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= +github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= +github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= +github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= @@ -1447,6 +1463,11 @@ github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8 github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 h1:ZyM/+FYnpbZsFWuCohniM56kRoHRB4r5EuIzXEYkpxo= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703/go.mod h1:nVwGv4MP47T0jvlk7KuTTjjuSmrGO4JF0iaiNt4bufE= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1550,7 +1571,6 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -1586,6 +1606,10 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= @@ -1635,6 +1659,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1682,10 +1707,10 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= -go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= -go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= +go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= +go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= +go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= @@ -1694,8 +1719,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -1741,8 +1766,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1753,8 +1778,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1778,8 +1803,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1836,11 +1862,11 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1861,8 +1887,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1952,15 +1979,21 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1970,8 +2003,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2038,9 +2072,9 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= 
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2113,8 +2147,13 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2145,8 +2184,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2161,8 +2201,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf 
v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/linter/koanf/handlers.go b/linter/koanf/handlers.go
new file mode 100644
index 0000000000..5826004014
--- /dev/null
+++ b/linter/koanf/handlers.go
@@ -0,0 +1,227 @@
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"strings"
+
+	"github.com/fatih/structtag"
+	"golang.org/x/tools/go/analysis"
+)
+
+// handleComposite tracks use of fields in composite literals.
+// E.g. `Config{A: 1, B: 2, C: 3}` increases the counters of fields A, B and C.
+func handleComposite(pass *analysis.Pass, cl *ast.CompositeLit, cnt map[string]int) {
+	id, ok := cl.Type.(*ast.Ident)
+	if !ok {
+		return
+	}
+	for _, e := range cl.Elts {
+		if kv, ok := e.(*ast.KeyValueExpr); ok {
+			if ki, ok := kv.Key.(*ast.Ident); ok {
+				fi := pass.TypesInfo.Types[id].Type.String() + "." + ki.Name
+				cnt[normalizeID(pass, fi)]++
+			}
+		}
+	}
+}
+
+// handleSelector handles a selector expression recursively, so the expression
+// a.B.C.D updates the counters for the fields a.B.C.D, a.B.C and a.B.
+// It updates the counters map in place, increasing the corresponding
+// identifiers by increaseBy.
+func handleSelector(pass *analysis.Pass, se *ast.SelectorExpr, increaseBy int, cnt map[string]int) string {
+	if e, ok := se.X.(*ast.SelectorExpr); ok {
+		// Full field identifier, including package name.
+		fi := pass.TypesInfo.Types[e].Type.String() + "." + se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		prefix := handleSelector(pass, e, increaseBy, cnt)
+		fi = prefix + "." + se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		return fi
+	}
+	// Handle selectors on function calls, e.g. `config().Enabled`.
+	if _, ok := se.X.(*ast.CallExpr); ok {
+		fi := pass.TypesInfo.Types[se.X].Type.String() + "." + se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		return fi
+	}
+	if ident, ok := se.X.(*ast.Ident); ok {
+		if pass.TypesInfo.Types[ident].Type != nil {
+			fi := pass.TypesInfo.Types[ident].Type.String() + "." + se.Sel.Name
+			cnt[normalizeID(pass, fi)] += increaseBy
+			return fi
+		}
+	}
+	return ""
+}
+
+// koanfFields returns a map from each struct field that has a koanf tag to its
+// position in the source.
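+// For example, given a (hypothetical) package "a" containing:
+//
+//	type BatchPosterConfig struct {
+//		Enable  bool `koanf:"enable"`
+//		MaxSize int  `koanf:"max-size"`
+//	}
+//
+// the map would contain the keys "a.BatchPosterConfig.Enable" and
+// "a.BatchPosterConfig.MaxSize", each mapped to the field's position.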
+func koanfFields(pass *analysis.Pass) map[string]token.Pos {
+	res := make(map[string]token.Pos)
+	for _, f := range pass.Files {
+		pkgName := f.Name.Name
+		ast.Inspect(f, func(node ast.Node) bool {
+			if ts, ok := node.(*ast.TypeSpec); ok {
+				st, ok := ts.Type.(*ast.StructType)
+				if !ok {
+					return true
+				}
+				for _, f := range st.Fields.List {
+					if tag := tagFromField(f); tag != "" {
+						t := strings.Join([]string{pkgName, ts.Name.Name, f.Names[0].Name}, ".")
+						res[t] = f.Pos()
+					}
+				}
+			}
+			return true
+		})
+	}
+	return res
+}
+
+func containsFlagSet(params []*ast.Field) bool {
+	for _, p := range params {
+		se, ok := p.Type.(*ast.StarExpr)
+		if !ok {
+			continue
+		}
+		sle, ok := se.X.(*ast.SelectorExpr)
+		if !ok {
+			continue
+		}
+		if sle.Sel.Name == "FlagSet" {
+			return true
+		}
+	}
+	return false
+}
+
+// checkFlagDefs checks the flag definitions in the function.
+// The returned Result lists every flag whose name doesn't match the
+// corresponding field name.
+func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl, cnt map[string]int) Result {
+	// Ignore functions that don't take a flag set as a parameter.
+	if !containsFlagSet(f.Type.Params.List) {
+		return Result{}
+	}
+	var res Result
+	for _, s := range f.Body.List {
+		es, ok := s.(*ast.ExprStmt)
+		if !ok {
+			continue
+		}
+		callE, ok := es.X.(*ast.CallExpr)
+		if !ok {
+			continue
+		}
+		if len(callE.Args) != 3 {
+			continue
+		}
+		sl, ok := extractStrLit(callE.Args[0])
+		if !ok {
+			continue
+		}
+		s, ok := selectorName(callE.Args[1])
+		if !ok {
+			continue
+		}
+		handleSelector(pass, callE.Args[1].(*ast.SelectorExpr), -1, cnt)
+		if normSL := normalizeTag(sl); !strings.EqualFold(normSL, s) {
+			res.Errors = append(res.Errors, koanfError{
+				Pos:     f.Pos(),
+				Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s),
+				err:     errIncorrectFlag,
+			})
+		}
+	}
+	return res
+}
+
+func selectorName(e ast.Expr) (string, bool) {
+	n, ok := e.(ast.Node)
+	if !ok {
+		return "", false
+	}
+	se, ok := n.(*ast.SelectorExpr)
+	if !ok {
+		return "", false
+	}
+	return se.Sel.Name, true
+}
+
+// extractStrLit extracts the string literal from an expression that is either:
+//   - a string literal, or
+//   - a sum of a variable and a string literal.
+//
+// E.g.
+//   - extractStrLit(`"max-size"`) = "max-size"
+//   - extractStrLit(`prefix + ".enable"`) = "enable" (the prefix dot is dropped).
+func extractStrLit(e ast.Expr) (string, bool) {
+	if s, ok := strLit(e); ok {
+		return s, true
+	}
+	if be, ok := e.(*ast.BinaryExpr); ok {
+		if be.Op == token.ADD {
+			if s, ok := strLit(be.Y); ok {
+				// Drop the prefix dot.
+				return s[1:], true
+			}
+		}
+	}
+	return "", false
+}
+
+func strLit(e ast.Expr) (string, bool) {
+	if s, ok := e.(*ast.BasicLit); ok {
+		if s.Kind == token.STRING {
+			return strings.Trim(s.Value, "\""), true
+		}
+	}
+	return "", false
+}
+
+// tagFromField extracts the koanf tag from a struct field.
+func tagFromField(f *ast.Field) string {
+	if f.Tag == nil {
+		return ""
+	}
+	tags, err := structtag.Parse(strings.Trim(f.Tag.Value, "`"))
+	if err != nil {
+		return ""
+	}
+	tag, err := tags.Get("koanf")
+	if err != nil {
+		return ""
+	}
+	return normalizeTag(tag.Name)
+}
+
+// checkStruct returns violations where a koanf tag name doesn't match the
+// field name.
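+// For example, a (hypothetical) field declared as
+//
+//	MaxSize int `koanf:"max-sz"`
+//
+// is reported, because the normalized tag "maxsz" doesn't case-insensitively
+// match the field name "MaxSize".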
+func checkStruct(pass *analysis.Pass, s *ast.StructType) Result {
+	var res Result
+	for _, f := range s.Fields.List {
+		tag := tagFromField(f)
+		if tag == "" {
+			continue
+		}
+		fieldName := f.Names[0].Name
+		if !strings.EqualFold(tag, fieldName) {
+			res.Errors = append(res.Errors, koanfError{
+				Pos:     f.Pos(),
+				Message: fmt.Sprintf("field name: %q doesn't match tag name: %q", fieldName, tag),
+				err:     errMismatch,
+			})
+		}
+	}
+	return res
+}
+
+func normalizeTag(s string) string {
+	return strings.ReplaceAll(s, "-", "")
+}
+
+func normalizeID(pass *analysis.Pass, id string) string {
+	id = strings.TrimPrefix(id, "*")
+	return pass.Pkg.Name() + strings.TrimPrefix(id, pass.Pkg.Path())
+}
diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go
new file mode 100644
index 0000000000..d6780760e7
--- /dev/null
+++ b/linter/koanf/koanf.go
@@ -0,0 +1,107 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"reflect"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/singlechecker"
+)
+
+var (
+	errUnused   = errors.New("unused")
+	errMismatch = errors.New("mismatched field name and tag in a struct")
+	// e.g. f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+	errIncorrectFlag = errors.New("mismatching flag initialization")
+)
+
+func New(conf any) ([]*analysis.Analyzer, error) {
+	return []*analysis.Analyzer{Analyzer}, nil
+}
+
+var Analyzer = &analysis.Analyzer{
+	Name:       "koanfcheck",
+	Doc:        "check for koanf misconfigurations",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(false, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+var analyzerForTests = &analysis.Analyzer{
+	Name:       "testkoanfcheck",
+	Doc:        "check for koanf misconfigurations (for tests)",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(true, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+// koanfError indicates the position of an error in the configuration.
+type koanfError struct {
+	Pos     token.Pos
+	Message string
+	err     error
+}
+
+// Result is returned from the checks, and holds all the configuration errors
+// that were found.
+type Result struct {
+	Errors []koanfError
+}
+
+func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
+	var (
+		ret Result
+		cnt = make(map[string]int)
+		// koanfFields contains all the struct fields that have a koanf tag.
+		// Each field is identified as "{pkgName}.{structName}.{fieldName}",
+		// e.g. "a.BatchPosterConfig.Enable", "a.BatchPosterConfig.MaxSize".
+		koanfFields = koanfFields(pass)
+	)
+	for _, f := range pass.Files {
+		ast.Inspect(f, func(node ast.Node) bool {
+			var res Result
+			switch v := node.(type) {
+			case *ast.StructType:
+				res = checkStruct(pass, v)
+			case *ast.FuncDecl:
+				res = checkFlagDefs(pass, v, cnt)
+			case *ast.SelectorExpr:
+				handleSelector(pass, v, 1, cnt)
+			case *ast.IfStmt:
+				if se, ok := v.Cond.(*ast.SelectorExpr); ok {
+					handleSelector(pass, se, 1, cnt)
+				}
+			case *ast.CompositeLit:
+				handleComposite(pass, v, cnt)
+			default:
+			}
+			ret.Errors = append(ret.Errors, res.Errors...)
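+			// Returning true keeps the traversal descending into child nodes,
+			// so nested selectors and composite literals are counted as well.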
+			return true
+		})
+	}
+	for k := range koanfFields {
+		if cnt[k] == 0 {
+			ret.Errors = append(ret.Errors,
+				koanfError{
+					Pos:     koanfFields[k],
+					Message: fmt.Sprintf("field %v not used", k),
+					err:     errUnused,
+				})
+		}
+	}
+	for _, err := range ret.Errors {
+		if !dryRun {
+			pass.Report(analysis.Diagnostic{
+				Pos:      err.Pos,
+				Message:  err.Message,
+				Category: "koanf",
+			})
+		}
+	}
+	return ret, nil
+}
+
+func main() {
+	singlechecker.Main(Analyzer)
+}
diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go
new file mode 100644
index 0000000000..064ae533c4
--- /dev/null
+++ b/linter/koanf/koanf_test.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+var (
+	incorrectFlag = "incorrect_flag"
+	mismatch      = "mismatch"
+	unused        = "unused"
+)
+
+func testData(t *testing.T) string {
+	t.Helper()
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Failed to get wd: %s", err)
+	}
+	return filepath.Join(filepath.Dir(wd), "testdata")
+}
+
+// TestMismatch tests the koanf/a package, which contains two types of errors:
+//   - a koanf tag that doesn't match the field name.
+//   - a flag definition that doesn't match the field name.
+// The errors are marked as comments in the package source file.
+func TestMismatch(t *testing.T) {
+	testdata := testData(t)
+	got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/a"))
+	want := map[string]int{
+		incorrectFlag: 2,
+		mismatch:      1,
+	}
+	if diff := cmp.Diff(got, want); diff != "" {
+		t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff)
+	}
+}
+
+func TestUnused(t *testing.T) {
+	testdata := testData(t)
+	got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/b"))
+	if diff := cmp.Diff(got, map[string]int{"unused": 2}); diff != "" {
+		t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff)
+	}
+}
+
+func errCounts(res []*analysistest.Result) map[string]int {
+	m := make(map[string]int)
+	for _, r := range res {
+		if rs, ok := r.Result.(Result); ok {
+			for _, e := range rs.Errors {
+				var s string
+				switch {
+				case errors.Is(e.err, errIncorrectFlag):
+					s = incorrectFlag
+				case errors.Is(e.err, errMismatch):
+					s = mismatch
+				case errors.Is(e.err, errUnused):
+					s = unused
+				}
+				m[s] = m[s] + 1
+			}
+		}
+	}
+	return m
+}
diff --git a/linter/pointercheck/pointer.go b/linter/pointercheck/pointer.go
new file mode 100644
index 0000000000..6500b01222
--- /dev/null
+++ b/linter/pointercheck/pointer.go
@@ -0,0 +1,100 @@
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"reflect"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/singlechecker"
+)
+
+func New(conf any) ([]*analysis.Analyzer, error) {
+	return []*analysis.Analyzer{Analyzer}, nil
+}
+
+var Analyzer = &analysis.Analyzer{
+	Name:       "pointercheck",
+	Doc:        "check for pointer comparison",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(false, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+var analyzerForTests = &analysis.Analyzer{
+	Name:       "testpointercheck",
+	Doc:        "check for pointer comparison (for tests)",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(true, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+// pointerCmpError indicates the position of a pointer comparison.
+type pointerCmpError struct {
+	Pos     token.Position
+	Message string
+}
+
+// Result is returned from the checkExpr function, and holds all the
+// pointer comparison errors.
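+// Each error corresponds to one `==` or `!=` whose operands are both of
+// pointer type, e.g. (an illustrative case from this linter's testdata)
+// `a != b` where a and b are *A; comparing pointed-to values, as in
+// a.x == b.x, is not reported.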
+type Result struct {
+	Errors []pointerCmpError
+}
+
+func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
+	var ret Result
+	for _, f := range pass.Files {
+		ast.Inspect(f, func(node ast.Node) bool {
+			var res *Result
+			switch e := node.(type) {
+			case *ast.BinaryExpr:
+				res = checkExpr(pass, e)
+			default:
+			}
+			if res == nil {
+				return true
+			}
+			for _, err := range res.Errors {
+				ret.Errors = append(ret.Errors, err)
+				if !dryRun {
+					pass.Report(analysis.Diagnostic{
+						Pos:      pass.Fset.File(f.Pos()).Pos(err.Pos.Offset),
+						Message:  err.Message,
+						Category: "pointercheck",
+					})
+				}
+			}
+			return true
+		},
+		)
+	}
+	return ret, nil
+}
+
+func checkExpr(pass *analysis.Pass, e *ast.BinaryExpr) *Result {
+	if e.Op != token.EQL && e.Op != token.NEQ {
+		return nil
+	}
+	ret := &Result{}
+	if ptrIdent(pass, e.X) && ptrIdent(pass, e.Y) {
+		ret.Errors = append(ret.Errors, pointerCmpError{
+			Pos:     pass.Fset.Position(e.Pos()),
+			Message: fmt.Sprintf("comparison of two pointers in expression %v", e),
+		})
+	}
+	return ret
+}
+
+func ptrIdent(pass *analysis.Pass, e ast.Expr) bool {
+	switch tp := e.(type) {
+	case *ast.Ident, *ast.SelectorExpr:
+		et := pass.TypesInfo.Types[tp].Type
+		_, isPtr := (et).(*types.Pointer)
+		return isPtr
+	}
+	return false
+}
+
+func main() {
+	singlechecker.Main(Analyzer)
+}
diff --git a/linter/pointercheck/pointer_test.go b/linter/pointercheck/pointer_test.go
new file mode 100644
index 0000000000..290e3826de
--- /dev/null
+++ b/linter/pointercheck/pointer_test.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+func TestAll(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Failed to get wd: %s", err)
+	}
+	testdata := filepath.Join(filepath.Dir(wd), "testdata")
+	res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck")
+	if cnt := countErrors(res); cnt != 6 {
+		t.Errorf("analysistest.Run() got %v errors, expected 6", cnt)
+	}
+}
+
+func countErrors(errs []*analysistest.Result) int {
+	cnt := 0
+	for _, e := range errs {
+		if r, ok := e.Result.(Result); ok {
+			cnt += len(r.Errors)
+		}
+	}
+	return cnt
+}
diff --git a/linter/structinit/structinit.go b/linter/structinit/structinit.go
new file mode 100644
index 0000000000..e4e65bc3fc
--- /dev/null
+++ b/linter/structinit/structinit.go
@@ -0,0 +1,122 @@
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"reflect"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/singlechecker"
+)
+
+// Tip for the linter that a struct carrying this comment should be included
+// in the analysis.
+// Note: the comment must be on the line directly above the struct definition.
+const linterTip = "// lint:require-exhaustive-initialization"
+
+func New(conf any) ([]*analysis.Analyzer, error) {
+	return []*analysis.Analyzer{Analyzer}, nil
+}
+
+// Analyzer implements a struct analyzer for structs annotated with
+// linterTip; it checks that every instantiation initializes all the fields.
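+//
+// For example (mirroring this linter's testdata):
+//
+//	// lint:require-exhaustive-initialization
+//	type interestingStruct struct {
+//		x int
+//		b *boringStruct
+//	}
+//
+// A composite literal such as &interestingStruct{x: 1} is then reported,
+// since only one of the two fields is initialized.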
+var Analyzer = &analysis.Analyzer{
+	Name:       "structinit",
+	Doc:        "check for struct field initializations",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(false, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+var analyzerForTests = &analysis.Analyzer{
+	Name:       "teststructinit",
+	Doc:        "check for struct field initializations",
+	Run:        func(p *analysis.Pass) (interface{}, error) { return run(true, p) },
+	ResultType: reflect.TypeOf(Result{}),
+}
+
+type structError struct {
+	Pos     token.Pos
+	Message string
+}
+
+type Result struct {
+	Errors []structError
+}
+
+func run(dryRun bool, pass *analysis.Pass) (interface{}, error) {
+	var (
+		ret     Result
+		structs = markedStructs(pass)
+	)
+	for _, f := range pass.Files {
+		ast.Inspect(f, func(node ast.Node) bool {
+			// For every composite literal, check that the number of elements
+			// in the literal matches the number of struct fields.
+			if cl, ok := node.(*ast.CompositeLit); ok {
+				stName := pass.TypesInfo.Types[cl].Type.String()
+				if cnt, found := structs[stName]; found && cnt != len(cl.Elts) {
+					ret.Errors = append(ret.Errors, structError{
+						Pos:     cl.Pos(),
+						Message: fmt.Sprintf("struct %q initialized with %d of %d total fields", stName, len(cl.Elts), cnt),
+					})
+				}
+			}
+			return true
+		})
+	}
+	for _, err := range ret.Errors {
+		if !dryRun {
+			pass.Report(analysis.Diagnostic{
+				Pos:      err.Pos,
+				Message:  err.Message,
+				Category: "structinit",
+			})
+		}
+	}
+	return ret, nil
+}
+
+// markedStructs returns a map of the structs that are annotated for the
+// linter to check that all fields are initialized when the struct is
+// instantiated. It maps the struct's full name (including package path) to
+// the number of fields it contains.
+func markedStructs(pass *analysis.Pass) map[string]int {
+	res := make(map[string]int)
+	for _, f := range pass.Files {
+		tips := make(map[position]bool)
+		ast.Inspect(f, func(node ast.Node) bool {
+			switch n := node.(type) {
+			case *ast.Comment:
+				p := pass.Fset.Position(node.Pos())
+				if strings.Contains(n.Text, linterTip) {
+					tips[position{p.Filename, p.Line + 1}] = true
+				}
+			case *ast.TypeSpec:
+				if st, ok := n.Type.(*ast.StructType); ok {
+					p := pass.Fset.Position(st.Struct)
+					if tips[position{p.Filename, p.Line}] {
+						fieldsCnt := 0
+						for _, field := range st.Fields.List {
+							fieldsCnt += len(field.Names)
+						}
+						res[pass.Pkg.Path()+"."+n.Name.Name] = fieldsCnt
+					}
+				}
+			}
+			return true
+		})
+	}
+	return res
+}
+
+type position struct {
+	fileName string
+	line     int
+}
+
+func main() {
+	singlechecker.Main(Analyzer)
+}
diff --git a/linter/structinit/structinit_test.go b/linter/structinit/structinit_test.go
new file mode 100644
index 0000000000..db3676e185
--- /dev/null
+++ b/linter/structinit/structinit_test.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+func testData(t *testing.T) string {
+	t.Helper()
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Failed to get wd: %s", err)
+	}
+	return filepath.Join(filepath.Dir(wd), "testdata")
+}
+
+func TestLinter(t *testing.T) {
+	testdata := testData(t)
+	got := errCount(analysistest.Run(t, testdata, analyzerForTests, "structinit/a"))
+	if got != 2 {
+		t.Errorf("analysistest.Run() got %d errors, expected 2", got)
+	}
+}
+
+func errCount(res []*analysistest.Result) int {
+	cnt := 0
+	for _, r := range res {
+		if rs, ok := r.Result.(Result); ok {
+			cnt += len(rs.Errors)
+		}
+	}
+	return cnt
+}
diff --git a/linter/testdata/src/koanf/a/a.go b/linter/testdata/src/koanf/a/a.go
new file mode 100644
index 0000000000..a0513fb09b
--- /dev/null
+++ b/linter/testdata/src/koanf/a/a.go
@@ -0,0 +1,58 @@
+package a
+
+import (
+	"flag"
+)
+
+type Config struct {
+	L2       int `koanf:"chain"` // Err: mismatch.
+	LogLevel int `koanf:"log-level"`
+	LogType  int `koanf:"log-type"`
+	Metrics  int `koanf:"metrics"`
+	PProf    int `koanf:"pprof"`
+	Node     int `koanf:"node"`
+	Queue    int `koanf:"queue"`
+}
+
+// Cover usage of all the fields in various ways:
+
+// Instantiating a type.
+var defaultConfig = Config{
+	L2:       1,
+	LogLevel: 2,
+}
+
+// Instantiating a type and taking a reference.
+var defaultConfigPtr = &Config{
+	LogType: 3,
+	Metrics: 4,
+}
+
+func init() {
+	defaultConfig.PProf = 5
+	defaultConfig.Node, _ = 6, 0
+	defaultConfigPtr.Queue = 7
+}
+
+type BatchPosterConfig struct {
+	Enable  bool `koanf:"enable"`
+	MaxSize int  `koanf:"max-size" reload:"hot"`
+}
+
+var DefaultBatchPosterConfig BatchPosterConfig
+
+func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enabled", DefaultBatchPosterConfig.Enable, "") // Err: incorrect flag.
+	f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "")          // Err: incorrect flag.
+}
+
+func ConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
+	f.Int("max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+}
+
+func init() {
+	// Fields must be used outside flag definitions at least once.
+	DefaultBatchPosterConfig.Enable = true
+	DefaultBatchPosterConfig.MaxSize = 3
+}
diff --git a/linter/testdata/src/koanf/b/b.go b/linter/testdata/src/koanf/b/b.go
new file mode 100644
index 0000000000..fe958f17b3
--- /dev/null
+++ b/linter/testdata/src/koanf/b/b.go
@@ -0,0 +1,52 @@
+package b
+
+import (
+	"flag"
+	"fmt"
+)
+
+type ParCfg struct {
+	child      ChildCfg      `koanf:"child"`
+	grandChild GrandChildCfg `koanf:grandchild`
+}
+
+var defaultCfg = ParCfg{}
+
+type ChildCfg struct {
+	A bool `koanf:"A"`
+	B bool `koanf:"B"`
+	C bool `koanf:"C"`
+	D bool `koanf:"D"` // Error: not used outside flag definition.
+}
+
+var defaultChildCfg = ChildCfg{}
+
+func childConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".a", defaultChildCfg.A, "")
+	f.Bool("b", defaultChildCfg.B, "")
+	f.Bool("c", defaultChildCfg.C, "")
+	f.Bool("d", defaultChildCfg.D, "")
+}
+
+type GrandChildCfg struct {
+	A int `koanf:"A"` // Error: unused.
+}
+
+func (c *GrandChildCfg) Do() {
+}
+
+func configPtr() *ChildCfg {
+	return nil
+}
+func config() ChildCfg {
+	return ChildCfg{}
+}
+
+func init() {
+	fmt.Printf("%v %v", config().A, configPtr().B)
+	// This covers usage of both `ParCfg.child` and `ChildCfg.C`.
+	_ = defaultCfg.child.C
+	// Covers usage of grandChild.
+	defaultCfg.grandChild.Do()
+}
diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linter/testdata/src/pointercheck/pointercheck.go
new file mode 100644
index 0000000000..f63fdd1743
--- /dev/null
+++ b/linter/testdata/src/pointercheck/pointercheck.go
@@ -0,0 +1,50 @@
+package pointercheck
+
+import "fmt"
+
+type A struct {
+	x, y int
+}
+
+// pointerCmp compares pointers, sometimes inside nested expressions.
+func pointerCmp() {
+	a, b := &A{}, &A{}
+	// Simple comparisons.
+	if a != b {
+		fmt.Println("Not Equal")
+	}
+	if a == b {
+		fmt.Println("Equals")
+	}
+	// Nested binary expressions.
+	if (2 > 1) && (a != b) {
+		fmt.Println("Still not equal")
+	}
+	if (174%15 > 3) && (2 > 1 && (1+2 > 2 || a != b)) {
+		fmt.Println("Who knows at this point")
+	}
+	// Nested and inside a unary operator.
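+	// (The a == b below is still flagged: the analyzer visits every
+	// *ast.BinaryExpr regardless of nesting depth.)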
+ if 10 > 5 && !(2 > 1 || a == b) { + fmt.Println("Not equal") + } + c, d := 1, 2 + if &c != &d { + fmt.Println("Not equal") + } +} + +func legitCmps() { + a, b := &A{}, &A{} + if a.x == b.x { + fmt.Println("Allowed") + } +} + +type cache struct { + dirty *A +} + +// matches does pointer comparison. +func (c *cache) matches(a *A) bool { + return c.dirty == a +} diff --git a/linter/testdata/src/structinit/a/a.go b/linter/testdata/src/structinit/a/a.go new file mode 100644 index 0000000000..45f6059726 --- /dev/null +++ b/linter/testdata/src/structinit/a/a.go @@ -0,0 +1,33 @@ +package a + +import "fmt" + +// lint:require-exhaustive-initialization +type interestingStruct struct { + x int + b *boringStruct +} + +type boringStruct struct { + x, y int +} + +func init() { + a := &interestingStruct{ // Error: only single field is initialized. + x: 1, + } + fmt.Println(a) + b := interestingStruct{ // Error: only single field is initialized. + b: nil, + } + fmt.Println(b) + c := interestingStruct{ // Not an error, all fields are initialized. + x: 1, + b: nil, + } + fmt.Println(c) + d := &boringStruct{ // Not an error since it's not annotated for the linter. + x: 1, + } + fmt.Println(d) +} diff --git a/nitro-testnode b/nitro-testnode index 14f24a1bad..bb3f094f43 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 14f24a1bad2625412602d06156156c380bd589d2 +Subproject commit bb3f094f4359780c2a9aba28e15bb845be9b35a3 diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 22b3fe5a6e..378d48c780 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -162,6 +162,16 @@ func (con ArbGasInfo) GetL1BaseFeeEstimateInertia(c ctx, evm mech) (uint64, erro return c.State.L1PricingState().Inertia() } +// GetL1RewardRate gets the L1 pricer reward rate +func (con ArbGasInfo) GetL1RewardRate(c ctx, evm mech) (uint64, error) { + return c.State.L1PricingState().GetRewardsRate() +} + +// GetL1RewardRecipient gets the L1 pricer reward recipient +func (con ArbGasInfo) GetL1RewardRecipient(c ctx, evm mech) (common.Address, error) { + return c.State.L1PricingState().GetRewardsRecepient() +} + // GetL1GasPriceEstimate gets the current estimate of the L1 basefee func (con ArbGasInfo) GetL1GasPriceEstimate(c ctx, evm mech) (huge, error) { return con.GetL1BaseFeeEstimate(c, evm) diff --git a/precompiles/ArbOwner.go b/precompiles/ArbOwner.go index 1abf1d0d09..166768940b 100644 --- a/precompiles/ArbOwner.go +++ b/precompiles/ArbOwner.go @@ -142,6 +142,10 @@ func (con ArbOwner) SetAmortizedCostCapBips(c ctx, evm mech, cap uint64) error { return c.State.L1PricingState().SetAmortizedCostCapBips(cap) } +func (con ArbOwner) SetBrotliCompressionLevel(c ctx, evm mech, level uint64) error { + return c.State.SetBrotliCompressionLevel(level) +} + func (con ArbOwner) ReleaseL1PricerSurplusFunds(c ctx, evm mech, maxWeiToRelease huge) (huge, error) { balance := evm.StateDB.GetBalance(l1pricing.L1PricerFundsPoolAddress) l1p := c.State.L1PricingState() diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index a66ce8c17c..4064f41bef 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -11,7 +11,9 @@ import ( // The calls to this precompile do not require the sender be a chain owner. 
// For those that are, see ArbOwner type ArbOwnerPublic struct { - Address addr // 0x6b + Address addr // 0x6b + ChainOwnerRectified func(ctx, mech, addr) error + ChainOwnerRectifiedGasCost func(addr) (uint64, error) } // GetAllChainOwners retrieves the list of chain owners @@ -19,6 +21,15 @@ func (con ArbOwnerPublic) GetAllChainOwners(c ctx, evm mech) ([]common.Address, return c.State.ChainOwners().AllMembers(65536) } +// RectifyChainOwner checks if the account is a chain owner +func (con ArbOwnerPublic) RectifyChainOwner(c ctx, evm mech, addr addr) error { + err := c.State.ChainOwners().RectifyMapping(addr) + if err != nil { + return err + } + return con.ChainOwnerRectified(c, evm, addr) +} + // IsChainOwner checks if the user is a chain owner func (con ArbOwnerPublic) IsChainOwner(c ctx, evm mech, addr addr) (bool, error) { return c.State.ChainOwners().IsMember(addr) @@ -36,3 +47,8 @@ func (con ArbOwnerPublic) GetInfraFeeAccount(c ctx, evm mech) (addr, error) { } return c.State.InfraFeeAccount() } + +// GetBrotliCompressionLevel gets the current brotli compression level used for fast compression +func (con ArbOwnerPublic) GetBrotliCompressionLevel(c ctx, evm mech) (uint64, error) { + return c.State.BrotliCompressionLevel() +} diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index b5527e0017..ab128a8cb2 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -9,20 +9,17 @@ import ( "math/big" "testing" - "github.com/offchainlabs/nitro/arbos/l1pricing" - - "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/burn" - "github.com/offchainlabs/nitro/util/testhelpers" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/util/testhelpers" ) func TestArbOwner(t *testing.T) { @@ -99,7 +96,7 @@ func TestArbOwner(t *testing.T) { costCap, err := gasInfo.GetAmortizedCostCapBips(callCtx, evm) Require(t, err) - if costCap != math.MaxUint64 { + if costCap != 0 { Fail(t, costCap) } newCostCap := uint64(77734) diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 55dd1e3e4e..ded90ebdf7 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -366,7 +366,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr emitCost := gascost(args) cost := emitCost[0].Interface().(uint64) //nolint:errcheck if !emitCost[1].IsNil() { - // an error occured during gascost() + // an error occurred during gascost() return []reflect.Value{emitCost[1]} } if err := callerCtx.Burn(cost); err != nil { @@ -536,6 +536,8 @@ func Precompiles() map[addr]ArbosPrecompile { insert(MakePrecompile(templates.ArbosTestMetaData, &ArbosTest{Address: hex("69")})) ArbGasInfo := insert(MakePrecompile(templates.ArbGasInfoMetaData, &ArbGasInfo{Address: hex("6c")})) ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 + ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 + ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 insert(MakePrecompile(templates.ArbAggregatorMetaData, &ArbAggregator{Address: 
hex("6d")})) insert(MakePrecompile(templates.ArbStatisticsMetaData, &ArbStatistics{Address: hex("6f")})) @@ -551,6 +553,8 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublic := insert(MakePrecompile(templates.ArbOwnerPublicMetaData, &ArbOwnerPublic{Address: hex("6b")})) ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 + ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 + ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 12 ArbRetryableImpl := &ArbRetryableTx{Address: types.ArbRetryableTxAddress} ArbRetryable := insert(MakePrecompile(templates.ArbRetryableTxMetaData, ArbRetryableImpl)) @@ -586,6 +590,7 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = 5 ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = 10 ArbOwner.methodsByName["SetChainConfig"].arbosVersion = 11 + ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 12 insert(ownerOnly(ArbOwnerImpl.Address, ArbOwner, emitOwnerActs)) insert(debugOnly(MakePrecompile(templates.ArbDebugMetaData, &ArbDebug{Address: hex("ff")}))) diff --git a/relay/relay.go b/relay/relay.go index b9d70c513b..4288902865 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -7,7 +7,6 @@ import ( "context" "errors" "net" - "time" flag "github.com/spf13/pflag" @@ -52,7 +51,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { clients, err := broadcastclients.NewBroadcastClients( func() *broadcastclient.Config { return &config.Node.Feed.Input }, - config.L2.ChainId, + config.Chain.ID, 0, &q, confirmedSequenceNumberListener, @@ -70,16 +69,13 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { return nil, errors.New("relay attempted to sign feed message") } return &Relay{ - broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.L2.ChainId, feedErrChan, dataSignerErr), + broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.Chain.ID, feedErrChan, dataSignerErr), broadcastClients: clients, confirmedSequenceNumberChan: confirmedSequenceNumberListener, messageChan: q.queue, }, nil } -const RECENT_FEED_ITEM_TTL = time.Second * 10 -const RECENT_FEED_INITIAL_MAP_SIZE = 1024 - func (r *Relay) Start(ctx context.Context) error { r.StopWaiter.Start(ctx, r) err := r.broadcaster.Initialize() @@ -93,35 +89,16 @@ func (r *Relay) Start(ctx context.Context) error { r.broadcastClients.Start(ctx) - var lastConfirmed arbutil.MessageIndex - recentFeedItemsNew := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) - recentFeedItemsOld := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) r.LaunchThread(func(ctx context.Context) { - recentFeedItemsCleanup := time.NewTicker(RECENT_FEED_ITEM_TTL) - defer recentFeedItemsCleanup.Stop() for { select { case <-ctx.Done(): return case msg := <-r.messageChan: - if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok { - continue - } - if _, ok := recentFeedItemsOld[msg.SequenceNumber]; ok { - continue - } - recentFeedItemsNew[msg.SequenceNumber] = time.Now() sharedmetrics.UpdateSequenceNumberGauge(msg.SequenceNumber) r.broadcaster.BroadcastSingleFeedMessage(&msg) case cs := <-r.confirmedSequenceNumberChan: - if lastConfirmed == cs { - continue - } r.broadcaster.Confirm(cs) - case <-recentFeedItemsCleanup.C: - // Cycle buckets to get rid of old entries - 
recentFeedItemsOld = recentFeedItemsNew - recentFeedItemsNew = make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) } } }) @@ -141,22 +118,26 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` - L2 L2Config `koanf:"chain"` + Chain L2Config `koanf:"chain"` LogLevel int `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` Node NodeConfig `koanf:"node"` Queue int `koanf:"queue"` } var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, - L2: L2ConfigDefault, + Chain: L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, + PProf: false, + PprofCfg: genericconf.PProfDefault, Node: NodeConfigDefault, Queue: 1024, } @@ -168,8 +149,10 @@ func ConfigAddOptions(f *flag.FlagSet) { f.String("log-type", ConfigDefault.LogType, "log type") f.Bool("metrics", ConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) + f.Bool("pprof", ConfigDefault.PProf, "enable pprof") + genericconf.PProfAddOptions("pprof-cfg", f) NodeConfigAddOptions("node", f) - f.Int("queue", ConfigDefault.Queue, "size of relay queue") + f.Int("queue", ConfigDefault.Queue, "queue for incoming messages from sequencer") } type NodeConfig struct { @@ -185,15 +168,15 @@ func NodeConfigAddOptions(prefix string, f *flag.FlagSet) { } type L2Config struct { - ChainId uint64 `koanf:"id"` + ID uint64 `koanf:"id"` } var L2ConfigDefault = L2Config{ - ChainId: 0, + ID: 0, } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainId, "L2 chain ID") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID") } func ParseRelay(_ context.Context, args []string) (*Config, error) { diff --git a/solgen/gen.go b/solgen/gen.go index c29db93039..5d43946fa5 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -96,6 +96,27 @@ func main() { modInfo.addArtifact(artifact) } + // add upgrade executor module which is not compiled locally, but imported from 'nitro-contracts' depedencies + upgExecutorPath := filepath.Join(parent, "contracts", "node_modules", "@offchainlabs", "upgrade-executor", "build", "contracts", "src", "UpgradeExecutor.sol", "UpgradeExecutor.json") + _, err = os.Stat(upgExecutorPath) + if !os.IsNotExist(err) { + data, err := os.ReadFile(upgExecutorPath) + if err != nil { + // log.Fatal(string(output)) + log.Fatal("could not read", upgExecutorPath, "for contract", "UpgradeExecutor", err) + } + artifact := HardHatArtifact{} + if err := json.Unmarshal(data, &artifact); err != nil { + log.Fatal("failed to parse contract", "UpgradeExecutor", err) + } + modInfo := modules["upgrade_executorgen"] + if modInfo == nil { + modInfo = &moduleInfo{} + modules["upgrade_executorgen"] = modInfo + } + modInfo.addArtifact(artifact) + } + for module, info := range modules { code, err := bind.Bind( diff --git a/staker/block_validator.go b/staker/block_validator.go index cc5e793392..0966f3db6d 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -79,7 +79,7 @@ type BlockValidator struct { type BlockValidatorConfig struct { Enable bool `koanf:"enable"` ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` - ValidationPoll time.Duration `koanf:"check-validations-poll" reload:"hot"` + ValidationPoll 
time.Duration `koanf:"validation-poll" reload:"hot"` PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload @@ -101,7 +101,7 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation") rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer) - f.Duration(prefix+".check-validations-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") + f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)") f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)") f.String(prefix+".current-module-root", DefaultBlockValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash)") @@ -584,7 +584,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura var batchMsgs arbutil.MessageIndex var printedCount int64 if validated.GlobalState.Batch > 0 { - batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch) + batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch - 1) } if err != nil { printedCount = -1 @@ -737,7 +737,7 @@ func (v *BlockValidator) iterativeValidationProgress(ctx context.Context, ignore } else if reorg != nil { err := v.Reorg(ctx, *reorg) if err != nil { - log.Error("error trying to rorg validation", "pos", *reorg-1, "err", err) + log.Error("error trying to reorg validation", "pos", *reorg-1, "err", err) v.possiblyFatal(err) } } @@ -795,7 +795,7 @@ func (v *BlockValidator) InitAssumeValid(globalState validator.GoGlobalState) er v.legacyValidInfo = nil - err := v.writeLastValidated(v.lastValidGS, nil) + err := v.writeLastValidated(globalState, nil) if err != nil { log.Error("failed writing new validated to database", "pos", v.lastValidGS, "err", err) } @@ -855,7 +855,7 @@ func (v *BlockValidator) UpdateLatestStaked(count arbutil.MessageIndex, globalSt v.validatedA = countUint64 v.valLoopPos = count validatorMsgCountValidatedGauge.Update(int64(countUint64)) - err = v.writeLastValidated(v.lastValidGS, nil) // we don't know which wasm roots were validated + err = v.writeLastValidated(globalState, nil) // we don't know which wasm roots were validated if err != nil { log.Error("failed writing valid state after reorg", "err", err) } @@ -919,7 +919,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex) if v.validatedA > countUint64 { v.validatedA = countUint64 validatorMsgCountValidatedGauge.Update(int64(countUint64)) - err := v.writeLastValidated(v.lastValidGS, nil) // we don't know which wasm roots were validated + err := v.writeLastValidated(v.nextCreateStartGS, nil) // we don't know which wasm roots were validated if err != nil { log.Error("failed writing valid state after reorg", "err", err) } @@ -971,9 +971,12 @@ func (v *BlockValidator) checkLegacyValid() error { 
log.Warn("legacy valid batch ahead of db", "current", batchCount, "required", requiredBatchCount) return nil } - msgCount, err := v.inboxTracker.GetBatchMessageCount(v.legacyValidInfo.AfterPosition.BatchNumber) - if err != nil { - return err + var msgCount arbutil.MessageIndex + if v.legacyValidInfo.AfterPosition.BatchNumber > 0 { + msgCount, err = v.inboxTracker.GetBatchMessageCount(v.legacyValidInfo.AfterPosition.BatchNumber - 1) + if err != nil { + return err + } } msgCount += arbutil.MessageIndex(v.legacyValidInfo.AfterPosition.PosInBatch) processedCount, err := v.streamer.GetProcessedMessageCount() diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go deleted file mode 100644 index f7b83aac9f..0000000000 --- a/staker/eoa_validator_wallet.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package staker - -import ( - "context" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/solgen/go/challengegen" - "github.com/offchainlabs/nitro/solgen/go/rollupgen" -) - -type EoaValidatorWallet struct { - auth *bind.TransactOpts - client arbutil.L1Interface - rollupAddress common.Address - challengeManager *challengegen.ChallengeManager - challengeManagerAddress common.Address -} - -var _ ValidatorWalletInterface = (*EoaValidatorWallet)(nil) - -func NewEoaValidatorWallet(rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts) (*EoaValidatorWallet, error) { - return &EoaValidatorWallet{ - auth: auth, - client: l1Client, - rollupAddress: rollupAddress, - }, nil -} - -func (w *EoaValidatorWallet) Initialize(ctx context.Context) error { - rollup, err := rollupgen.NewRollupUserLogic(w.rollupAddress, w.client) - if err != nil { - return err - } - callOpts := &bind.CallOpts{Context: ctx} - w.challengeManagerAddress, err = rollup.ChallengeManager(callOpts) - if err != nil { - return err - } - w.challengeManager, err = challengegen.NewChallengeManager(w.challengeManagerAddress, w.client) - return err -} - -func (w *EoaValidatorWallet) Address() *common.Address { - return &w.auth.From -} - -func (w *EoaValidatorWallet) AddressOrZero() common.Address { - return w.auth.From -} - -func (w *EoaValidatorWallet) TxSenderAddress() *common.Address { - return &w.auth.From -} - -func (w *EoaValidatorWallet) L1Client() arbutil.L1Interface { - return w.client -} - -func (w *EoaValidatorWallet) RollupAddress() common.Address { - return w.rollupAddress -} - -func (w *EoaValidatorWallet) ChallengeManagerAddress() common.Address { - return w.challengeManagerAddress -} - -func (w *EoaValidatorWallet) TestTransactions(context.Context, []*types.Transaction) error { - // We only use the first tx which is checked implicitly by gas estimation - return nil -} - -func (w *EoaValidatorWallet) ExecuteTransactions(ctx context.Context, builder *ValidatorTxBuilder, _ common.Address) (*types.Transaction, error) { - if len(builder.transactions) == 0 { - return nil, nil - } - tx := builder.transactions[0] // we ignore future txs and only execute the first - err := w.client.SendTransaction(ctx, tx) - return tx, err -} - -func (w *EoaValidatorWallet) TimeoutChallenges(ctx context.Context, timeouts []uint64) (*types.Transaction, error) { - if len(timeouts) == 0 { - return nil, nil - } - auth := *w.auth - 
auth.Context = ctx - return w.challengeManager.Timeout(&auth, timeouts[0]) -} - -func (w *EoaValidatorWallet) CanBatchTxs() bool { - return false -} - -func (w *EoaValidatorWallet) AuthIfEoa() *bind.TransactOpts { - return w.auth -} diff --git a/staker/execution_reverted_test.go b/staker/execution_reverted_test.go new file mode 100644 index 0000000000..98a3bdfd61 --- /dev/null +++ b/staker/execution_reverted_test.go @@ -0,0 +1,26 @@ +package staker + +import ( + "io" + "testing" +) + +func TestExecutionRevertedRegexp(t *testing.T) { + executionRevertedErrors := []string{ + // go-ethereum and most other execution clients return "execution reverted" + "execution reverted", + // execution clients may decode the EVM revert data as a string and include it in the error + "execution reverted: FOO", + // besu returns "Execution reverted" + "Execution reverted", + } + for _, errString := range executionRevertedErrors { + if !executionRevertedRegexp.MatchString(errString) { + t.Fatalf("execution reverted regexp didn't match %q", errString) + } + } + // This regexp should not match random IO errors + if executionRevertedRegexp.MatchString(io.ErrUnexpectedEOF.Error()) { + t.Fatal("execution reverted regexp matched unexpected EOF") + } +} diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 21e8c8c4aa..ee24fc49d5 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -11,6 +11,8 @@ import ( "time" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/staker/txbuilder" + "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -44,7 +46,7 @@ type L1Validator struct { rollupAddress common.Address validatorUtils *rollupgen.ValidatorUtils client arbutil.L1Interface - builder *ValidatorTxBuilder + builder *txbuilder.Builder wallet ValidatorWalletInterface callOpts bind.CallOpts @@ -65,7 +67,7 @@ func NewL1Validator( txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, ) (*L1Validator, error) { - builder, err := NewValidatorTxBuilder(wallet) + builder, err := txbuilder.NewBuilder(wallet) if err != nil { return nil, err } @@ -221,15 +223,28 @@ type OurStakerInfo struct { *StakerInfo } -func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurStakerInfo, strategy StakerStrategy, makeAssertionInterval time.Duration) (nodeAction, bool, error) { - startState, prevInboxMaxCount, startStateProposedL1, startStateProposedParentChain, err := lookupNodeStartState(ctx, v.rollup, stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash) +func (v *L1Validator) generateNodeAction( + ctx context.Context, + stakerInfo *OurStakerInfo, + strategy StakerStrategy, + stakerConfig *L1ValidatorConfig, +) (nodeAction, bool, error) { + startState, prevInboxMaxCount, startStateProposedL1, startStateProposedParentChain, err := lookupNodeStartState( + ctx, v.rollup, stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash, + ) if err != nil { - return nil, false, fmt.Errorf("error looking up node %v (hash %v) start state: %w", stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash, err) + return nil, false, fmt.Errorf( + "error looking up node %v (hash %v) start state: %w", + stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash, err, + ) } - startStateProposedHeader, err := v.client.HeaderByNumber(ctx, new(big.Int).SetUint64(startStateProposedParentChain)) + startStateProposedHeader, err := v.client.HeaderByNumber(ctx, 
arbmath.UintToBig(startStateProposedParentChain)) if err != nil { - return nil, false, fmt.Errorf("error looking up L1 header of block %v of node start state: %w", startStateProposedParentChain, err) + return nil, false, fmt.Errorf( + "error looking up L1 header of block %v of node start state: %w", + startStateProposedParentChain, err, + ) } startStateProposedTime := time.Unix(int64(startStateProposedHeader.Time), 0) @@ -241,7 +256,10 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return nil, false, fmt.Errorf("error getting batch count from inbox tracker: %w", err) } if localBatchCount < startState.RequiredBatches() || localBatchCount == 0 { - log.Info("catching up to chain batches", "localBatches", localBatchCount, "target", startState.RequiredBatches()) + log.Info( + "catching up to chain batches", "localBatches", localBatchCount, + "target", startState.RequiredBatches(), + ) return nil, false, nil } @@ -275,7 +293,9 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return nil, false, err } validatedGlobalState = valInfo.GlobalState - caughtUp, validatedCount, err = GlobalStateToMsgCount(v.inboxTracker, v.txStreamer, valInfo.GlobalState) + caughtUp, validatedCount, err = GlobalStateToMsgCount( + v.inboxTracker, v.txStreamer, valInfo.GlobalState, + ) if err != nil { return nil, false, fmt.Errorf("%w: not found validated block in blockchain", err) } @@ -294,7 +314,13 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta } } if !wasmRootValid { - return nil, false, fmt.Errorf("wasmroot doesn't match rollup : %v, valid: %v", v.lastWasmModuleRoot, valInfo.WasmRoots) + if !stakerConfig.Dangerous.IgnoreRollupWasmModuleRoot { + return nil, false, fmt.Errorf( + "wasmroot doesn't match rollup : %v, valid: %v", + v.lastWasmModuleRoot, valInfo.WasmRoots, + ) + } + log.Warn("wasmroot doesn't match rollup", "rollup", v.lastWasmModuleRoot, "blockValidator", valInfo.WasmRoots) } } else { validatedCount, err = v.txStreamer.GetProcessedMessageCount() @@ -310,7 +336,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta batchNum = localBatchCount - 1 validatedCount = messageCount } else { - batchNum, err = v.inboxTracker.FindL1BatchForMessage(validatedCount - 1) + batchNum, err = v.inboxTracker.FindInboxBatchContainingMessage(validatedCount - 1) if err != nil { return nil, false, err } @@ -417,6 +443,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta return correctNode, wrongNodesExist, nil } + makeAssertionInterval := stakerConfig.MakeAssertionInterval if wrongNodesExist || (strategy >= MakeNodesStrategy && time.Since(startStateProposedTime) >= makeAssertionInterval) { // There's no correct node; create one. 
var lastNodeHashIfExists *common.Hash diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index aaaa30ce32..59a23d891d 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -9,6 +9,8 @@ import ( "errors" "fmt" "math/big" + "regexp" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -43,10 +45,11 @@ type StakerInfo struct { type RollupWatcher struct { *rollupgen.RollupUserLogic - address common.Address - fromBlock *big.Int - client arbutil.L1Interface - baseCallOpts bind.CallOpts + address common.Address + fromBlock *big.Int + client arbutil.L1Interface + baseCallOpts bind.CallOpts + unSupportedL3Method atomic.Bool } func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { @@ -69,17 +72,28 @@ func (r *RollupWatcher) getCallOpts(ctx context.Context) *bind.CallOpts { return &opts } +// A regexp matching "execution reverted" errors returned from the parent chain RPC. +var executionRevertedRegexp = regexp.MustCompile("(?i)execution reverted") + func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64) (*big.Int, error) { callOpts := r.getCallOpts(ctx) - createdAtBlock, err := r.GetNodeCreationBlockForLogLookup(callOpts, nodeNum) - if err != nil { + if !r.unSupportedL3Method.Load() { + createdAtBlock, err := r.GetNodeCreationBlockForLogLookup(callOpts, nodeNum) + if err == nil { + return createdAtBlock, nil + } log.Trace("failed to call getNodeCreationBlockForLogLookup, falling back on node CreatedAtBlock field", "err", err) - node, err := r.GetNode(callOpts, nodeNum) - if err != nil { + if executionRevertedRegexp.MatchString(err.Error()) { + r.unSupportedL3Method.Store(true) + } else { return nil, err } - createdAtBlock = new(big.Int).SetUint64(node.CreatedAtBlock) } + node, err := r.GetNode(callOpts, nodeNum) + if err != nil { + return nil, err + } + createdAtBlock := new(big.Int).SetUint64(node.CreatedAtBlock) return createdAtBlock, nil } diff --git a/staker/staker.go b/staker/staker.go index 61267b6a26..a34e955d81 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -17,10 +17,14 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rpc" flag "github.com/spf13/pflag" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" @@ -30,6 +34,7 @@ var ( stakerBalanceGauge = metrics.NewRegisteredGaugeFloat64("arb/staker/balance", nil) stakerAmountStakedGauge = metrics.NewRegisteredGauge("arb/staker/amount_staked", nil) stakerLatestStakedNodeGauge = metrics.NewRegisteredGauge("arb/staker/staked_node", nil) + stakerLatestConfirmedNodeGauge = metrics.NewRegisteredGauge("arb/staker/confirmed_node", nil) stakerLastSuccessfulActionGauge = metrics.NewRegisteredGauge("arb/staker/action/last_success", nil) stakerActionSuccessCounter = metrics.NewRegisteredCounter("arb/staker/action/success", nil) stakerActionFailureCounter = metrics.NewRegisteredCounter("arb/staker/action/failure", nil) @@ -67,20 +72,24 @@ func L1PostingStrategyAddOptions(prefix string, f *flag.FlagSet) { } type L1ValidatorConfig struct { 
- Enable bool `koanf:"enable"` - Strategy string `koanf:"strategy"` - StakerInterval time.Duration `koanf:"staker-interval"` - MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` - L1PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` - DisableChallenge bool `koanf:"disable-challenge"` - ConfirmationBlocks int64 `koanf:"confirmation-blocks"` - UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` - OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` - StartFromStaked bool `koanf:"start-validation-from-staked"` - ContractWalletAddress string `koanf:"contract-wallet-address"` - GasRefunderAddress string `koanf:"gas-refunder-address"` - Dangerous DangerousConfig `koanf:"dangerous"` - L1Wallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + Enable bool `koanf:"enable"` + Strategy string `koanf:"strategy"` + StakerInterval time.Duration `koanf:"staker-interval"` + MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` + PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` + DisableChallenge bool `koanf:"disable-challenge"` + ConfirmationBlocks int64 `koanf:"confirmation-blocks"` + UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` + OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + ContractWalletAddress string `koanf:"contract-wallet-address"` + GasRefunderAddress string `koanf:"gas-refunder-address"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` + Dangerous DangerousConfig `koanf:"dangerous"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` strategy StakerStrategy gasRefunder common.Address @@ -130,25 +139,50 @@ func (c *L1ValidatorConfig) Validate() error { } var DefaultL1ValidatorConfig = L1ValidatorConfig{ - Enable: true, - Strategy: "Watchtower", - StakerInterval: time.Minute, - MakeAssertionInterval: time.Hour, - L1PostingStrategy: L1PostingStrategy{}, - DisableChallenge: false, - ConfirmationBlocks: 12, - UseSmartContractWallet: false, - OnlyCreateWalletContract: false, - StartFromStaked: true, - ContractWalletAddress: "", - GasRefunderAddress: "", - Dangerous: DefaultDangerousConfig, - L1Wallet: DefaultValidatorL1WalletConfig, + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Minute, + MakeAssertionInterval: time.Hour, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 12, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + DataPoster: dataposter.DefaultDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: redislock.DefaultCfg, + ExtraGas: 50000, + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, +} + +var TestL1ValidatorConfig = L1ValidatorConfig{ + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Millisecond * 10, + MakeAssertionInterval: 0, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 0, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + DataPoster: dataposter.TestDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: 
redislock.DefaultCfg, + ExtraGas: 50000, + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ Pathname: "validator-wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, @@ -164,22 +198,29 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int64(prefix+".confirmation-blocks", DefaultL1ValidatorConfig.ConfirmationBlocks, "confirmation blocks") f.Bool(prefix+".use-smart-contract-wallet", DefaultL1ValidatorConfig.UseSmartContractWallet, "use a smart contract wallet instead of an EOA address") f.Bool(prefix+".only-create-wallet-contract", DefaultL1ValidatorConfig.OnlyCreateWalletContract, "only create smart wallet contract and exit") - f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartValidationFromStaked, "assume staked nodes are valid") f.String(prefix+".contract-wallet-address", DefaultL1ValidatorConfig.ContractWalletAddress, "validator smart contract wallet public address") f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") + f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") + f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) + redislock.AddConfigOptions(prefix+".redis-lock", f) DangerousConfigAddOptions(prefix+".dangerous", f) - genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.L1Wallet.Pathname) + genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) } type DangerousConfig struct { - WithoutBlockValidator bool `koanf:"without-block-validator"` + IgnoreRollupWasmModuleRoot bool `koanf:"ignore-rollup-wasm-module-root"` + WithoutBlockValidator bool `koanf:"without-block-validator"` } var DefaultDangerousConfig = DangerousConfig{ - WithoutBlockValidator: false, + IgnoreRollupWasmModuleRoot: false, + WithoutBlockValidator: false, } func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".ignore-rollup-wasm-module-root", DefaultL1ValidatorConfig.Dangerous.IgnoreRollupWasmModuleRoot, "DANGEROUS! make assertions even when the wasm module root is wrong") f.Bool(prefix+".without-block-validator", DefaultL1ValidatorConfig.Dangerous.WithoutBlockValidator, "DANGEROUS! 
allows running an L1 validator without a block validator") } @@ -192,11 +233,16 @@ type LatestStakedNotifier interface { UpdateLatestStaked(count arbutil.MessageIndex, globalState validator.GoGlobalState) } +type LatestConfirmedNotifier interface { + UpdateLatestConfirmed(count arbutil.MessageIndex, globalState validator.GoGlobalState) +} + type Staker struct { *L1Validator stopwaiter.StopWaiter l1Reader L1ReaderInterface - notifiers []LatestStakedNotifier + stakedNotifiers []LatestStakedNotifier + confirmedNotifiers []LatestConfirmedNotifier activeChallenge *ChallengeManager baseCallOpts bind.CallOpts config L1ValidatorConfig @@ -209,6 +255,27 @@ type Staker struct { fatalErr chan<- error } +type ValidatorWalletInterface interface { + Initialize(context.Context) error + // Address must be able to be called concurrently with other functions + Address() *common.Address + // Address must be able to be called concurrently with other functions + AddressOrZero() common.Address + TxSenderAddress() *common.Address + RollupAddress() common.Address + ChallengeManagerAddress() common.Address + L1Client() arbutil.L1Interface + TestTransactions(context.Context, []*types.Transaction) error + ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error) + TimeoutChallenges(context.Context, []uint64) (*types.Transaction, error) + CanBatchTxs() bool + AuthIfEoa() *bind.TransactOpts + Start(context.Context) + StopAndWait() + // May be nil + DataPoster() *dataposter.DataPoster +} + func NewStaker( l1Reader L1ReaderInterface, wallet ValidatorWalletInterface, @@ -216,7 +283,8 @@ func NewStaker( config L1ValidatorConfig, blockValidator *BlockValidator, statelessBlockValidator *StatelessBlockValidator, - notifiers []LatestStakedNotifier, + stakedNotifiers []LatestStakedNotifier, + confirmedNotifiers []LatestConfirmedNotifier, validatorUtilsAddress common.Address, fatalErr chan<- error, ) (*Staker, error) { @@ -231,16 +299,17 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config.StartFromStaked { - notifiers = append(notifiers, blockValidator) + if config.StartValidationFromStaked && blockValidator != nil { + stakedNotifiers = append(stakedNotifiers, blockValidator) } return &Staker{ L1Validator: val, l1Reader: l1Reader, - notifiers: notifiers, + stakedNotifiers: stakedNotifiers, + confirmedNotifiers: confirmedNotifiers, baseCallOpts: callOpts, config: config, - highGasBlocksBuffer: big.NewInt(config.L1PostingStrategy.HighGasDelayBlocks), + highGasBlocksBuffer: big.NewInt(config.PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, inboxReader: statelessBlockValidator.inboxReader, statelessBlockValidator: statelessBlockValidator, @@ -257,7 +326,7 @@ func (s *Staker) Initialize(ctx context.Context) error { if walletAddressOrZero != (common.Address{}) { s.updateStakerBalanceMetric(ctx) } - if s.blockValidator != nil && s.config.StartFromStaked { + if s.blockValidator != nil && s.config.StartValidationFromStaked { latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero) if err != nil { return err @@ -277,53 +346,63 @@ func (s *Staker) Initialize(ctx context.Context) error { return nil } -func (s *Staker) checkLatestStaked(ctx context.Context) error { - latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, s.wallet.AddressOrZero()) +func (s *Staker) getLatestStakedState(ctx context.Context, staker common.Address) (uint64, 
arbutil.MessageIndex, *validator.GoGlobalState, error) {
+	callOpts := s.getCallOpts(ctx)
+	if s.l1Reader.UseFinalityData() {
+		callOpts.BlockNumber = big.NewInt(int64(rpc.FinalizedBlockNumber))
+	}
+	latestStaked, _, err := s.validatorUtils.LatestStaked(callOpts, s.rollupAddress, staker)
 	if err != nil {
-		return fmt.Errorf("couldn't get LatestStaked: %w", err)
+		return 0, 0, nil, fmt.Errorf("couldn't get LatestStaked(%v): %w", staker, err)
 	}
-	stakerLatestStakedNodeGauge.Update(int64(latestStaked))
 	if latestStaked == 0 {
-		return nil
+		return latestStaked, 0, nil, nil
 	}
 	stakedInfo, err := s.rollup.LookupNode(ctx, latestStaked)
 	if err != nil {
-		return fmt.Errorf("couldn't look up latest node: %w", err)
+		return 0, 0, nil, fmt.Errorf("couldn't look up latest assertion of %v (%v): %w", staker, latestStaked, err)
 	}
-	stakedGlobalState := stakedInfo.AfterState().GlobalState
-	caughtUp, count, err := GlobalStateToMsgCount(s.inboxTracker, s.txStreamer, stakedGlobalState)
+	globalState := stakedInfo.AfterState().GlobalState
+	caughtUp, count, err := GlobalStateToMsgCount(s.inboxTracker, s.txStreamer, globalState)
 	if err != nil {
 		if errors.Is(err, ErrGlobalStateNotInChain) && s.fatalErr != nil {
-			fatal := fmt.Errorf("latest staked not in chain: %w", err)
+			fatal := fmt.Errorf("latest assertion of %v (%v) not in chain: %w", staker, latestStaked, err)
 			s.fatalErr <- fatal
 		}
-		return fmt.Errorf("staker: latest staked %w", err)
+		return 0, 0, nil, fmt.Errorf("latest assertion of %v (%v): %w", staker, latestStaked, err)
 	}
 	if !caughtUp {
-		log.Info("latest valid not yet in our node", "staked", stakedGlobalState)
-		return nil
+		log.Info("latest assertion not yet in our node", "staker", staker, "assertion", latestStaked, "state", globalState)
+		return latestStaked, 0, nil, nil
 	}
 	processedCount, err := s.txStreamer.GetProcessedMessageCount()
 	if err != nil {
-		return err
+		return 0, 0, nil, err
 	}
 	if processedCount < count {
-		log.Info("execution catching up to last validated", "validatedCount", count, "processedCount", processedCount)
-		return nil
+		log.Info("execution catching up to rollup", "staker", staker, "rollupCount", count, "processedCount", processedCount)
+		return latestStaked, 0, nil, nil
 	}
-	for _, notifier := range s.notifiers {
-		notifier.UpdateLatestStaked(count, stakedGlobalState)
+	return latestStaked, count, &globalState, nil
+}
+
+func (s *Staker) StopAndWait() {
+	s.StopWaiter.StopAndWait()
+	if s.Strategy() != WatchtowerStrategy {
+		s.wallet.StopAndWait()
 	}
-	return nil
 }
 
 func (s *Staker) Start(ctxIn context.Context) {
+	if s.Strategy() != WatchtowerStrategy {
+		s.wallet.Start(ctxIn)
+	}
 	s.StopWaiter.Start(ctxIn, s)
 	backoff := time.Second
 	s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) {
@@ -378,10 +457,32 @@ func (s *Staker) Start(ctxIn context.Context) {
 		return backoff
 	})
 	s.CallIteratively(func(ctx context.Context) time.Duration {
-		err := s.checkLatestStaked(ctx)
+		wallet := s.wallet.AddressOrZero()
+		staked, stakedMsgCount, stakedGlobalState, err := s.getLatestStakedState(ctx, wallet)
 		if err != nil && ctx.Err() == nil {
 			log.Error("staker: error checking latest staked", "err", err)
 		}
+		stakerLatestStakedNodeGauge.Update(int64(staked))
+		if stakedGlobalState != nil {
+			for _, notifier := range s.stakedNotifiers {
+				notifier.UpdateLatestStaked(stakedMsgCount, *stakedGlobalState)
+			}
+		}
+		confirmed := staked
+		confirmedMsgCount := stakedMsgCount
+		confirmedGlobalState := stakedGlobalState
+		if wallet != (common.Address{}) {
+			confirmed,
confirmedMsgCount, confirmedGlobalState, err = s.getLatestStakedState(ctx, common.Address{}) + if err != nil && ctx.Err() == nil { + log.Error("staker: error checking latest confirmed", "err", err) + } + } + stakerLatestConfirmedNodeGauge.Update(int64(confirmed)) + if confirmedGlobalState != nil { + for _, notifier := range s.confirmedNotifiers { + notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState) + } + } return s.config.StakerInterval }) } @@ -410,7 +511,7 @@ func (s *Staker) shouldAct(ctx context.Context) bool { log.Warn("error getting gas price", "err", err) } else { gasPriceFloat = float64(gasPrice.Int64()) / 1e9 - if gasPriceFloat >= s.config.L1PostingStrategy.HighGasThreshold { + if gasPriceFloat >= s.config.PostingStrategy.HighGasThreshold { gasPriceHigh = true } } @@ -435,14 +536,14 @@ func (s *Staker) shouldAct(ctx context.Context) bool { // Clamp `s.highGasBlocksBuffer` to between 0 and HighGasDelayBlocks if s.highGasBlocksBuffer.Sign() < 0 { s.highGasBlocksBuffer.SetInt64(0) - } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.L1PostingStrategy.HighGasDelayBlocks)) > 0 { - s.highGasBlocksBuffer.SetInt64(s.config.L1PostingStrategy.HighGasDelayBlocks) + } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.PostingStrategy.HighGasDelayBlocks)) > 0 { + s.highGasBlocksBuffer.SetInt64(s.config.PostingStrategy.HighGasDelayBlocks) } if gasPriceHigh && s.highGasBlocksBuffer.Sign() > 0 { log.Warn( "not acting yet as gas price is high", "gasPrice", gasPriceFloat, - "highGasPriceConfig", s.config.L1PostingStrategy.HighGasThreshold, + "highGasPriceConfig", s.config.PostingStrategy.HighGasThreshold, "highGasBuffer", s.highGasBlocksBuffer, ) return false @@ -450,8 +551,34 @@ func (s *Staker) shouldAct(ctx context.Context) bool { return true } +func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { + dp := s.wallet.DataPoster() + if dp == nil { + return nil + } + dataPosterNonce, _, err := dp.GetNextNonceAndMeta(ctx) + if err != nil { + return err + } + latestNonce, err := s.l1Reader.Client().NonceAt(ctx, dp.Sender(), nil) + if err != nil { + return err + } + if dataPosterNonce > latestNonce { + return fmt.Errorf("data poster nonce %v is ahead of on-chain nonce %v -- probably waiting for a pending transaction to be included in a block", dataPosterNonce, latestNonce) + } + if dataPosterNonce < latestNonce { + return fmt.Errorf("data poster nonce %v is behind on-chain nonce %v -- is something else making transactions on this address?", dataPosterNonce, latestNonce) + } + return nil +} + func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if s.config.strategy != WatchtowerStrategy { + err := s.confirmDataPosterIsReady(ctx) + if err != nil { + return nil, err + } whitelisted, err := s.IsWhitelisted(ctx) if err != nil { return nil, fmt.Errorf("error checking if whitelisted: %w", err) @@ -672,8 +799,8 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { newChallengeManager, err := NewChallengeManager( ctx, s.builder, - s.builder.builderAuth, - *s.builder.wallet.Address(), + s.builder.BuilderAuth(), + *s.builder.WalletAddress(), s.wallet.ChallengeManagerAddress(), *info.CurrentChallenge, s.statelessBlockValidator, @@ -693,7 +820,7 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiveStrategy StakerStrategy) error { active := effectiveStrategy >= StakeLatestStrategy - action, wrongNodesExist, err := 
s.generateNodeAction(ctx, info, effectiveStrategy, s.config.MakeAssertionInterval) + action, wrongNodesExist, err := s.generateNodeAction(ctx, info, effectiveStrategy, &s.config) if err != nil { return fmt.Errorf("error generating node action: %w", err) } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index e297e92386..6bc16085b0 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "regexp" "sync" "testing" @@ -56,7 +57,7 @@ type InboxTrackerInterface interface { GetBatchMessageCount(seqNum uint64) (arbutil.MessageIndex, error) GetBatchAcc(seqNum uint64) (common.Hash, error) GetBatchCount() (uint64, error) - FindL1BatchForMessage(pos arbutil.MessageIndex) (uint64, error) + FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, error) } type TransactionStreamerInterface interface { @@ -76,6 +77,7 @@ type L1ReaderInterface interface { Client() arbutil.L1Interface Subscribe(bool) (<-chan *types.Header, func()) WaitForTxApproval(tx *types.Transaction) containers.PromiseInterface[*types.Receipt] + UseFinalityData() bool } type GlobalStatePosition struct { @@ -136,7 +138,7 @@ type validationEntry struct { // Has batch when created - others could be added on record BatchInfo []validator.BatchInfo // Valid since Ready - Preimages map[common.Hash][]byte + Preimages map[arbutil.PreimageType]map[common.Hash][]byte DelayedMsg []byte } @@ -229,6 +231,7 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if e.Stage != ReadyForRecord { return fmt.Errorf("validation entry should be ReadyForRecord, is: %v", e.Stage) } + e.Preimages = make(map[arbutil.PreimageType]map[common.Hash][]byte) if e.Pos != 0 { recording, err := v.recorder.RecordBlockCreation(e.Pos, e.msg).Await(ctx) if err != nil { @@ -240,7 +243,7 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * e.BatchInfo = append(e.BatchInfo, recording.BatchInfo...) 
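Note the shape change to Preimages: entries are now bucketed by preimage type instead of being stored flat, so a lookup that previously read e.Preimages[hash] becomes, illustratively (for a recorded Keccak256 preimage):

	data := e.Preimages[arbutil.Keccak256PreimageType][hash]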
if recording.Preimages != nil { - e.Preimages = recording.Preimages + e.Preimages[arbutil.Keccak256PreimageType] = recording.Preimages } } if e.HasDelayedMsg { @@ -254,9 +257,6 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * } e.DelayedMsg = delayedMsg } - if e.Preimages == nil { - e.Preimages = make(map[common.Hash][]byte) - } for _, batch := range e.BatchInfo { if len(batch.Data) <= 40 { continue @@ -298,7 +298,7 @@ func (v *StatelessBlockValidator) GlobalStatePositionsAtCount(count arbutil.Mess if count == 1 { return GlobalStatePosition{}, GlobalStatePosition{1, 0}, nil } - batch, err := v.inboxTracker.FindL1BatchForMessage(count - 1) + batch, err := v.inboxTracker.FindInboxBatchContainingMessage(count - 1) if err != nil { return GlobalStatePosition{}, GlobalStatePosition{}, err } @@ -409,8 +409,9 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { } v.pendingWasmModuleRoot = latest } else { + valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", v.config.PendingUpgradeModuleRoot) v.pendingWasmModuleRoot = common.HexToHash(v.config.PendingUpgradeModuleRoot) - if (v.pendingWasmModuleRoot == common.Hash{}) { + if (!valid || v.pendingWasmModuleRoot == common.Hash{}) { return errors.New("pending-upgrade-module-root config value illegal") } } diff --git a/staker/builder_backend.go b/staker/txbuilder/builder.go similarity index 64% rename from staker/builder_backend.go rename to staker/txbuilder/builder.go index 1bf15ff027..9a5e9df2b5 100644 --- a/staker/builder_backend.go +++ b/staker/txbuilder/builder.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package txbuilder import ( "context" @@ -15,12 +15,21 @@ import ( "github.com/offchainlabs/nitro/arbutil" ) -// ValidatorTxBuilder combines any transactions sent to it via SendTransaction into one batch, +type ValidatorWalletInterface interface { + // Address must be able to be called concurrently with other functions + Address() *common.Address + L1Client() arbutil.L1Interface + TestTransactions(context.Context, []*types.Transaction) error + ExecuteTransactions(context.Context, *Builder, common.Address) (*types.Transaction, error) + AuthIfEoa() *bind.TransactOpts +} + +// Builder combines any transactions sent to it via SendTransaction into one batch, // which is then sent to the validator wallet. // This lets the validator make multiple atomic transactions. // This inherits from an eth client so it can be used as an L1Interface, // where it transparently intercepts calls to SendTransaction and queues them for the next batch. 
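A usage sketch of the renamed builder may help before the struct itself; ctx, wallet (any ValidatorWalletInterface implementation), tx1, tx2, and gasRefunder are placeholders, not part of this change:

	builder, err := txbuilder.NewBuilder(wallet)
	if err != nil {
		return err
	}
	// SendTransaction only queues; it also dry-runs the pending batch
	// via wallet.TestTransactions, surfacing errors early.
	if err := builder.SendTransaction(ctx, tx1); err != nil {
		return err
	}
	if err := builder.SendTransaction(ctx, tx2); err != nil {
		return err
	}
	// One L1 transaction then executes the whole queue atomically.
	batchTx, err := wallet.ExecuteTransactions(ctx, builder, gasRefunder)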
-type ValidatorTxBuilder struct {
+type Builder struct {
 	arbutil.L1Interface
 	transactions []*types.Transaction
 	builderAuth  *bind.TransactOpts
@@ -28,7 +37,7 @@ type ValidatorTxBuilder struct {
 	wallet       ValidatorWalletInterface
 }
 
-func NewValidatorTxBuilder(wallet ValidatorWalletInterface) (*ValidatorTxBuilder, error) {
+func NewBuilder(wallet ValidatorWalletInterface) (*Builder, error) {
 	randKey, err := crypto.GenerateKey()
 	if err != nil {
 		return nil, err
@@ -43,7 +52,7 @@ func NewValidatorTxBuilder(wallet ValidatorWalletInterface) (*ValidatorTxBuilder
 		}
 		isAuthFake = true
 	}
-	return &ValidatorTxBuilder{
+	return &Builder{
 		builderAuth: builderAuth,
 		wallet:      wallet,
 		L1Interface: wallet.L1Client(),
@@ -51,22 +60,22 @@ func NewValidatorTxBuilder(wallet ValidatorWalletInterface) (*ValidatorTxBuilder
 	}, nil
 }
 
-func (b *ValidatorTxBuilder) BuildingTransactionCount() int {
+func (b *Builder) BuildingTransactionCount() int {
 	return len(b.transactions)
 }
 
-func (b *ValidatorTxBuilder) ClearTransactions() {
+func (b *Builder) ClearTransactions() {
 	b.transactions = nil
 }
 
-func (b *ValidatorTxBuilder) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
+func (b *Builder) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
 	if len(b.transactions) == 0 && !b.isAuthFake {
 		return b.L1Interface.EstimateGas(ctx, call)
 	}
 	return 0, nil
 }
 
-func (b *ValidatorTxBuilder) SendTransaction(ctx context.Context, tx *types.Transaction) error {
+func (b *Builder) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 	b.transactions = append(b.transactions, tx)
 	err := b.wallet.TestTransactions(ctx, b.transactions)
 	if err != nil {
@@ -80,7 +89,7 @@ func (b *ValidatorTxBuilder) SendTransaction(ctx context.Context, tx *types.Tran
 // While this is not currently required, it's recommended not to reuse the returned auth for multiple transactions,
 // as for an EOA this has the nonce in it. However, the EOA wallet currently will only publish the first created tx,
 // which is why that doesn't really matter.
-func (b *ValidatorTxBuilder) AuthWithAmount(ctx context.Context, amount *big.Int) (*bind.TransactOpts, error) {
+func (b *Builder) AuthWithAmount(ctx context.Context, amount *big.Int) (*bind.TransactOpts, error) {
 	nonce, err := b.NonceAt(ctx, b.builderAuth.From, nil)
 	if err != nil {
 		return nil, err
@@ -98,6 +107,20 @@
 
 // Auth is the same as AuthWithAmount with a 0 amount specified.
 // See AuthWithAmount docs for important details.
-func (b *ValidatorTxBuilder) Auth(ctx context.Context) (*bind.TransactOpts, error) {
+func (b *Builder) Auth(ctx context.Context) (*bind.TransactOpts, error) {
 	return b.AuthWithAmount(ctx, common.Big0)
 }
+
+func (b *Builder) Transactions() []*types.Transaction {
+	return b.transactions
+}
+
+// BuilderAuth returns the builder's internal TransactOpts.
+func (b *Builder) BuilderAuth() *bind.TransactOpts {
+	return b.builderAuth
+}
+
+func (b *Builder) WalletAddress() *common.Address {
+	return b.wallet.Address()
+}
diff --git a/staker/validator_wallet.go b/staker/validatorwallet/contract.go
similarity index 57%
rename from staker/validator_wallet.go
rename to staker/validatorwallet/contract.go
index 845f3b2866..52cb1678db 100644
--- a/staker/validator_wallet.go
+++ b/staker/validatorwallet/contract.go
@@ -1,13 +1,16 @@
 // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package staker +package validatorwallet import ( "context" "errors" + "fmt" "math/big" "strings" + "sync/atomic" + "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -15,8 +18,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/staker/txbuilder" + "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" + "github.com/offchainlabs/nitro/util/headerreader" ) var validatorABI abi.ABI @@ -36,37 +45,23 @@ func init() { walletCreatedID = parsedValidatorWalletCreator.Events["WalletCreated"].ID } -type ValidatorWalletInterface interface { - Initialize(context.Context) error - Address() *common.Address - AddressOrZero() common.Address - TxSenderAddress() *common.Address - RollupAddress() common.Address - ChallengeManagerAddress() common.Address - L1Client() arbutil.L1Interface - TestTransactions(context.Context, []*types.Transaction) error - ExecuteTransactions(context.Context, *ValidatorTxBuilder, common.Address) (*types.Transaction, error) - TimeoutChallenges(context.Context, []uint64) (*types.Transaction, error) - CanBatchTxs() bool - AuthIfEoa() *bind.TransactOpts -} - -type ContractValidatorWallet struct { +type Contract struct { con *rollupgen.ValidatorWallet - address *common.Address + address atomic.Pointer[common.Address] onWalletCreated func(common.Address) - l1Reader L1ReaderInterface + l1Reader *headerreader.HeaderReader auth *bind.TransactOpts walletFactoryAddr common.Address rollupFromBlock int64 rollup *rollupgen.RollupUserLogic rollupAddress common.Address challengeManagerAddress common.Address + dataPoster *dataposter.DataPoster + getExtraGas func() uint64 } -var _ ValidatorWalletInterface = (*ContractValidatorWallet)(nil) - -func NewContractValidatorWallet(address *common.Address, walletFactoryAddr, rollupAddress common.Address, l1Reader L1ReaderInterface, auth *bind.TransactOpts, rollupFromBlock int64, onWalletCreated func(common.Address)) (*ContractValidatorWallet, error) { +func NewContract(dp *dataposter.DataPoster, address *common.Address, walletFactoryAddr, rollupAddress common.Address, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, rollupFromBlock int64, onWalletCreated func(common.Address), + getExtraGas func() uint64) (*Contract, error) { var con *rollupgen.ValidatorWallet if address != nil { var err error @@ -79,9 +74,8 @@ func NewContractValidatorWallet(address *common.Address, walletFactoryAddr, roll if err != nil { return nil, err } - return &ContractValidatorWallet{ + wallet := &Contract{ con: con, - address: address, onWalletCreated: onWalletCreated, l1Reader: l1Reader, auth: auth, @@ -89,10 +83,15 @@ func NewContractValidatorWallet(address *common.Address, walletFactoryAddr, roll rollupAddress: rollupAddress, rollup: rollup, rollupFromBlock: rollupFromBlock, - }, nil + dataPoster: dp, + getExtraGas: getExtraGas, + } + // Go complains if we make an address variable before wallet and copy it in + wallet.address.Store(address) + return wallet, nil } -func (v *ContractValidatorWallet) validateWallet(ctx context.Context) error { +func (v *Contract) validateWallet(ctx context.Context) error { if v.con 
== nil || v.auth == nil {
 		return nil
 	}
@@ -111,7 +110,7 @@ func (v *ContractValidatorWallet) validateWallet(ctx context.Context) error {
 	return nil
 }
 
-func (v *ContractValidatorWallet) Initialize(ctx context.Context) error {
+func (v *Contract) Initialize(ctx context.Context) error {
 	err := v.populateWallet(ctx, false)
 	if err != nil {
 		return err
@@ -126,26 +125,27 @@
 }
 
 // May be nil if the wallet hasn't been deployed yet
-func (v *ContractValidatorWallet) Address() *common.Address {
-	return v.address
+func (v *Contract) Address() *common.Address {
+	return v.address.Load()
 }
 
 // May be zero if the wallet hasn't been deployed yet
-func (v *ContractValidatorWallet) AddressOrZero() common.Address {
-	if v.address == nil {
+func (v *Contract) AddressOrZero() common.Address {
+	addr := v.address.Load()
+	if addr == nil {
 		return common.Address{}
 	}
-	return *v.address
+	return *addr
 }
 
-func (v *ContractValidatorWallet) TxSenderAddress() *common.Address {
+func (v *Contract) TxSenderAddress() *common.Address {
 	if v.auth == nil {
 		return nil
 	}
 	return &v.auth.From
 }
 
-func (v *ContractValidatorWallet) From() common.Address {
+func (v *Contract) From() common.Address {
 	if v.auth == nil {
 		return common.Address{}
 	}
@@ -153,7 +153,7 @@ func (v *ContractValidatorWallet) From() common.Address {
 }
 
 // nil value == 0 value
-func (v *ContractValidatorWallet) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) {
+func (v *Contract) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) {
 	newAuth := *v.auth
 	newAuth.Context = ctx
 	newAuth.Value = value
@@ -165,15 +165,23 @@ func (v *ContractValidatorWallet) getAuth(ctx context.Context, value *big.Int) (
 	return &newAuth, nil
 }
 
-func (v *ContractValidatorWallet) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) {
+func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) {
 	auth, err := v.getAuth(ctx, tx.Value())
 	if err != nil {
 		return nil, err
 	}
-	return v.con.ExecuteTransactionWithGasRefunder(auth, gasRefunder, tx.Data(), *tx.To(), tx.Value())
+	data, err := validatorABI.Pack("executeTransactionWithGasRefunder", gasRefunder, tx.Data(), *tx.To(), tx.Value())
+	if err != nil {
+		return nil, fmt.Errorf("packing arguments for executeTransactionWithGasRefunder: %w", err)
+	}
+	gas, err := v.gasForTxData(ctx, auth, data)
+	if err != nil {
+		return nil, fmt.Errorf("getting gas for tx data: %w", err)
+	}
+	return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil)
 }
 
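With posting routed through the data poster, the wallet now computes its own gas limit: gasForTxData (defined further below) honors a nonzero auth.GasLimit and otherwise estimates against a fee cap of max(2x baseFee, 1 gwei). A standalone sketch of that cap rule, using plain big.Int in place of the arbmath helpers (the function and package names here are illustrative):

	package example

	import (
		"math/big"

		"github.com/ethereum/go-ethereum/params"
	)

	// exampleFeeCap mirrors the rule used by estimateGas below: double the
	// latest base fee, floored at 1 gwei.
	func exampleFeeCap(baseFee *big.Int) *big.Int {
		feeCap := new(big.Int).Mul(baseFee, big.NewInt(2))
		oneGwei := big.NewInt(params.GWei)
		if feeCap.Cmp(oneGwei) < 0 {
			return oneGwei
		}
		return feeCap
	}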
-func (v *ContractValidatorWallet) populateWallet(ctx context.Context, createIfMissing bool) error {
+func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error {
 	if v.con != nil {
 		return nil
 	}
@@ -183,7 +191,7 @@ func (v *ContractValidatorWallet) populateWallet(ctx context.Context, createIfMi
 		}
 		return nil
 	}
-	if v.address == nil {
+	if v.address.Load() == nil {
 		auth, err := v.getAuth(ctx, nil)
 		if err != nil {
 			return err
@@ -195,12 +203,12 @@ func (v *ContractValidatorWallet) populateWallet(ctx context.Context, createIfMi
 		if addr == nil {
 			return nil
 		}
-		v.address = addr
+		v.address.Store(addr)
 		if v.onWalletCreated != nil {
 			v.onWalletCreated(*addr)
 		}
 	}
-	con, err := rollupgen.NewValidatorWallet(*v.address, v.l1Reader.Client())
+	con, err := rollupgen.NewValidatorWallet(*v.Address(), v.l1Reader.Client())
 	if err != nil {
 		return err
 	}
@@ -228,8 +236,8 @@ func combineTxes(txes []*types.Transaction) ([][]byte, []common.Address, []*big.
 }
 
 // Not thread safe! Don't call this from multiple threads at the same time.
-func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, builder *ValidatorTxBuilder, gasRefunder common.Address) (*types.Transaction, error) {
-	txes := builder.transactions
+func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.Builder, gasRefunder common.Address) (*types.Transaction, error) {
+	txes := builder.Transactions()
 	if len(txes) == 0 {
 		return nil, nil
 	}
@@ -244,7 +252,7 @@ func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, build
 		if err != nil {
 			return nil, err
 		}
-		builder.transactions = nil
+		builder.ClearTransactions()
 		return arbTx, nil
 	}
@@ -260,7 +268,7 @@ func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, build
 		totalAmount = totalAmount.Add(totalAmount, tx.Value())
 	}
 
-	balanceInContract, err := v.l1Reader.Client().BalanceAt(ctx, *v.address, nil)
+	balanceInContract, err := v.l1Reader.Client().BalanceAt(ctx, *v.Address(), nil)
 	if err != nil {
 		return nil, err
 	}
@@ -273,35 +281,88 @@ func (v *ContractValidatorWallet) ExecuteTransactions(ctx context.Context, build
 	if err != nil {
 		return nil, err
 	}
-	arbTx, err := v.con.ExecuteTransactionsWithGasRefunder(auth, gasRefunder, data, dest, amount)
+	txData, err := validatorABI.Pack("executeTransactionsWithGasRefunder", gasRefunder, data, dest, amount)
+	if err != nil {
+		return nil, fmt.Errorf("packing arguments for executeTransactionsWithGasRefunder: %w", err)
+	}
+	gas, err := v.gasForTxData(ctx, auth, txData)
+	if err != nil {
+		return nil, fmt.Errorf("getting gas for tx data: %w", err)
+	}
+	arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil)
 	if err != nil {
 		return nil, err
 	}
-	builder.transactions = nil
+	builder.ClearTransactions()
 	return arbTx, nil
 }
 
-func (v *ContractValidatorWallet) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) {
+func (v *Contract) estimateGas(ctx context.Context, value *big.Int, data []byte) (uint64, error) {
+	h, err := v.l1Reader.LastHeader(ctx)
+	if err != nil {
+		return 0, fmt.Errorf("getting the last header: %w", err)
+	}
+	gasFeeCap := new(big.Int).Mul(h.BaseFee, big.NewInt(2))
+	gasFeeCap = arbmath.BigMax(gasFeeCap, arbmath.FloatToBig(params.GWei))
+
+	gasTipCap, err := v.l1Reader.Client().SuggestGasTipCap(ctx)
+	if err != nil {
+		return 0, fmt.Errorf("getting suggested gas tip cap: %w", err)
+	}
+	g, err := v.l1Reader.Client().EstimateGas(
+		ctx,
+		ethereum.CallMsg{
+			From:      v.auth.From,
+			To:        v.Address(),
+			Value:     value,
+			Data:      data,
+			GasFeeCap: gasFeeCap,
+			GasTipCap: gasTipCap,
+		},
+	)
+	if err != nil {
+		return 0, fmt.Errorf("estimating gas: %w", err)
+	}
+	return g + v.getExtraGas(), nil
+}
+
+func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) {
 	auth, err := v.getAuth(ctx, nil)
 	if err != nil {
 		return nil, err
 	}
-	return v.con.TimeoutChallenges(auth, v.challengeManagerAddress, challenges)
+	data, err := validatorABI.Pack("timeoutChallenges", v.challengeManagerAddress, challenges)
+	if err != nil {
+		return nil, fmt.Errorf("packing arguments for timeoutChallenges: %w", err)
+	}
+	gas, err := v.gasForTxData(ctx, auth, data)
+	if err != nil {
+		return nil, 
fmt.Errorf("getting gas for tx data: %w", err) + } + return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) +} + +// gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. +func (v *Contract) gasForTxData(ctx context.Context, auth *bind.TransactOpts, data []byte) (uint64, error) { + if auth.GasLimit != 0 { + return auth.GasLimit, nil + } + return v.estimateGas(ctx, auth.Value, data) } -func (v *ContractValidatorWallet) L1Client() arbutil.L1Interface { +func (v *Contract) L1Client() arbutil.L1Interface { return v.l1Reader.Client() } -func (v *ContractValidatorWallet) RollupAddress() common.Address { +func (v *Contract) RollupAddress() common.Address { return v.rollupAddress } -func (v *ContractValidatorWallet) ChallengeManagerAddress() common.Address { +func (v *Contract) ChallengeManagerAddress() common.Address { return v.challengeManagerAddress } -func (v *ContractValidatorWallet) TestTransactions(ctx context.Context, txs []*types.Transaction) error { +func (v *Contract) TestTransactions(ctx context.Context, txs []*types.Transaction) error { if v.Address() == nil { return nil } @@ -320,14 +381,33 @@ func (v *ContractValidatorWallet) TestTransactions(ctx context.Context, txs []*t return err } -func (v *ContractValidatorWallet) CanBatchTxs() bool { +func (v *Contract) CanBatchTxs() bool { return true } -func (v *ContractValidatorWallet) AuthIfEoa() *bind.TransactOpts { +func (v *Contract) AuthIfEoa() *bind.TransactOpts { return nil } +func (w *Contract) Start(ctx context.Context) { + w.dataPoster.Start(ctx) +} + +func (b *Contract) StopAndWait() { + b.dataPoster.StopAndWait() +} + +func (b *Contract) DataPoster() *dataposter.DataPoster { + return b.dataPoster +} + +type L1ReaderInterface interface { + Client() arbutil.L1Interface + Subscribe(bool) (<-chan *types.Header, func()) + WaitForTxApproval(tx *types.Transaction) containers.PromiseInterface[*types.Receipt] + UseFinalityData() bool +} + func GetValidatorWalletContract( ctx context.Context, validatorWalletFactoryAddr common.Address, diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go new file mode 100644 index 0000000000..d86181f42f --- /dev/null +++ b/staker/validatorwallet/eoa.go @@ -0,0 +1,137 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package validatorwallet + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/solgen/go/challengegen" + "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/staker/txbuilder" +) + +type EOA struct { + auth *bind.TransactOpts + client arbutil.L1Interface + rollupAddress common.Address + challengeManager *challengegen.ChallengeManager + challengeManagerAddress common.Address + dataPoster *dataposter.DataPoster + getExtraGas func() uint64 +} + +func NewEOA(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts, getExtraGas func() uint64) (*EOA, error) { + return &EOA{ + auth: auth, + client: l1Client, + rollupAddress: rollupAddress, + dataPoster: dataPoster, + getExtraGas: getExtraGas, + }, nil +} + +func (w *EOA) Initialize(ctx context.Context) error { + rollup, err := rollupgen.NewRollupUserLogic(w.rollupAddress, w.client) + if err != nil { + return err + } + callOpts := &bind.CallOpts{Context: ctx} + w.challengeManagerAddress, err = rollup.ChallengeManager(callOpts) + if err != nil { + return err + } + w.challengeManager, err = challengegen.NewChallengeManager(w.challengeManagerAddress, w.client) + return err +} + +func (w *EOA) Address() *common.Address { + return &w.auth.From +} + +func (w *EOA) AddressOrZero() common.Address { + return w.auth.From +} + +func (w *EOA) TxSenderAddress() *common.Address { + return &w.auth.From +} + +func (w *EOA) L1Client() arbutil.L1Interface { + return w.client +} + +func (w *EOA) RollupAddress() common.Address { + return w.rollupAddress +} + +func (w *EOA) ChallengeManagerAddress() common.Address { + return w.challengeManagerAddress +} + +func (w *EOA) TestTransactions(context.Context, []*types.Transaction) error { + // We only use the first tx which is checked implicitly by gas estimation + return nil +} + +func (w *EOA) ExecuteTransactions(ctx context.Context, builder *txbuilder.Builder, _ common.Address) (*types.Transaction, error) { + if len(builder.Transactions()) == 0 { + return nil, nil + } + tx := builder.Transactions()[0] // we ignore future txs and only execute the first + return w.postTransaction(ctx, tx) +} + +func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (*types.Transaction, error) { + nonce, err := w.L1Client().NonceAt(ctx, w.auth.From, nil) + if err != nil { + return nil, err + } + gas := baseTx.Gas() + w.getExtraGas() + newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) + if err != nil { + return nil, fmt.Errorf("post transaction: %w", err) + } + return newTx, nil +} + +func (w *EOA) TimeoutChallenges(ctx context.Context, timeouts []uint64) (*types.Transaction, error) { + if len(timeouts) == 0 { + return nil, nil + } + auth := *w.auth + auth.Context = ctx + auth.NoSend = true + tx, err := w.challengeManager.Timeout(&auth, timeouts[0]) + if err != nil { + return nil, err + } + return w.postTransaction(ctx, tx) +} + +func (w *EOA) CanBatchTxs() bool { + return false +} + +func (w *EOA) AuthIfEoa() *bind.TransactOpts { + return w.auth +} + +func (w *EOA) Start(ctx context.Context) { + 
w.dataPoster.Start(ctx) +} + +func (b *EOA) StopAndWait() { + b.dataPoster.StopAndWait() +} + +func (b *EOA) DataPoster() *dataposter.DataPoster { + return b.dataPoster +} diff --git a/staker/validatorwallet/noop.go b/staker/validatorwallet/noop.go new file mode 100644 index 0000000000..b050ebe861 --- /dev/null +++ b/staker/validatorwallet/noop.go @@ -0,0 +1,67 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package validatorwallet + +import ( + "context" + "errors" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/staker/txbuilder" +) + +// NoOp validator wallet is used for watchtower mode. +type NoOp struct { + l1Client arbutil.L1Interface + rollupAddress common.Address +} + +func NewNoOp(l1Client arbutil.L1Interface, rollupAddress common.Address) *NoOp { + return &NoOp{ + l1Client: l1Client, + rollupAddress: rollupAddress, + } +} + +func (*NoOp) Initialize(context.Context) error { return nil } + +func (*NoOp) Address() *common.Address { return nil } + +func (*NoOp) AddressOrZero() common.Address { return common.Address{} } + +func (*NoOp) TxSenderAddress() *common.Address { return nil } + +func (*NoOp) From() common.Address { return common.Address{} } + +func (*NoOp) ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error) { + return nil, errors.New("no op validator wallet cannot execute transactions") +} + +func (*NoOp) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) { + return nil, errors.New("no op validator wallet cannot timeout challenges") +} + +func (n *NoOp) L1Client() arbutil.L1Interface { return n.l1Client } + +func (n *NoOp) RollupAddress() common.Address { return n.rollupAddress } + +func (*NoOp) ChallengeManagerAddress() common.Address { return common.Address{} } + +func (*NoOp) TestTransactions(ctx context.Context, txs []*types.Transaction) error { + return nil +} + +func (*NoOp) CanBatchTxs() bool { return false } + +func (*NoOp) AuthIfEoa() *bind.TransactOpts { return nil } + +func (w *NoOp) Start(ctx context.Context) {} + +func (b *NoOp) StopAndWait() {} + +func (b *NoOp) DataPoster() *dataposter.DataPoster { return nil } diff --git a/system_tests/aliasing_test.go b/system_tests/aliasing_test.go index 5e4e65a2ca..60a89468a5 100644 --- a/system_tests/aliasing_test.go +++ b/system_tests/aliasing_test.go @@ -22,20 +22,20 @@ func TestAliasing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - user := l1info.GetDefaultTransactOpts("User", ctx) - TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + user := builder.L1Info.GetDefaultTransactOpts("User", ctx) + builder.L2.TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), builder.L2Info) - simpleAddr, simple := deploySimple(t, ctx, auth, l2client) + simpleAddr, 
simple := builder.L2.DeploySimple(t, auth) simpleContract, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) Require(t, err) // Test direct calls - arbsys, err := precompilesgen.NewArbSys(types.ArbSysAddress, l2client) + arbsys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) top, err := arbsys.IsTopLevelCall(nil) Require(t, err) @@ -56,14 +56,14 @@ func TestAliasing(t *testing.T) { // check via L2 tx, err := simple.CheckCalls(&auth, top, direct, static, delegate, callcode, call) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // check signed txes via L1 data, err := simpleContract.Pack("checkCalls", top, direct, static, delegate, callcode, call) Require(t, err) - tx = l2info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) - SendSignedTxViaL1(t, ctx, l1info, l1client, l2client, tx) + tx = builder.L2Info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) + builder.L1.SendSignedTx(t, builder.L2.Client, tx, builder.L1Info) } testUnsigned := func(top, direct, static, delegate, callcode, call bool) { @@ -72,8 +72,8 @@ func TestAliasing(t *testing.T) { // check unsigned txes via L1 data, err := simpleContract.Pack("checkCalls", top, direct, static, delegate, callcode, call) Require(t, err) - tx := l2info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) - SendUnsignedTxViaL1(t, ctx, l1info, l1client, l2client, tx) + tx := builder.L2Info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) + builder.L1.SendUnsignedTx(t, builder.L2.Client, tx, builder.L1Info) } testL2Signed(true, true, false, false, false, false) diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index 28b382476a..36e4cc9402 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -4,15 +4,12 @@ import ( "context" "encoding/json" "errors" - "path/filepath" "testing" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/util/testhelpers" ) type callTxArgs struct { @@ -132,7 +129,7 @@ func (s *ArbTraceAPIStub) Filter(ctx context.Context, filter *filterRequest) ([] func TestArbTraceForwarding(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "redirect.ipc") + ipcPath := tmpPath(t, "redirect.ipc") var apis []rpc.API apis = append(apis, rpc.API{ Namespace: "arbtrace", @@ -141,18 +138,17 @@ func TestArbTraceForwarding(t *testing.T) { Public: false, }) listener, srv, err := rpc.StartIPCEndpoint(ipcPath, apis) - testhelpers.RequireImpl(t, err) + Require(t, err) defer srv.Stop() defer listener.Close() - execConfig := gethexec.ConfigDefaultTest() - execConfig.RPC.ClassicRedirect = ipcPath - execConfig.RPC.ClassicRedirectTimeout = time.Second - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, execConfig, nil, nil, nil) - defer requireClose(t, l1stack) - defer requireClose(t, l2stack) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.RPC.ClassicRedirect = ipcPath + builder.execConfig.RPC.ClassicRedirectTimeout = time.Second + cleanup := builder.Build(t) + defer cleanup() - l2rpc, _ := l2stack.Attach() + l2rpc, _ := builder.L2.Stack.Attach() txArgs := callTxArgs{} traceTypes := []string{"trace"} blockNum := 
rpc.BlockNumberOrHash{} @@ -163,22 +159,22 @@ func TestArbTraceForwarding(t *testing.T) { filter := filterRequest{} var result traceResult err = l2rpc.CallContext(ctx, &result, "arbtrace_call", txArgs, traceTypes, blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) var results []*traceResult err = l2rpc.CallContext(ctx, &results, "arbtrace_callMany", traceRequests, blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &results, "arbtrace_replayBlockTransactions", blockNum, traceTypes) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &result, "arbtrace_replayTransaction", txHash, traceTypes) - testhelpers.RequireImpl(t, err) + Require(t, err) var frames []traceFrame err = l2rpc.CallContext(ctx, &frames, "arbtrace_transaction", txHash) - testhelpers.RequireImpl(t, err) + Require(t, err) var frame traceFrame err = l2rpc.CallContext(ctx, &frame, "arbtrace_get", txHash, path) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &frames, "arbtrace_block", blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &frames, "arbtrace_filter", filter) - testhelpers.RequireImpl(t, err) + Require(t, err) } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 18efd47fa1..8561e3ffc7 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -46,44 +45,55 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { parallelBatchPosters = 4 } - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.Enable = false - conf.BatchPoster.RedisUrl = redisUrl - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + builder.nodeConfig.BatchPoster.RedisUrl = redisUrl + cleanup := builder.Build(t) + defer cleanup() - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") var txs []*types.Transaction for i := 0; i < 100; i++ { - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) txs = append(txs, tx) - err := l2clientA.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) } for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, l2clientA, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } firstTxData, err := txs[0].MarshalBinary() Require(t, err) - seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - conf.BatchPoster.Enable = true - conf.BatchPoster.MaxBatchSize = len(firstTxData) * 2 - startL1Block, err := l1client.BlockNumber(ctx) + seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) + builder.nodeConfig.BatchPoster.Enable = true + builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2 + startL1Block, err := 
builder.L1.Client.BlockNumber(ctx) Require(t, err) for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race - batchPosterConfig := conf.BatchPoster - batchPoster, err := arbnode.NewBatchPoster(nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil) + batchPosterConfig := builder.nodeConfig.BatchPoster + batchPoster, err := arbnode.NewBatchPoster(ctx, + &arbnode.BatchPosterOpts{ + DataPosterDB: nil, + L1Reader: builder.L2.ConsensusNode.L1Reader, + Inbox: builder.L2.ConsensusNode.InboxTracker, + Streamer: builder.L2.ConsensusNode.TxStreamer, + SyncMonitor: builder.L2.ConsensusNode.SyncMonitor, + Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, + DeployInfo: builder.L2.ConsensusNode.DeployInfo, + TransactOpts: &seqTxOpts, + DAWriter: nil, + }, + ) Require(t, err) batchPoster.Start(ctx) defer batchPoster.StopAndWait() @@ -91,11 +101,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { lastTxHash := txs[len(txs)-1].Hash() for i := 90; i > 0; i-- { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) time.Sleep(500 * time.Millisecond) - _, err := l2clientB.TransactionReceipt(ctx, lastTxHash) + _, err := testClientB.Client.TransactionReceipt(ctx, lastTxHash) if err == nil { break } @@ -104,13 +114,15 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } + // TODO: factor this out in separate test case and skip it or delete this + // code entirely. // I've locally confirmed that this passes when the clique period is set to 1. // However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl. 
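	// The `if false` guard below keeps the disabled check compiling, so
	// refactors cannot silently break it while it stays switched off.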
if false { // Make sure the batch poster is able to post multiple batches in one block - endL1Block, err := l1client.BlockNumber(ctx) + endL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) - seqInbox, err := arbnode.NewSequencerInbox(l1client, nodeA.DeployInfo.SequencerInbox, 0) + seqInbox, err := arbnode.NewSequencerInbox(builder.L1.Client, builder.L2.ConsensusNode.DeployInfo.SequencerInbox, 0) Require(t, err) batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block)) Require(t, err) @@ -130,7 +142,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Sign() == 0 { @@ -143,26 +155,25 @@ func TestBatchPosterLargeTx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := gethexec.ConfigDefaultTest() - conf.Sequencer.MaxTxDataSize = 110000 - l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, conf, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Sequencer.MaxTxDataSize = 110000 + cleanup := builder.Build(t) + defer cleanup() - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() data := make([]byte, 100000) _, err := rand.Read(data) Require(t, err) - faucetAddr := l2info.GetAddress("Faucet") - gas := l2info.TransferGas + 20000*uint64(len(data)) - tx := l2info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data) - err = l2clientA.SendTransaction(ctx, tx) + faucetAddr := builder.L2Info.GetAddress("Faucet") + gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) + tx := builder.L2Info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - receiptA, err := EnsureTxSucceeded(ctx, l2clientA, tx) + receiptA, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) - receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30) + receiptB, err := testClientB.EnsureTxSucceededWithTimeout(tx, time.Second*30) Require(t, err) if receiptA.BlockHash != receiptB.BlockHash { Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash) @@ -174,26 +185,25 @@ func TestBatchPosterKeepsUp(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.CompressionLevel = brotli.BestCompression - conf.BatchPoster.MaxBatchPostDelay = time.Hour - execConf := gethexec.ConfigDefaultTest() - execConf.RPC.RPCTxFeeCap = 1000. - l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, execConf, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - l2info.GasPrice = big.NewInt(100e9) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.CompressionLevel = brotli.BestCompression + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour + builder.execConfig.RPC.RPCTxFeeCap = 1000. 
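	// Build applies nodeConfig and execConfig, starts the L1 backend (when
	// the builder was created with withL1 set) plus the L2 node, and returns
	// a single cleanup func covering both.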
+ cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GasPrice = big.NewInt(100e9) go func() { data := make([]byte, 90000) _, err := rand.Read(data) Require(t, err) for { - gas := l2info.TransferGas + 20000*uint64(len(data)) - tx := l2info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data) - err = l2clientA.SendTransaction(ctx, tx) + gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) + tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err := EnsureTxSucceeded(ctx, l2clientA, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } }() @@ -201,11 +211,11 @@ func TestBatchPosterKeepsUp(t *testing.T) { start := time.Now() for { time.Sleep(time.Second) - batches, err := nodeA.InboxTracker.GetBatchCount() + batches, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) - postedMessages, err := nodeA.InboxTracker.GetBatchMessageCount(batches - 1) + postedMessages, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1) Require(t, err) - haveMessages, err := nodeA.TxStreamer.GetMessageCount() + haveMessages, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) duration := time.Since(start) fmt.Printf("batches posted: %v over %v (%.2f batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second))) diff --git a/system_tests/block_hash_test.go b/system_tests/block_hash_test.go index 2b8051242e..b437f3dad9 100644 --- a/system_tests/block_hash_test.go +++ b/system_tests/block_hash_test.go @@ -16,13 +16,13 @@ func TestBlockHash(t *testing.T) { defer cancel() // Even though we don't use the L1, we need to create this node on L1 to get accurate L1 block numbers - l2info, l2node, l2client, _, _, _, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Faucet", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) - _, _, simple, err := mocksgen.DeploySimple(&auth, l2client) + _, _, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) Require(t, err) _, err = simple.CheckBlockHashes(&bind.CallOpts{Context: ctx}) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 2f134f58a6..25081951c5 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -46,24 +46,27 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops var delayEvery int if workloadLoops > 1 { - l1NodeConfigA.BatchPoster.MaxBatchPostDelay = time.Millisecond * 500 + l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500 delayEvery = workloadLoops / 3 } - l2info, nodeA, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() validatorConfig.BlockValidator.Enable = true 
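	// Build2ndNode (used just below) fills zero-valued SecondNodeParams
	// fields with defaults: a non-sequencer node config, the first node's
	// init data and exec config, and a fresh DataDir.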
validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability - validatorConfig.DataAvailability.AggregatorConfig.Enable = false + validatorConfig.DataAvailability.RPCAggregator.Enable = false AddDefaultValNode(t, ctx, validatorConfig, !arbitrator) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil, nil) - defer nodeB.StopAndWait() - l2info.GenerateAccount("User2") + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) + defer cleanupB() + builder.L2Info.GenerateAccount("User2") perTransfer := big.NewInt(1e12) @@ -72,7 +75,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops var tx *types.Transaction if workload == ethSend { - tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, perTransfer, nil) + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, perTransfer, nil) } else { var contractCode []byte var gas uint64 @@ -86,10 +89,10 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, byte(vm.CODECOPY)) contractCode = append(contractCode, byte(vm.PUSH0)) contractCode = append(contractCode, byte(vm.RETURN)) - basefee := GetBaseFee(t, l2client, ctx) + basefee := builder.L2.GetBaseFee(t) var err error - gas, err = l2client.EstimateGas(ctx, ethereum.CallMsg{ - From: l2info.GetAddress("Owner"), + gas, err = builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{ + From: builder.L2Info.GetAddress("Owner"), GasPrice: basefee, Value: big.NewInt(0), Data: contractCode, @@ -101,14 +104,14 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, 0x60, 0x00, 0x60, 0x00, 0x52) // PUSH1 0 MSTORE } contractCode = append(contractCode, 0x60, 0x00, 0x56) // JUMP - gas = l2info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 + gas = builder.L2Info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 } - tx = l2info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) + tx = builder.L2Info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) } - err := l2client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) + _, err = builder.L2.EnsureTxSucceeded(tx) if workload != depleteGas { Require(t, err) } @@ -117,49 +120,49 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops } } } else { - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) // make auth a chain owner - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err) tx, err := arbDebug.BecomeChainOwner(&auth) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), l2client) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) Require(t, err) tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 11, 0) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - tx = l2info.PrepareTxTo("Owner", nil, l2info.TransferGas, perTransfer, []byte{byte(vm.PUSH0)}) - err = 
l2client.SendTransaction(ctx, tx) + tx = builder.L2Info.PrepareTxTo("Owner", nil, builder.L2Info.TransferGas, perTransfer, []byte{byte(vm.PUSH0)}) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } if workload != depleteGas { - delayedTx := l2info.PrepareTx("Owner", "User2", 30002, perTransfer, nil) - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000), + delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 30002, perTransfer, nil) + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000), }) // give the inbox reader a bit of time to pick up the delayed message time.Sleep(time.Millisecond * 500) // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err := WaitForTx(ctx, l2clientB, delayedTx.Hash(), time.Second*5) + _, err := WaitForTx(ctx, testClientB.Client, delayedTx.Hash(), time.Second*5) Require(t, err) } if workload == ethSend { - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(workloadLoops+1))) @@ -168,7 +171,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops } } - lastBlock, err := l2clientB.BlockByNumber(ctx, nil) + lastBlock, err := testClientB.Client.BlockByNumber(ctx, nil) Require(t, err) for { usefulBlock := false @@ -181,19 +184,19 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops if usefulBlock { break } - lastBlock, err = l2clientB.BlockByHash(ctx, lastBlock.ParentHash()) + lastBlock, err = testClientB.Client.BlockByHash(ctx, lastBlock.ParentHash()) Require(t, err) } t.Log("waiting for block: ", lastBlock.NumberU64()) timeout := getDeadlineTimeout(t, time.Minute*10) // messageindex is same as block number here - if !nodeB.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlock.NumberU64()), timeout) { + if !testClientB.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlock.NumberU64()), timeout) { Fatal(t, "did not validate all blocks") } - gethExec := getExecNode(t, nodeB) + gethExec := testClientB.ExecNode gethExec.Recorder.TrimAllPrepared(t) finalRefCount := gethExec.Recorder.RecordingDBReferenceCount() - lastBlockNow, err := l2clientB.BlockByNumber(ctx, nil) + lastBlockNow, err := testClientB.Client.BlockByNumber(ctx, nil) Require(t, err) // up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3 diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go index 14c42f6a2f..a3cab748e2 100644 --- a/system_tests/bloom_test.go +++ b/system_tests/bloom_test.go @@ -17,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - 
"github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) @@ -25,17 +24,19 @@ func TestBloom(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - execconfig := gethexec.ConfigDefaultTest() - execconfig.RPC.BloomBitsBlocks = 256 - execconfig.RPC.BloomConfirms = 1 - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, execconfig, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig.RPC.BloomBitsBlocks = 256 + builder.execConfig.RPC.BloomConfirms = 1 + builder.takeOwnership = false + cleanup := builder.Build(t) - l2info.GenerateAccount("User2") + defer cleanup() - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) + builder.L2Info.GenerateAccount("User2") + + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) ownerTxOpts.Context = ctx - _, simple := deploySimple(t, ctx, ownerTxOpts, client) + _, simple := builder.L2.DeploySimple(t, ownerTxOpts) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) @@ -63,7 +64,7 @@ func TestBloom(t *testing.T) { if sendNullEvent { tx, err = simple.EmitNullEvent(&ownerTxOpts) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -74,15 +75,14 @@ func TestBloom(t *testing.T) { tx, err = simple.Increment(&ownerTxOpts) } Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) if i%100 == 0 { t.Log("counts: ", i, "/", countsNum) } } - execNode := getExecNode(t, node) for { - sectionSize, sectionNum := execNode.Backend.APIBackend().BloomStatus() + sectionSize, sectionNum := builder.L2.ExecNode.Backend.APIBackend().BloomStatus() if sectionSize != 256 { Fatal(t, "unexpected section size: ", sectionSize) } @@ -92,14 +92,14 @@ func TestBloom(t *testing.T) { } <-time.After(time.Second) } - lastHeader, err := client.HeaderByNumber(ctx, nil) + lastHeader, err := builder.L2.Client.HeaderByNumber(ctx, nil) Require(t, err) nullEventQuery := ethereum.FilterQuery{ FromBlock: big.NewInt(0), ToBlock: lastHeader.Number, Topics: [][]common.Hash{{simpleABI.Events["NullEvent"].ID}}, } - logs, err := client.FilterLogs(ctx, nullEventQuery) + logs, err := builder.L2.Client.FilterLogs(ctx, nullEventQuery) Require(t, err) if len(logs) != len(nullEventCounts) { Fatal(t, "expected ", len(nullEventCounts), " logs, got ", len(logs)) @@ -107,7 +107,7 @@ func TestBloom(t *testing.T) { incrementEventQuery := ethereum.FilterQuery{ Topics: [][]common.Hash{{simpleABI.Events["CounterEvent"].ID}}, } - logs, err = client.FilterLogs(ctx, incrementEventQuery) + logs, err = builder.L2.Client.FilterLogs(ctx, incrementEventQuery) Require(t, err) if len(logs) != len(eventCounts) { Fatal(t, "expected ", len(eventCounts), " logs, got ", len(logs)) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 49fdcc64ce..df39e1260d 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -8,9 +8,11 @@ import ( "context" "encoding/hex" "encoding/json" - "fmt" "math/big" "net" + "os" + "strconv" + "strings" "sync" "testing" "time" @@ -34,6 +36,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" 
"github.com/ethereum/go-ethereum/common" @@ -44,6 +47,7 @@ import ( "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" @@ -55,6 +59,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -62,6 +67,180 @@ import ( type info = *BlockchainTestInfo type client = arbutil.L1Interface +type SecondNodeParams struct { + nodeConfig *arbnode.Config + execConfig *gethexec.Config + stackConfig *node.Config + dasConfig *das.DataAvailabilityConfig + initData *statetransfer.ArbosInitializationInfo +} + +type TestClient struct { + ctx context.Context + Client *ethclient.Client + L1Backend *eth.Ethereum + Stack *node.Node + ConsensusNode *arbnode.Node + ExecNode *gethexec.ExecutionNode + + // having cleanup() field makes cleanup customizable from default cleanup methods after calling build + cleanup func() +} + +func NewTestClient(ctx context.Context) *TestClient { + return &TestClient{ctx: ctx} +} + +func (tc *TestClient) SendSignedTx(t *testing.T, l2Client *ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt { + return SendSignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction) +} + +func (tc *TestClient) SendUnsignedTx(t *testing.T, l2Client *ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt { + return SendUnsignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction) +} + +func (tc *TestClient) TransferBalance(t *testing.T, from string, to string, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) { + return TransferBalanceTo(t, from, lInfo.GetAddress(to), amount, lInfo, tc.Client, tc.ctx) +} + +func (tc *TestClient) TransferBalanceTo(t *testing.T, from string, to common.Address, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) { + return TransferBalanceTo(t, from, to, amount, lInfo, tc.Client, tc.ctx) +} + +func (tc *TestClient) GetBalance(t *testing.T, account common.Address) *big.Int { + return GetBalance(t, tc.ctx, tc.Client, account) +} + +func (tc *TestClient) GetBaseFee(t *testing.T) *big.Int { + return GetBaseFee(t, tc.Client, tc.ctx) +} + +func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int { + return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum) +} + +func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) { + SendWaitTestTransactions(t, tc.ctx, tc.Client, txs) +} + +func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common.Address, *mocksgen.Simple) { + return deploySimple(t, tc.ctx, auth, tc.Client) +} + +func (tc *TestClient) EnsureTxSucceeded(transaction *types.Transaction) (*types.Receipt, error) { + return tc.EnsureTxSucceededWithTimeout(transaction, time.Second*5) +} + +func (tc *TestClient) EnsureTxSucceededWithTimeout(transaction *types.Transaction, timeout time.Duration) (*types.Receipt, error) { + return EnsureTxSucceededWithTimeout(tc.ctx, tc.Client, transaction, timeout) +} + +type NodeBuilder struct { + // NodeBuilder configuration + ctx context.Context + chainConfig *params.ChainConfig + 
nodeConfig *arbnode.Config + execConfig *gethexec.Config + l1StackConfig *node.Config + l2StackConfig *node.Config + L1Info info + L2Info info + + // L1, L2 Node parameters + dataDir string + isSequencer bool + takeOwnership bool + withL1 bool + + // Created nodes + L1 *TestClient + L2 *TestClient +} + +func NewNodeBuilder(ctx context.Context) *NodeBuilder { + return &NodeBuilder{ctx: ctx} +} + +func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { + // most used values across current tests are set here as default + b.withL1 = withL1 + if withL1 { + b.isSequencer = true + b.nodeConfig = arbnode.ConfigDefaultL1Test() + } else { + b.takeOwnership = true + b.nodeConfig = arbnode.ConfigDefaultL2Test() + } + b.chainConfig = params.ArbitrumDevTestChainConfig() + b.L1Info = NewL1TestInfo(t) + b.L2Info = NewArbTestInfo(t, b.chainConfig.ChainID) + b.dataDir = t.TempDir() + b.l1StackConfig = createStackConfigForTest(b.dataDir) + b.l2StackConfig = createStackConfigForTest(b.dataDir) + b.execConfig = gethexec.ConfigDefaultTest() + return b +} + +func (b *NodeBuilder) Build(t *testing.T) func() { + if b.withL1 { + l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) + b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = + createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info) + b.L1, b.L2 = l1, l2 + b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } + } else { + l2 := NewTestClient(b.ctx) + b.L2Info, l2.ConsensusNode, l2.Client = + createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.takeOwnership) + b.L2 = l2 + } + b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) + b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() } + return func() { + b.L2.cleanup() + if b.L1 != nil && b.L1.cleanup != nil { + b.L1.cleanup() + } + } +} + +func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*TestClient, func()) { + if b.L2 == nil { + t.Fatal("builder did not previously build a L2 Node") + } + if b.withL1 && b.L1 == nil { + t.Fatal("builder did not previously build a L1 Node") + } + if params.nodeConfig == nil { + params.nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() + } + if params.dasConfig != nil { + params.nodeConfig.DataAvailability = *params.dasConfig + } + if params.stackConfig == nil { + params.stackConfig = b.l2StackConfig + // should use different dataDir from the previously used ones + params.stackConfig.DataDir = t.TempDir() + } + if params.initData == nil { + params.initData = &b.L2Info.ArbInitData + } + if params.execConfig == nil { + params.execConfig = b.execConfig + } + + l2 := NewTestClient(b.ctx) + l2.Client, l2.ConsensusNode = + Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig) + l2.ExecNode = getExecNode(t, l2.ConsensusNode) + l2.cleanup = func() { l2.ConsensusNode.StopAndWait() } + return l2, func() { l2.cleanup() } +} + +func (b *NodeBuilder) BridgeBalance(t *testing.T, account string, amount *big.Int) (*types.Transaction, *types.Receipt) { + return BridgeBalance(t, account, amount, b.L1Info, b.L2Info, b.L1.Client, b.L2.Client, b.ctx) +} + func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, txs []*types.Transaction) { t.Helper() for _, tx := range txs { @@ -239,6 +418,12 @@ func GetBaseFee(t *testing.T, client client, ctx context.Context) *big.Int { return header.BaseFee } +func 
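Taken together, TestClient and NodeBuilder replace the old multi-return helpers with a single entry point, and the rest of this diff migrates tests onto that pattern. A minimal sketch of the shape a migrated test takes (TestBuilderUsage is a hypothetical name; every other identifier is defined above or elsewhere in this diff):

func TestBuilderUsage(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Build an L2 node with an attached L1; configs may be tweaked before Build.
	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
	builder.execConfig.Sequencer.MaxBlockSpeed = 0
	cleanup := builder.Build(t)
	defer cleanup()

	// Fund an account and send a transfer through the TestClient helpers.
	builder.L2Info.GenerateAccount("User2")
	tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil)
	Require(t, builder.L2.Client.SendTransaction(ctx, tx))
	_, err := builder.L2.EnsureTxSucceeded(tx)
	Require(t, err)

	// A second, non-sequencer node that follows the first.
	testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{})
	defer cleanupB()
	_, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second*5)
	Require(t, err)
}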
@@ -239,6 +418,12 @@ func GetBaseFee(t *testing.T, client client, ctx context.Context) *big.Int {
 	return header.BaseFee
 }
 
+func GetBaseFeeAt(t *testing.T, client client, ctx context.Context, blockNum *big.Int) *big.Int {
+	header, err := client.HeaderByNumber(ctx, blockNum)
+	Require(t, err)
+	return header.BaseFee
+}
+
 type lifecycle struct {
 	start func() error
 	stop  func() error
@@ -286,38 +471,22 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client,
 	return createTestL1BlockChainWithConfig(t, l1info, nil)
 }
 
-func stackConfigForTest(t *testing.T) *node.Config {
-	stackConfig := node.DefaultConfig
-	stackConfig.HTTPPort = 0
-	stackConfig.WSPort = 0
-	stackConfig.WSHost = "127.0.0.1"
-	stackConfig.WSModules = []string{server_api.Namespace, consensus.RPCNamespace, execution.RPCNamespace}
-	stackConfig.UseLightweightKDF = true
-	stackConfig.P2P.ListenAddr = ""
-	stackConfig.P2P.NoDial = true
-	stackConfig.P2P.NoDiscovery = true
-	stackConfig.P2P.NAT = nil
-	stackConfig.DataDir = t.TempDir()
-	return &stackConfig
-}
-
-func createDefaultStackForTest(dataDir string) (*node.Node, error) {
+func createStackConfigForTest(dataDir string) *node.Config {
 	stackConf := node.DefaultConfig
-	var err error
 	stackConf.DataDir = dataDir
+	stackConf.UseLightweightKDF = true
+	stackConf.WSPort = 0
+	stackConf.HTTPPort = 0
 	stackConf.HTTPHost = ""
 	stackConf.HTTPModules = append(stackConf.HTTPModules, "eth")
 	stackConf.WSPort = 0
 	stackConf.WSHost = "127.0.0.1"
 	stackConf.WSModules = []string{server_api.Namespace, consensus.RPCNamespace, execution.RPCNamespace}
 	stackConf.P2P.NoDiscovery = true
+	stackConf.P2P.NoDial = true
 	stackConf.P2P.ListenAddr = ""
-
-	stack, err := node.New(&stackConf)
-	if err != nil {
-		return nil, fmt.Errorf("error creating protocol stack: %w", err)
-	}
-	return stack, nil
+	stackConf.P2P.NAT = nil
+	return &stackConf
 }
 
 func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode.Config) (*valnode.ValidationNode, *node.Node) {
@@ -330,6 +499,8 @@ func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode
 	stackConf.P2P.NoDiscovery = true
 	stackConf.P2P.ListenAddr = ""
 
+	valnode.EnsureValidationExposedViaAuthRPC(&stackConf)
+
 	stack, err := node.New(&stackConf)
 	Require(t, err)
 
@@ -393,7 +564,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no
 		l1info = NewL1TestInfo(t)
 	}
 	if stackConfig == nil {
-		stackConfig = stackConfigForTest(t)
+		stackConfig = createStackConfigForTest(t.TempDir())
 	}
 
 	l1info.GenerateAccount("Faucet")
@@ -434,7 +605,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no
 	}})
 
 	Require(t, stack.Start())
-	Require(t, l1backend.StartMining(1))
+	Require(t, l1backend.StartMining())
 
 	rpcClient, err := stack.Attach()
 	Require(t, err)
@@ -476,31 +647,42 @@ func DeployOnTestL1(
 	Require(t, err)
 	serializedChainConfig, err := json.Marshal(chainConfig)
 	Require(t, err)
+
+	arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
+	l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys)
+	Require(t, err)
+	l1Reader.Start(ctx)
+	defer l1Reader.StopAndWait()
+
+	nativeToken := common.Address{}
+	maxDataSize := big.NewInt(117964)
 	addresses, err := arbnode.DeployOnL1(
 		ctx,
-		l1client,
+		l1Reader,
 		&l1TransactionOpts,
 		l1info.GetAddress("Sequencer"),
 		0,
-		func() *headerreader.Config { return &headerreader.TestConfig },
 		arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
+		nativeToken,
+		maxDataSize,
 	)
 	Require(t, err)
 	l1info.SetContract("Bridge", addresses.Bridge)
 	l1info.SetContract("SequencerInbox", addresses.SequencerInbox)
 	l1info.SetContract("Inbox", addresses.Inbox)
+	l1info.SetContract("UpgradeExecutor", addresses.UpgradeExecutor)
 	initMessage := getInitMessage(ctx, t, l1client, addresses)
 	return addresses, initMessage
 }
 
 func createL2BlockChain(
-	t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig,
+	t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *gethexec.CachingConfig,
 ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
-	return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil)
+	return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, cacheConfig)
 }
 
 func createL2BlockChainWithStackConfig(
-	t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config,
+	t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *gethexec.CachingConfig,
 ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
 	if l2info == nil {
 		l2info = NewArbTestInfo(t, chainConfig.ChainID)
@@ -508,12 +690,10 @@ func createL2BlockChainWithStackConfig(
 	var stack *node.Node
 	var err error
 	if stackConfig == nil {
-		stack, err = createDefaultStackForTest(dataDir)
-		Require(t, err)
-	} else {
-		stack, err = node.New(stackConfig)
-		Require(t, err)
+		stackConfig = createStackConfigForTest(dataDir)
 	}
+	stack, err = node.New(stackConfig)
+	Require(t, err)
 
 	chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false)
 	Require(t, err)
@@ -531,7 +711,11 @@ func createL2BlockChainWithStackConfig(
 			SerializedChainConfig: serializedChainConfig,
 		}
 	}
-	blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0)
+	var coreCacheConfig *core.CacheConfig
+	if cacheConfig != nil {
+		coreCacheConfig = gethexec.DefaultCacheConfigFor(stack, cacheConfig)
+	}
+	blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0)
 	Require(t, err)
 
 	return l2info, stack, chainDb, arbDb, blockchain
@@ -544,34 +728,7 @@ func ClientForStack(t *testing.T, backend *node.Node) *ethclient.Client {
 }
 
 // Create and deploy L1 and arbnode for L2
-func createTestNodeOnL1(
-	t *testing.T,
-	ctx context.Context,
-	isSequencer bool,
-) (
-	l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info,
-	l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node,
-) {
-	return createTestNodeOnL1WithConfig(t, ctx, isSequencer, nil, nil, nil, nil)
-}
-
-func createTestNodeOnL1WithConfig(
-	t *testing.T,
-	ctx context.Context,
-	isSequencer bool,
-	nodeConfig *arbnode.Config,
-	execConfig *gethexec.Config,
-	chainConfig *params.ChainConfig,
-	stackConfig *node.Config,
-) (
-	l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info,
-	l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node,
-) {
-	l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, execConfig, chainConfig, stackConfig, nil)
-	return
-}
-
-func createTestNodeOnL1WithConfigImpl(
+func createTestNodeWithL1(
 	t *testing.T,
 	ctx context.Context,
 	isSequencer bool,
@@ -603,7 +760,7 @@ func createTestNodeOnL1WithConfigImpl(
 		l2info = NewArbTestInfo(t, chainConfig.ChainID)
 	}
 	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig)
-	_, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig)
+	_, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &execConfig.Caching)
 	var sequencerTxOptsPtr *bind.TransactOpts
 	var dataSigner signature.DataSignerFunc
 	if isSequencer {
@@ -683,9 +840,9 @@ func rmExecNode(t *testing.T, endpoint string, node *gethexec.ExecutionNode) {
 		return
 	}
 	newNodes := []*gethexec.ExecutionNode{}
-	for _, storedNode := range nodeInfo.execNodes {
-		if storedNode != node {
-			newNodes = append(newNodes, storedNode)
+	for i := 0; i < len(nodeInfo.execNodes); i++ {
+		if nodeInfo.execNodes[i] != node {
+			newNodes = append(newNodes, nodeInfo.execNodes[i])
 		}
 	}
 	nodeInfo.execNodes = newNodes
@@ -706,11 +863,7 @@ func getExecNodeFromEndpoint(t *testing.T, endpoint string) *gethexec.ExecutionN
 
 // L2-only. Enough for tests that need no interface to L1
 // Requires precompiles.AllowDebugPrecompiles = true
-func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) {
-	return CreateTestL2WithConfig(t, ctx, nil, nil, nil, true)
-}
-
-func CreateTestL2WithConfig(
+func createTestNode(
 	t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, takeOwnership bool,
 ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) {
 	if nodeConfig == nil {
@@ -724,7 +877,7 @@ func CreateTestL2WithConfig(
 
 	AddDefaultValNode(t, ctx, nodeConfig, true)
 
-	l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig())
+	l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig(), &execConfig.Caching)
 
 	Require(t, execConfig.Validate())
 	execConfigFetcher := func() *gethexec.Config { return execConfig }
@@ -795,24 +948,6 @@ func Fatal(t *testing.T, printables ...interface{}) {
 	testhelpers.FailImpl(t, printables...)
 }
 
-func Create2ndNode(
-	t *testing.T,
-	ctx context.Context,
-	first *arbnode.Node,
-	l1stack *node.Node,
-	l1info *BlockchainTestInfo,
-	l2InitData *statetransfer.ArbosInitializationInfo,
-	dasConfig *das.DataAvailabilityConfig,
-) (*ethclient.Client, *arbnode.Node) {
-	nodeConf := arbnode.ConfigDefaultL1NonSequencerTest()
-	if dasConfig == nil {
-		nodeConf.DataAvailability.Enable = false
-	} else {
-		nodeConf.DataAvailability = *dasConfig
-	}
-	return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConf, nil, nil)
-}
-
 func Create2ndNodeWithConfig(
 	t *testing.T,
 	ctx context.Context,
@@ -838,7 +973,7 @@ func Create2ndNodeWithConfig(
 	l1client := ethclient.NewClient(l1rpcClient)
 
 	if stackConfig == nil {
-		stackConfig = stackConfigForTest(t)
+		stackConfig = createStackConfigForTest(t.TempDir())
 	}
 	l2stack, err := node.New(stackConfig)
 	Require(t, err)
@@ -855,7 +990,9 @@ func Create2ndNodeWithConfig(
 	chainConfig := firstExec.ArbInterface.BlockChain().Config()
 	initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo)
-	l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, nil, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0)
+
+	coreCacheConfig := gethexec.DefaultCacheConfigFor(l2stack, &execConfig.Caching)
+	l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0)
 	Require(t, err)
 
 	AddDefaultValNode(t, ctx, nodeConfig, true)
@@ -913,11 +1050,19 @@ func authorizeDASKeyset(
 	err := keyset.Serialize(wr)
 	Require(t, err, "unable to serialize DAS keyset")
 	keysetBytes := wr.Bytes()
-	sequencerInbox, err := bridgegen.NewSequencerInbox(l1info.Accounts["SequencerInbox"].Address, l1client)
-	Require(t, err, "unable to create sequencer inbox")
+
+	sequencerInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI))
+	Require(t, err, "unable to parse sequencer inbox ABI")
+	setKeysetCalldata, err := sequencerInboxABI.Pack("setValidKeyset", keysetBytes)
+	Require(t, err, "unable to generate calldata")
+
+	upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l1info.Accounts["UpgradeExecutor"].Address, l1client)
+	Require(t, err, "unable to bind upgrade executor")
+
 	trOps := l1info.GetDefaultTransactOpts("RollupOwner", ctx)
-	tx, err := sequencerInbox.SetValidKeyset(&trOps, keysetBytes)
+	tx, err := upgradeExecutor.ExecuteCall(&trOps, l1info.Accounts["SequencerInbox"].Address, setKeysetCalldata)
 	Require(t, err, "unable to set valid keyset")
+
 	_, err = EnsureTxSucceeded(ctx, l1client, tx)
 	Require(t, err, "unable to ensure transaction success for setting valid keyset")
 }
@@ -949,19 +1094,19 @@ func setupConfigWithDAS(
 
 	dasConfig := &das.DataAvailabilityConfig{
 		Enable: enableDas,
-		KeyConfig: das.KeyConfig{
+		Key: das.KeyConfig{
 			KeyDir: dbPath,
 		},
-		LocalFileStorageConfig: das.LocalFileStorageConfig{
+		LocalFileStorage: das.LocalFileStorageConfig{
 			Enable:  enableFileStorage,
 			DataDir: dbPath,
 		},
-		LocalDBStorageConfig: das.LocalDBStorageConfig{
+		LocalDBStorage: das.LocalDBStorageConfig{
 			Enable:  enableDbStorage,
 			DataDir: dbPath,
 		},
 		RequestTimeout:           5 * time.Second,
-		L1NodeURL:                "none",
+		ParentChainNodeURL:       "none",
 		SequencerInboxAddress:    "none",
 		PanicOnError:             true,
 		DisableSignatureChecking: true,
@@ -990,12 +1135,12 @@ func setupConfigWithDAS(
 			PubKeyBase64Encoded: blsPubToBase64(dasSignerKey),
 			SignerMask:          1,
 		}
-		l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA)
+		l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA)
 		l1NodeConfigA.DataAvailability.Enable = true
-		l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig
-		l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true
-		l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()}
-		l1NodeConfigA.DataAvailability.L1NodeURL = "none"
+		l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+		l1NodeConfigA.DataAvailability.RestAggregator.Enable = true
+		l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()}
+		l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none"
 	}
 
 	return chainConfig, l1NodeConfigA, lifecycleManager, dbPath, dasSignerKey
@@ -1026,6 +1171,21 @@ func deploySimple(
 	return addr, simple
 }
 
+func TestMain(m *testing.M) {
+	logLevelEnv := os.Getenv("TEST_LOGLEVEL")
+	if logLevelEnv != "" {
+		logLevel, err := strconv.ParseUint(logLevelEnv, 10, 32)
+		if err != nil || logLevel > uint64(log.LvlTrace) {
+			log.Warn("TEST_LOGLEVEL exists but is out of bounds, ignoring", "logLevel", logLevelEnv, "max", log.LvlTrace)
+		} else {
+			glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
+			glogger.Verbosity(log.Lvl(logLevel))
+			log.Root().SetHandler(glogger)
+		}
+	}
+	code := m.Run()
+	os.Exit(code)
+}
+
 func getExecNode(t *testing.T, node *arbnode.Node) *gethexec.ExecutionNode {
 	t.Helper()
 	gethExec, ok := node.Execution.(*gethexec.ExecutionNode)
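With TestMain in place, test logging is opt-in via the TEST_LOGLEVEL environment variable; values follow go-ethereum's log.Lvl scale, where log.LvlTrace (5) is the maximum honored. For example, to run a single test with debug output (paths here are an assumption about the repo layout):

	TEST_LOGLEVEL=4 go test ./system_tests/ -run TestBloom

Unparseable or out-of-range values are ignored with a warning, as the else branch above enforces.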
diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go
index fb130eaf29..d75dd27255 100644
--- a/system_tests/conditionaltx_test.go
+++ b/system_tests/conditionaltx_test.go
@@ -16,7 +16,7 @@ import (
 	"github.com/ethereum/go-ethereum/arbitrum"
 	"github.com/ethereum/go-ethereum/arbitrum_types"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/rpc"
@@ -102,23 +102,23 @@ func getOptions(address common.Address, rootHash common.Hash, slotValueMap map[c
 }
 
 func getFulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions {
-	future := hexutil.Uint64(timestamp + 30)
-	past := hexutil.Uint64(timestamp - 1)
-	futureBlockNumber := hexutil.Uint64(blockNumber + 1000)
-	currentBlockNumber := hexutil.Uint64(blockNumber)
+	future := math.HexOrDecimal64(timestamp + 30)
+	past := math.HexOrDecimal64(timestamp - 1)
+	futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000)
+	currentBlockNumber := math.HexOrDecimal64(blockNumber)
 	return getBlockTimeLimits(t, currentBlockNumber, futureBlockNumber, past, future)
 }
 
 func getUnfulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions {
-	future := hexutil.Uint64(timestamp + 30)
-	past := hexutil.Uint64(timestamp - 1)
-	futureBlockNumber := hexutil.Uint64(blockNumber + 1000)
-	previousBlockNumber := hexutil.Uint64(blockNumber - 1)
+	future := math.HexOrDecimal64(timestamp + 30)
+	past := math.HexOrDecimal64(timestamp - 1)
+	futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000)
+	previousBlockNumber := math.HexOrDecimal64(blockNumber - 1)
 	// skip first empty options
 	return getBlockTimeLimits(t, futureBlockNumber, previousBlockNumber, future, past)[1:]
 }
 
-func getBlockTimeLimits(t *testing.T, blockMin, blockMax hexutil.Uint64, timeMin, timeMax hexutil.Uint64) []*arbitrum_types.ConditionalOptions {
+func getBlockTimeLimits(t *testing.T, blockMin, blockMax math.HexOrDecimal64, timeMin, timeMax math.HexOrDecimal64) []*arbitrum_types.ConditionalOptions {
 	basic := []*arbitrum_types.ConditionalOptions{
 		{},
 		{TimestampMin: &timeMin},
@@ -156,9 +156,9 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []*
 			c.KnownAccounts[k] = v
 		}
 		limitTriples := []struct {
-			a *hexutil.Uint64
-			b *hexutil.Uint64
-			c **hexutil.Uint64
+			a *math.HexOrDecimal64
+			b *math.HexOrDecimal64
+			c **math.HexOrDecimal64
 		}{
 			{a.BlockNumberMin, b.BlockNumberMin, &c.BlockNumberMin},
 			{a.BlockNumberMax, b.BlockNumberMax, &c.BlockNumberMax},
@@ -167,10 +167,10 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []*
 		for _, tripple := range limitTriples {
 			if tripple.b != nil {
-				value := hexutil.Uint64(*tripple.b)
+				value := math.HexOrDecimal64(*tripple.b)
 				*tripple.c = &value
 			} else if tripple.a != nil {
-				value := hexutil.Uint64(*tripple.a)
+				value := math.HexOrDecimal64(*tripple.a)
 				*tripple.c = &value
 			} else {
 				*tripple.c = nil
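For concreteness, a single element of the options matrix built by these helpers looks like the following (a hand-written instance; the bound values are arbitrary examples, not taken from the tests):

	timeMin := math.HexOrDecimal64(1700000000) // example lower timestamp bound, unix seconds
	blockMax := math.HexOrDecimal64(12345)     // example upper block-number bound
	opt := &arbitrum_types.ConditionalOptions{
		TimestampMin:   &timeMin,
		BlockNumberMax: &blockMax,
	}
	_ = opt // the sequencer accepts the transaction only while both bounds hold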
@@ -202,43 +202,42 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2info, node, l2client, _, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true)
-	defer requireClose(t, l1stack)
-	defer node.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	cleanup := builder.Build(t)
+	defer cleanup()
 
-	execNode := getExecNode(t, node)
-	auth := l2info.GetDefaultTransactOpts("Owner", ctx)
-	contractAddress1, simple1 := deploySimple(t, ctx, auth, l2client)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
+	contractAddress1, simple1 := builder.L2.DeploySimple(t, auth)
 	tx, err := simple1.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 
-	contractAddress2, simple2 := deploySimple(t, ctx, auth, l2client)
+	contractAddress2, simple2 := builder.L2.DeploySimple(t, auth)
 	tx, err = simple2.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 	tx, err = simple2.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 
-	currentRootHash1 := getStorageRootHash(t, execNode, contractAddress1)
-	currentSlotValueMap1 := getStorageSlotValue(t, execNode, contractAddress1)
-	currentRootHash2 := getStorageRootHash(t, execNode, contractAddress2)
-	currentSlotValueMap2 := getStorageSlotValue(t, execNode, contractAddress2)
+	currentRootHash1 := getStorageRootHash(t, builder.L2.ExecNode, contractAddress1)
+	currentSlotValueMap1 := getStorageSlotValue(t, builder.L2.ExecNode, contractAddress1)
+	currentRootHash2 := getStorageRootHash(t, builder.L2.ExecNode, contractAddress2)
+	currentSlotValueMap2 := getStorageSlotValue(t, builder.L2.ExecNode, contractAddress2)
 
-	rpcClient, err := node.Stack.Attach()
+	rpcClient, err := builder.L2.ConsensusNode.Stack.Attach()
 	Require(t, err)
 
-	l2info.GenerateAccount("User2")
+	builder.L2Info.GenerateAccount("User2")
 
-	testConditionalTxThatShouldSucceed(t, ctx, -1, l2info, rpcClient, nil)
+	testConditionalTxThatShouldSucceed(t, ctx, -1, builder.L2Info, rpcClient, nil)
 	for i, options := range getEmptyOptions(contractAddress1) {
-		testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options)
+		testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options)
 	}
 
-	block, err := l1client.BlockByNumber(ctx, nil)
+	block, err := builder.L1.Client.BlockByNumber(ctx, nil)
 	Require(t, err)
 	blockNumber := block.NumberU64()
 	blockTime := block.Time()
@@ -249,33 +248,33 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) {
 	options1 := dedupOptions(t, append(append(optionsAB, optionsA...), optionsB...))
 	options1 = optionsDedupProduct(t, options1, getFulfillableBlockTimeLimits(t, blockNumber, blockTime))
 	for i, options := range options1 {
-		testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options)
+		testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options)
 	}
 
 	tx, err = simple1.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 	tx, err = simple2.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 
 	previousStorageRootHash1 := currentRootHash1
-	currentRootHash1 = getStorageRootHash(t, execNode, contractAddress1)
+	currentRootHash1 = getStorageRootHash(t, builder.L2.ExecNode, contractAddress1)
 	if bytes.Equal(previousStorageRootHash1.Bytes(), currentRootHash1.Bytes()) {
 		Fatal(t, "storage root hash didn't change as expected")
 	}
-	currentSlotValueMap1 = getStorageSlotValue(t, execNode, contractAddress1)
+	currentSlotValueMap1 = getStorageSlotValue(t, builder.L2.ExecNode, contractAddress1)
 
 	previousStorageRootHash2 := currentRootHash2
-	currentRootHash2 = getStorageRootHash(t, execNode, contractAddress2)
+	currentRootHash2 = getStorageRootHash(t, builder.L2.ExecNode, contractAddress2)
 	if bytes.Equal(previousStorageRootHash2.Bytes(), currentRootHash2.Bytes()) {
 		Fatal(t, "storage root hash didn't change as expected")
 	}
-	currentSlotValueMap2 = getStorageSlotValue(t, execNode, contractAddress2)
+	currentSlotValueMap2 = getStorageSlotValue(t, builder.L2.ExecNode, contractAddress2)
 
-	block, err = l1client.BlockByNumber(ctx, nil)
+	block, err = builder.L1.Client.BlockByNumber(ctx, nil)
 	Require(t, err)
 	blockNumber = block.NumberU64()
 	blockTime = block.Time()
@@ -286,35 +285,38 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) {
 	options2 := dedupOptions(t, append(append(optionsCD, optionsC...), optionsD...))
 	options2 = optionsDedupProduct(t, options2, getFulfillableBlockTimeLimits(t, blockNumber, blockTime))
 	for i, options := range options2 {
-		testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options)
+		testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options)
 	}
 	for i, options := range options1 {
-		testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003)
+		testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003)
 	}
 
-	block, err = l1client.BlockByNumber(ctx, nil)
+	block, err = builder.L1.Client.BlockByNumber(ctx, nil)
 	Require(t, err)
 	blockNumber = block.NumberU64()
 	blockTime = block.Time()
 	options3 := optionsDedupProduct(t, options2, getUnfulfillableBlockTimeLimits(t, blockNumber, blockTime))
 	for i, options := range options3 {
-		testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003)
+		testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003)
 	}
 	options4 := optionsDedupProduct(t, options2, options1)
 	for i, options := range options4 {
-		testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003)
+		testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003)
 	}
 }
 
 func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2info, node, client := CreateTestL2(t, ctx)
-	defer node.StopAndWait()
-	rpcClient, err := node.Stack.Attach()
+
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	cleanup := builder.Build(t)
+	defer cleanup()
+
+	rpcClient, err := builder.L2.ConsensusNode.Stack.Attach()
 	Require(t, err)
 
-	auth := l2info.GetDefaultTransactOpts("Owner", ctx)
-	contractAddress, simple := deploySimple(t, ctx, auth, client)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
+	contractAddress, simple := builder.L2.DeploySimple(t, auth)
 	simpleContract, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI))
 	Require(t, err)
@@ -325,11 +327,11 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) {
 	var options []*arbitrum_types.ConditionalOptions
 	for i := 0; i < numTxes; i++ {
 		account := fmt.Sprintf("User%v", i)
-		l2info.GenerateAccount(account)
-		tx := l2info.PrepareTx("Owner", account, l2info.TransferGas, big.NewInt(1e16), nil)
-		err := client.SendTransaction(ctx, tx)
+		builder.L2Info.GenerateAccount(account)
+		tx := builder.L2Info.PrepareTx("Owner", account, builder.L2Info.TransferGas, big.NewInt(1e16), nil)
+		err := builder.L2.Client.SendTransaction(ctx, tx)
 		Require(t, err)
-		_, err = EnsureTxSucceeded(ctx, client, tx)
+		_, err = builder.L2.EnsureTxSucceeded(tx)
 		Require(t, err)
 	}
 	for i := numTxes - 1; i >= 0; i-- {
@@ -337,7 +339,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) {
 		data, err := simpleContract.Pack("logAndIncrement", big.NewInt(int64(expected)))
 		Require(t, err)
 		account := fmt.Sprintf("User%v", i)
-		txes = append(txes, l2info.PrepareTxTo(account, &contractAddress, l2info.TransferGas, big.NewInt(0), data))
+		txes = append(txes, builder.L2Info.PrepareTxTo(account, &contractAddress, builder.L2Info.TransferGas, big.NewInt(0), data))
 		options = append(options, &arbitrum_types.ConditionalOptions{KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{contractAddress: {SlotValue: map[common.Hash]common.Hash{{0}: common.BigToHash(big.NewInt(int64(expected)))}}}})
 	}
 	ctxWithTimeout, cancelCtxWithTimeout := context.WithTimeout(ctx, 5*time.Second)
@@ -367,8 +369,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) {
 	}
 	cancelCtxWithTimeout()
 	wg.Wait()
-	execNode := getExecNode(t, node)
-	bc := execNode.Backend.ArbInterface().BlockChain()
+	bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain()
 	genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum
 
 	var receipts types.Receipts
@@ -404,41 +405,39 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	execConfig := gethexec.ConfigDefaultTest()
-	execConfig.Sequencer.MaxBlockSpeed = 0
-	execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible
-	execConfig.TxPreChecker.RequiredStateAge = 1
-	execConfig.TxPreChecker.RequiredStateMaxBlocks = 2
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.execConfig.Sequencer.MaxBlockSpeed = 0
+	builder.execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible
+	builder.execConfig.TxPreChecker.RequiredStateAge = 1
+	builder.execConfig.TxPreChecker.RequiredStateMaxBlocks = 2
+	cleanup := builder.Build(t)
+	defer cleanup()
 
-	l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil)
-	defer requireClose(t, l1stack)
-	defer node.StopAndWait()
-	rpcClient, err := node.Stack.Attach()
+	rpcClient, err := builder.L2.ConsensusNode.Stack.Attach()
 	Require(t, err)
 
-	execNode := getExecNode(t, node)
-	l2info.GenerateAccount("User2")
+	builder.L2Info.GenerateAccount("User2")
 
-	auth := l2info.GetDefaultTransactOpts("Owner", ctx)
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
 
 	start := time.Now().Unix()
-	contractAddress, simple := deploySimple(t, ctx, auth, l2client)
+	contractAddress, simple := builder.L2.DeploySimple(t, auth)
 	if time.Since(time.Unix(start, 0)) > 200*time.Millisecond {
 		start++
 		time.Sleep(time.Until(time.Unix(start, 0)))
 	}
 	tx, err := simple.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
-	currentRootHash := getStorageRootHash(t, execNode, contractAddress)
+	currentRootHash := getStorageRootHash(t, builder.L2.ExecNode, contractAddress)
 	options := &arbitrum_types.ConditionalOptions{
 		KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{
 			contractAddress: {RootHash: &currentRootHash},
 		},
 	}
-	testConditionalTxThatShouldFail(t, ctx, 0, l2info, rpcClient, options, -32003)
+	testConditionalTxThatShouldFail(t, ctx, 0, builder.L2Info, rpcClient, options, -32003)
 	time.Sleep(time.Until(time.Unix(start+1, 0)))
-	testConditionalTxThatShouldSucceed(t, ctx, 1, l2info, rpcClient, options)
+	testConditionalTxThatShouldSucceed(t, ctx, 1, builder.L2Info, rpcClient, options)
 
 	start = time.Now().Unix()
 	if time.Since(time.Unix(start, 0)) > 200*time.Millisecond {
@@ -447,23 +446,23 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) {
 	}
 	tx, err = simple.Increment(&auth)
 	Require(t, err, "failed to call Increment()")
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
-	currentRootHash = getStorageRootHash(t, execNode, contractAddress)
+	currentRootHash = getStorageRootHash(t, builder.L2.ExecNode, contractAddress)
 	options = &arbitrum_types.ConditionalOptions{
 		KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{
 			contractAddress: {RootHash: &currentRootHash},
 		},
 	}
-	testConditionalTxThatShouldFail(t, ctx, 2, l2info, rpcClient, options, -32003)
-	tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil)
-	Require(t, l2client.SendTransaction(ctx, tx))
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	testConditionalTxThatShouldFail(t, ctx, 2, builder.L2Info, rpcClient, options, -32003)
+	tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil)
+	Require(t, builder.L2.Client.SendTransaction(ctx, tx))
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 
-	testConditionalTxThatShouldFail(t, ctx, 3, l2info, rpcClient, options, -32003)
-	tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil)
-	Require(t, l2client.SendTransaction(ctx, tx))
-	_, err = EnsureTxSucceeded(ctx, l2client, tx)
+	testConditionalTxThatShouldFail(t, ctx, 3, builder.L2Info, rpcClient, options, -32003)
+	tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil)
+	Require(t, builder.L2.Client.SendTransaction(ctx, tx))
+	_, err = builder.L2.EnsureTxSucceeded(tx)
 	Require(t, err)
 
-	testConditionalTxThatShouldSucceed(t, ctx, 4, l2info, rpcClient, options)
+	testConditionalTxThatShouldSucceed(t, ctx, 4, builder.L2Info, rpcClient, options)
 }
diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go
index d6c2eb5f38..56d79b36d9 100644
--- a/system_tests/contract_tx_test.go
+++ b/system_tests/contract_tx_test.go
@@ -25,18 +25,20 @@ func TestContractTxDeploy(t *testing.T) {
 	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, false)
-	defer node.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	builder.takeOwnership = false
+	cleanup := builder.Build(t)
+	defer cleanup()
 
 	from := common.HexToAddress("0x123412341234")
-	TransferBalanceTo(t, "Faucet", from, big.NewInt(1e18), l2info, client, ctx)
+	builder.L2.TransferBalanceTo(t, "Faucet", from, big.NewInt(1e18), builder.L2Info)
 
 	for stateNonce := uint64(0); stateNonce < 2; stateNonce++ {
-		pos, err := node.TxStreamer.GetMessageCount()
+		pos, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount()
 		Require(t, err)
 		var delayedMessagesRead uint64
 		if pos > 0 {
-			lastMessage, err := node.TxStreamer.GetMessage(pos - 1)
+			lastMessage, err := builder.L2.ConsensusNode.TxStreamer.GetMessage(pos - 1)
 			Require(t, err)
 			delayedMessagesRead = lastMessage.DelayedMessagesRead
 		}
@@ -68,7 +70,7 @@ func TestContractTxDeploy(t *testing.T) {
 		l2Msg = append(l2Msg, math.U256Bytes(contractTx.Value)...)
 		l2Msg = append(l2Msg, contractTx.Data...)
 
-		err = node.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{
+		err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{
 			{
 				Message: &arbostypes.L1IncomingMessage{
 					Header: &arbostypes.L1IncomingMessageHeader{
@@ -89,7 +91,7 @@ func TestContractTxDeploy(t *testing.T) {
 
 		txHash := types.NewTx(contractTx).Hash()
 		t.Log("made contract tx", contractTx, "with hash", txHash)
-		receipt, err := WaitForTx(ctx, client, txHash, time.Second*10)
+		receipt, err := WaitForTx(ctx, builder.L2.Client, txHash, time.Second*10)
 		Require(t, err)
 		if receipt.Status != types.ReceiptStatusSuccessful {
 			Fatal(t, "Receipt has non-successful status", receipt.Status)
@@ -102,7 +104,7 @@ func TestContractTxDeploy(t *testing.T) {
 		t.Log("deployed contract", receipt.ContractAddress, "from address", from, "with nonce", stateNonce)
 		stateNonce++
 
-		code, err := client.CodeAt(ctx, receipt.ContractAddress, nil)
+		code, err := builder.L2.Client.CodeAt(ctx, receipt.ContractAddress, nil)
 		Require(t, err)
 		if !bytes.Equal(code, []byte{0xFE}) {
 			Fatal(t, "expected contract", receipt.ContractAddress, "code of 0xFE but got", hex.EncodeToString(code))
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index 20aa770569..0e59a6a068 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -19,6 +19,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
 
 	"github.com/offchainlabs/nitro/arbnode"
@@ -29,6 +30,7 @@ import (
 	"github.com/offchainlabs/nitro/execution/execclient"
 	"github.com/offchainlabs/nitro/execution/gethexec"
 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
+	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
"github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" @@ -47,15 +49,15 @@ func startLocalDASServer( config := das.DataAvailabilityConfig{ Enable: true, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*das.IterableStorageService @@ -66,7 +68,7 @@ func startLocalDASServer( Require(t, err) seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client) Require(t, err) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() Require(t, err) daWriter, err := das.NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, seqInboxCaller, storageService, "") Require(t, err) @@ -128,17 +130,17 @@ func TestDASRekey(t *testing.T) { authorizeDASKeyset(t, ctx, pubkeyA, l1info, l1client) // Setup L2 chain - _, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, l2info, nodeDir, chainConfig, initMessage, nil) + _, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, l2info, nodeDir, chainConfig, initMessage, nil, nil) l2info.GenerateAccount("User2") // Setup DAS config l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) @@ -154,11 +156,11 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} + l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigB.DataAvailability.RestAggregator.Enable = true + l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} - l1NodeConfigB.DataAvailability.L1NodeURL = "none" + l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) @@ -178,7 +180,8 @@ func TestDASRekey(t 
*testing.T) { // Restart the node on the new keyset against the new DAS server running on the same disk as the first with new keys - l2stackA, err := createDefaultStackForTest(nodeDir) + stackConfig := createStackConfigForTest(nodeDir) + l2stackA, err := node.New(stackConfig) Require(t, err) l2chainDb, err := l2stackA.OpenDatabase("chaindb", 0, 0, "", false) @@ -193,7 +196,7 @@ func TestDASRekey(t *testing.T) { execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigB) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) execClient := execclient.NewClient(StaticFetcherFrom(t, &rpcclient.TestClientConfig), l2stackA) nodeA, err := arbnode.CreateNode(ctx, l2stackA, execClient, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) @@ -252,7 +255,8 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { chainConfig := params.ArbitrumDevTestDASChainConfig() l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() @@ -266,18 +270,18 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCacheConfig: das.TestBigCacheConfig, + LocalCache: das.TestBigCacheConfig, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: fileDataDir, }, - LocalDBStorageConfig: das.LocalDBStorageConfig{ + LocalDBStorage: das.LocalDBStorageConfig{ Enable: true, DataDir: dbDataDir, }, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, @@ -312,18 +316,18 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { PubKeyBase64Encoded: blsPubToBase64(pubkey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" dataSigner := signature.DataSignerFromPrivateKey(l1info.Accounts["Sequencer"].PrivateKey) Require(t, err) // Setup L2 chain - l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil) + l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, 
 	l2info.GenerateAccount("User2")
 
 	execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest)
@@ -349,16 +353,16 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) {
 
 		// AggregatorConfig set up below
 
-		L1NodeURL:      "none",
-		RequestTimeout: 5 * time.Second,
+		ParentChainNodeURL: "none",
+		RequestTimeout:     5 * time.Second,
 	}
 
 	l1NodeConfigB.BlockValidator.Enable = false
 	l1NodeConfigB.DataAvailability.Enable = true
-	l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig
-	l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true
-	l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()}
-	l1NodeConfigB.DataAvailability.L1NodeURL = "none"
+	l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+	l1NodeConfigB.DataAvailability.RestAggregator.Enable = true
+	l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()}
+	l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none"
 
 	l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil)
 	checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB)
diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go
index ff28e2350c..b8fbffcfee 100644
--- a/system_tests/debugapi_test.go
+++ b/system_tests/debugapi_test.go
@@ -8,32 +8,31 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/offchainlabs/nitro/util/testhelpers"
 )
 
 func TestDebugAPI(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	_, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, nil, nil, nil)
-	defer requireClose(t, l1stack)
-	defer requireClose(t, l2stack)
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	cleanup := builder.Build(t)
+	defer cleanup()
 
-	l2rpc, _ := l2stack.Attach()
+	l2rpc, _ := builder.L2.Stack.Attach()
 
 	var dump state.Dump
 	err := l2rpc.CallContext(ctx, &dump, "debug_dumpBlock", rpc.LatestBlockNumber)
-	testhelpers.RequireImpl(t, err)
+	Require(t, err)
 	err = l2rpc.CallContext(ctx, &dump, "debug_dumpBlock", rpc.PendingBlockNumber)
-	testhelpers.RequireImpl(t, err)
+	Require(t, err)
 
 	var badBlocks []eth.BadBlockArgs
 	err = l2rpc.CallContext(ctx, &badBlocks, "debug_getBadBlocks")
-	testhelpers.RequireImpl(t, err)
+	Require(t, err)
 
 	var dumpIt state.IteratorDump
 	err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.LatestBlockNumber, hexutil.Bytes{}, 10, true, true, false)
-	testhelpers.RequireImpl(t, err)
+	Require(t, err)
 	err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.PendingBlockNumber, hexutil.Bytes{}, 10, true, true, false)
-	testhelpers.RequireImpl(t, err)
+	Require(t, err)
 }
diff --git a/system_tests/delayedinbox_test.go b/system_tests/delayedinbox_test.go
index e48cb37028..ca3e7b5999 100644
--- a/system_tests/delayedinbox_test.go
+++ b/system_tests/delayedinbox_test.go
@@ -38,16 +38,17 @@ func TestDelayInboxSimple(t *testing.T) {
 	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2info, l2node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true)
-	defer requireClose(t, l1stack)
-	defer l2node.StopAndWait()
-
-	l2info.GenerateAccount("User2")
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	cleanup := builder.Build(t)
+	defer cleanup()
 
-	delayedTx := l2info.PrepareTx("Owner", "User2", 50001, big.NewInt(1e6), nil)
-	SendSignedTxViaL1(t, ctx, l1info, l1client, l2client, delayedTx)
+	builder.L2Info.GenerateAccount("User2")
 
-	l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+	delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(1e6), nil)
+	builder.L1.SendSignedTx(t, builder.L2.Client, delayedTx, builder.L1Info)
+
+	l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
 	Require(t, err)
 	if l2balance.Cmp(big.NewInt(1e6)) != 0 {
 		Fatal(t, "Unexpected balance:", l2balance)
diff --git a/system_tests/delayedinboxlong_test.go b/system_tests/delayedinboxlong_test.go
index b1c8ea361b..7c57771f50 100644
--- a/system_tests/delayedinboxlong_test.go
+++ b/system_tests/delayedinboxlong_test.go
@@ -25,11 +25,11 @@ func TestDelayInboxLong(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	l2info, l2node, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1(t, ctx, true)
-	defer requireClose(t, l1stack)
-	defer l2node.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	cleanup := builder.Build(t)
+	defer cleanup()
 
-	l2info.GenerateAccount("User2")
+	builder.L2Info.GenerateAccount("User2")
 
 	fundsPerDelayed := int64(1000000)
 	delayedMessages := int64(0)
@@ -42,22 +42,22 @@ func TestDelayInboxLong(t *testing.T) {
 			randNum := rand.Int() % messagesPerDelayed
 			var l1tx *types.Transaction
 			if randNum == 0 {
-				delayedTx := l2info.PrepareTx("Owner", "User2", 50001, big.NewInt(fundsPerDelayed), nil)
-				l1tx = WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000)
+				delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(fundsPerDelayed), nil)
+				l1tx = WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000)
 				lastDelayedMessage = delayedTx
 				delayedMessages++
 			} else {
-				l1tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil)
+				l1tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil)
 			}
 			l1Txs = append(l1Txs, l1tx)
 		}
 		// adding multiple messages in the same AddLocal to get them in the same L1 block
-		errs := l1backend.TxPool().AddLocals(l1Txs)
+		errs := builder.L1.L1Backend.TxPool().AddLocals(l1Txs)
 		for _, err := range errs {
 			Require(t, err)
 		}
 		// Checking every tx is expensive, so we just check the last, assuming that the others succeeded too
-		_, err := EnsureTxSucceeded(ctx, l1client, l1Txs[len(l1Txs)-1])
+		_, err := builder.L1.EnsureTxSucceeded(l1Txs[len(l1Txs)-1])
 		Require(t, err)
 	}
 
@@ -68,14 +68,14 @@ func TestDelayInboxLong(t *testing.T) {
 	// sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in
 	for i := 0; i < 100; i++ {
-		SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
-			l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
+		builder.L1.SendWaitTestTransactions(t, []*types.Transaction{
+			builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
 		})
 	}
 
-	_, err := WaitForTx(ctx, l2client, lastDelayedMessage.Hash(), time.Second*5)
+	_, err := WaitForTx(ctx, builder.L2.Client, lastDelayedMessage.Hash(), time.Second*5)
 	Require(t, err)
-	l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+	l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
 	Require(t, err)
 	if l2balance.Cmp(big.NewInt(fundsPerDelayed*delayedMessages)) != 0 {
 		Fatal(t, "Unexpected balance:", "balance", l2balance, "expected", fundsPerDelayed*delayedMessages)
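Both delayed-inbox tests above follow the same recipe: wrap an L2 transaction for the delayed inbox, post it on L1, then mine filler L1 blocks until the inbox reader picks the message up. Condensed into one sketch (gas values and the filler-loop count mirror the tests, not any protocol minimum):

	delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(1e6), nil)
	builder.L1.SendWaitTestTransactions(t, []*types.Transaction{
		WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000),
	})
	for i := 0; i < 30; i++ { // sending L1 txs creates L1 blocks, advancing the delayed inbox
		builder.L1.SendWaitTestTransactions(t, []*types.Transaction{
			builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
		})
	}
	_, err := WaitForTx(ctx, builder.L2.Client, delayedTx.Hash(), time.Second*5)
	Require(t, err)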
deploy contract") + + // wait for price to come to equilibrium + equilibrated := false + numTriesLeft := 20 + for !equilibrated && numTriesLeft > 0 { + // make an empty block to let the gas price update + builder.L2Info.GasPrice = new(big.Int).Mul(builder.L2Info.GasPrice, big.NewInt(2)) + builder.L2.TransferBalance(t, "Owner", "Owner", common.Big0, builder.L2Info) + + // check if the price has equilibrated + _, _, _, _, _, setPrice, err := arbGasInfo.GetPricesInWei(&bind.CallOpts{}) + Require(t, err, "could not get L2 gas price") + if gasPrice.Cmp(setPrice) == 0 { + equilibrated = true + } + numTriesLeft-- + } + if !equilibrated { + Fatal(t, "L2 gas price did not converge", gasPrice) + } + + initialBalance, err := builder.L2.Client.BalanceAt(ctx, auth.From, nil) + Require(t, err, "could not get balance") + + // deploy a test contract + _, tx, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) + Require(t, err, "could not deploy contract") + receipt, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + header, err := builder.L2.Client.HeaderByNumber(ctx, receipt.BlockNumber) + Require(t, err, "could not get header") + if header.BaseFee.Cmp(gasPrice) != 0 { + Fatal(t, "Header has wrong basefee", header.BaseFee, gasPrice) + } + + balance, err := builder.L2.Client.BalanceAt(ctx, auth.From, nil) + Require(t, err, "could not get balance") + expectedCost := receipt.GasUsed * gasPrice.Uint64() + observedCost := initialBalance.Uint64() - balance.Uint64() + if expectedCost != observedCost { + Fatal(t, "Expected deployment to cost", expectedCost, "instead of", observedCost) + } + + tx, err = simple.Increment(&auth) + Require(t, err, "failed to call Increment()") + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + counter, err := simple.Counter(&bind.CallOpts{}) + Require(t, err, "failed to get counter") + + if counter != 1 { + Fatal(t, "Unexpected counter value", counter) + } +} + +func TestComponentEstimate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + l1BaseFee := new(big.Int).Set(arbostypes.DefaultInitialL1BaseFee) + l2BaseFee := builder.L2.GetBaseFee(t) + + colors.PrintGrey("l1 basefee ", l1BaseFee) + colors.PrintGrey("l2 basefee ", l2BaseFee) + + userBalance := big.NewInt(1e16) + maxPriorityFeePerGas := big.NewInt(0) + maxFeePerGas := arbmath.BigMulByUfrac(l2BaseFee, 3, 2) + + builder.L2Info.GenerateAccount("User") + builder.L2.TransferBalance(t, "Owner", "User", userBalance, builder.L2Info) + + from := builder.L2Info.GetAddress("User") + to := testhelpers.RandomAddress() + gas := uint64(100000000) + calldata := []byte{0x00, 0x12} + value := big.NewInt(4096) + + nodeAbi, err := node_interfacegen.NodeInterfaceMetaData.GetAbi() + Require(t, err) + + nodeMethod := nodeAbi.Methods["gasEstimateComponents"] + estimateCalldata := append([]byte{}, nodeMethod.ID...) + packed, err := nodeMethod.Inputs.Pack(to, false, calldata) + Require(t, err) + estimateCalldata = append(estimateCalldata, packed...) 
+ + msg := ethereum.CallMsg{ + From: from, + To: &types.NodeInterfaceAddress, + Gas: gas, + GasFeeCap: maxFeePerGas, + GasTipCap: maxPriorityFeePerGas, + Value: value, + Data: estimateCalldata, + } + returnData, err := builder.L2.Client.CallContract(ctx, msg, nil) + Require(t, err) + + outputs, err := nodeMethod.Outputs.Unpack(returnData) + Require(t, err) + if len(outputs) != 4 { + Fatal(t, "expected 4 outputs from gasEstimateComponents, got", len(outputs)) + } + + gasEstimate, _ := outputs[0].(uint64) + gasEstimateForL1, _ := outputs[1].(uint64) + baseFee, _ := outputs[2].(*big.Int) + l1BaseFeeEstimate, _ := outputs[3].(*big.Int) + + tx := builder.L2Info.SignTxAs("User", &types.DynamicFeeTx{ + ChainID: builder.L2.ExecNode.ArbInterface.BlockChain().Config().ChainID, + Nonce: 0, + GasTipCap: maxPriorityFeePerGas, + GasFeeCap: maxFeePerGas, + Gas: gasEstimate, + To: &to, + Value: value, + Data: calldata, + }) + + l2Estimate := gasEstimate - gasEstimateForL1 + + colors.PrintBlue("Est. ", gasEstimate, " - ", gasEstimateForL1, " = ", l2Estimate) + + if !arbmath.BigEquals(l1BaseFeeEstimate, l1BaseFee) { + Fatal(t, l1BaseFeeEstimate, l1BaseFee) + } + if !arbmath.BigEquals(baseFee, l2BaseFee) { + Fatal(t, baseFee, l2BaseFee.Uint64()) + } + + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + receipt, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + l2Used := receipt.GasUsed - receipt.GasUsedForL1 + colors.PrintMint("True ", receipt.GasUsed, " - ", receipt.GasUsedForL1, " = ", l2Used) + + if l2Estimate != l2Used { + Fatal(t, l2Estimate, l2Used) + } +} + +func TestDisableL1Charging(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + addr := common.HexToAddress("0x12345678") + + gasWithL1Charging, err := builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{To: &addr}) + Require(t, err) + + gasWithoutL1Charging, err := builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{To: &addr, SkipL1Charging: true}) + Require(t, err) + + if gasWithL1Charging <= gasWithoutL1Charging { + Fatal(t, "SkipL1Charging didn't disable L1 charging") + } + if gasWithoutL1Charging != params.TxGas { + Fatal(t, "Incorrect gas estimate with disabled L1 charging") + } + + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithL1Charging}, nil) + Require(t, err) + + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging}, nil) + if err == nil { + Fatal(t, "CallContract passed with insufficient gas") + } + + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging, SkipL1Charging: true}, nil) + Require(t, err) +} diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 14927159fa..bf8b8f1672 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -20,7 +20,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/ethereum/go-ethereum/common" @@ -33,20 +32,20 @@ func TestSequencerFeePaid(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, l2node, l2client, _, _, _, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - execNode 
:= getExecNode(t, l2node) - version := execNode.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion - callOpts := l2info.GetDefaultCallOpts("Owner", ctx) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + version := builder.L2.ExecNode.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion + callOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) // get the network fee account - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), l2client) + arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) Require(t, err, "failed to deploy contract") - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err, "failed to deploy contract") - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "failed to deploy contract") networkFeeAccount, err := arbOwnerPublic.GetNetworkFeeAccount(callOpts) Require(t, err, "could not get the network fee account") @@ -54,24 +53,24 @@ func TestSequencerFeePaid(t *testing.T) { l1Estimate, err := arbGasInfo.GetL1BaseFeeEstimate(callOpts) Require(t, err) - baseFee := GetBaseFee(t, l2client, ctx) - l2info.GasPrice = baseFee + baseFee := builder.L2.GetBaseFee(t) + builder.L2Info.GasPrice = baseFee testFees := func(tip uint64) (*big.Int, *big.Int) { tipCap := arbmath.BigMulByUint(baseFee, tip) - txOpts := l2info.GetDefaultTransactOpts("Faucet", ctx) + txOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) txOpts.GasTipCap = tipCap gasPrice := arbmath.BigAdd(baseFee, tipCap) - networkBefore := GetBalance(t, ctx, l2client, networkFeeAccount) + networkBefore := builder.L2.GetBalance(t, networkFeeAccount) tx, err := arbDebug.Events(&txOpts, true, [32]byte{}) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) - networkAfter := GetBalance(t, ctx, l2client, networkFeeAccount) - l1Charge := arbmath.BigMulByUint(l2info.GasPrice, receipt.GasUsedForL1) + networkAfter := builder.L2.GetBalance(t, networkFeeAccount) + l1Charge := arbmath.BigMulByUint(builder.L2Info.GasPrice, receipt.GasUsedForL1) // the network should receive // 1. 
compute costs @@ -93,7 +92,7 @@ func TestSequencerFeePaid(t *testing.T) { l1GasBought := arbmath.BigDiv(l1Charge, l1Estimate).Uint64() l1ChargeExpected := arbmath.BigMulByUint(l1Estimate, txSize*params.TxDataNonZeroGasEIP2028) // L1 gas can only be charged in terms of L2 gas, so subtract off any rounding error from the expected value - l1ChargeExpected.Sub(l1ChargeExpected, new(big.Int).Mod(l1ChargeExpected, l2info.GasPrice)) + l1ChargeExpected.Sub(l1ChargeExpected, new(big.Int).Mod(l1ChargeExpected, builder.L2Info.GasPrice)) colors.PrintBlue("bytes ", l1GasBought/params.TxDataNonZeroGasEIP2028, txSize) @@ -130,42 +129,39 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - chainConfig := params.ArbitrumDevTestChainConfig() - conf := arbnode.ConfigDefaultL1Test() - conf.DelayedSequencer.FinalizeDistance = 1 - - l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.DelayedSequencer.FinalizeDistance = 1 + cleanup := builder.Build(t) + defer cleanup() - ownerAuth := l2info.GetDefaultTransactOpts("Owner", ctx) + ownerAuth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) // make ownerAuth a chain owner - arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err) tx, err := arbdebug.BecomeChainOwner(&ownerAuth) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) // use ownerAuth to set the L1 price per unit Require(t, err) - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), l2client) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) Require(t, err) tx, err = arbOwner.SetL1PricePerUnit(&ownerAuth, arbmath.UintToBig(initialEstimate)) Require(t, err) - _, err = WaitForTx(ctx, l2client, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, builder.L2.Client, tx.Hash(), time.Second*5) Require(t, err) - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err) lastEstimate, err := arbGasInfo.GetL1BaseFeeEstimate(&bind.CallOpts{Context: ctx}) Require(t, err) - lastBatchCount, err := node.InboxTracker.GetBatchCount() + lastBatchCount, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) - l1Header, err := l1client.HeaderByNumber(ctx, nil) + l1Header, err := builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) - rewardRecipientBalanceBefore := GetBalance(t, ctx, l2client, l1pricing.BatchPosterAddress) + rewardRecipientBalanceBefore := builder.L2.GetBalance(t, l1pricing.BatchPosterAddress) timesPriceAdjusted := 0 colors.PrintBlue("Initial values") @@ -174,17 +170,17 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { numRetrogradeMoves := 0 for i := 0; i < 256; i++ { - tx, receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2info, l2client, ctx) - header, err := l2client.HeaderByHash(ctx, receipt.BlockHash) + tx, receipt := builder.L2.TransferBalance(t, "Owner", "Owner", common.Big1, builder.L2Info) + header, err := 
builder.L2.Client.HeaderByHash(ctx, receipt.BlockHash) Require(t, err) - TransferBalance(t, "Faucet", "Faucet", common.Big1, l1info, l1client, ctx) // generate l1 traffic + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big1, builder.L1Info) // generate l1 traffic units := compressedTxSize(t, tx) * params.TxDataNonZeroGasEIP2028 estimatedL1FeePerUnit := arbmath.BigDivByUint(arbmath.BigMulByUint(header.BaseFee, receipt.GasUsedForL1), units) if !arbmath.BigEquals(lastEstimate, estimatedL1FeePerUnit) { - l1Header, err = l1client.HeaderByNumber(ctx, nil) + l1Header, err = builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) callOpts := &bind.CallOpts{Context: ctx, BlockNumber: receipt.BlockNumber} @@ -233,7 +229,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { // see that the inbox advances for j := 16; j > 0; j-- { - newBatchCount, err := node.InboxTracker.GetBatchCount() + newBatchCount, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) if newBatchCount > lastBatchCount { colors.PrintGrey("posted new batch ", newBatchCount) @@ -248,7 +244,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { } } - rewardRecipientBalanceAfter := GetBalance(t, ctx, l2client, chainConfig.ArbitrumChainParams.InitialChainOwner) + rewardRecipientBalanceAfter := builder.L2.GetBalance(t, builder.chainConfig.ArbitrumChainParams.InitialChainOwner) colors.PrintMint("reward recipient balance ", rewardRecipientBalanceBefore, " ➤ ", rewardRecipientBalanceAfter) colors.PrintMint("price changes ", timesPriceAdjusted) @@ -259,7 +255,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { Fatal(t, "reward recipient didn't get paid") } - arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), l2client) + arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), builder.L2.Client) Require(t, err) batchPosterAddresses, err := arbAggregator.GetBatchPosters(&bind.CallOpts{Context: ctx}) Require(t, err) @@ -267,7 +263,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { for _, bpAddr := range batchPosterAddresses { if bpAddr != l1pricing.BatchPosterAddress && bpAddr != l1pricing.L1PricerFundsPoolAddress { numReimbursed++ - bal, err := l1client.BalanceAt(ctx, bpAddr, nil) + bal, err := builder.L1.Client.BalanceAt(ctx, bpAddr, nil) Require(t, err) if bal.Sign() == 0 { Fatal(t, "Batch poster balance is zero for", bpAddr) @@ -302,7 +298,7 @@ func TestSequencerPriceAdjustsFrom25Gwei(t *testing.T) { func compressedTxSize(t *testing.T, tx *types.Transaction) uint64 { txBin, err := tx.MarshalBinary() Require(t, err) - compressed, err := arbcompress.CompressFast(txBin) + compressed, err := arbcompress.CompressLevel(txBin, 0) Require(t, err) return uint64(len(compressed)) } diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 22421407d8..d29e82c12c 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "math/big" + "os" "path/filepath" "strings" "sync" @@ -14,13 +15,9 @@ import ( "time" "github.com/alicebob/miniredis/v2" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/node" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/statetransfer" 
"github.com/offchainlabs/nitro/util/redisutil" ) @@ -31,17 +28,15 @@ const nodesCount = 5 // number of testnodes to create in tests func TestStaticForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ipcPath := filepath.Join(t.TempDir(), "test.ipc") - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - stackConfig := stackConfigForTest(t) - ipcConfig.Apply(stackConfig) - nodeConfigA := arbnode.ConfigDefaultL1Test() - nodeConfigA.BatchPoster.Enable = false - - l2info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, nil, stackConfig) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + ipcPath := tmpPath(t, "test.ipc") + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + builder.l2StackConfig.IPCPath = ipcPath + cleanupA := builder.Build(t) + defer cleanupA() + + clientA := builder.L2.Client nodeConfigB := arbnode.ConfigDefaultL1Test() execConfigB := gethexec.ConfigDefaultTest() @@ -49,21 +44,25 @@ func TestStaticForwarder(t *testing.T) { nodeConfigB.Sequencer = false nodeConfigB.DelayedSequencer.Enable = false execConfigB.Forwarder.RedisUrl = "" - execConfigB.ForwardingTargetImpl = ipcPath + execConfigB.ForwardingTarget = ipcPath nodeConfigB.BatchPoster.Enable = false - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, execConfigB, nil) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{ + nodeConfig: nodeConfigB, + execConfig: execConfigB, + }) + defer cleanupB() + clientB := testClientB.Client - l2info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount("User2") + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, transferAmount, nil) err := clientB.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - l2balance, err := clientA.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := clientA.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { @@ -94,74 +93,57 @@ type fallbackSequencerOpts struct { enableSecCoordinator bool } -func fallbackSequencer( - ctx context.Context, t *testing.T, opts *fallbackSequencerOpts, -) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, - l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := stackConfigForTest(t) - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = opts.ipcPath - ipcConfig.Apply(stackConfig) - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator - nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = opts.ipcPath - return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil, stackConfig) +func fallbackSequencer(ctx context.Context, t *testing.T, opts *fallbackSequencerOpts) *NodeBuilder { + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.l2StackConfig.IPCPath = opts.ipcPath + builder.nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator + builder.nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl + builder.nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath + return builder } -func 
createForwardingNode( - ctx context.Context, t *testing.T, - first *arbnode.Node, - l1stack *node.Node, - l1info *BlockchainTestInfo, - l2InitData *statetransfer.ArbosInitializationInfo, - ipcPath string, - redisUrl string, - fallbackPath string, -) (*ethclient.Client, *arbnode.Node) { - stackConfig := stackConfigForTest(t) +func createForwardingNode(t *testing.T, builder *NodeBuilder, ipcPath string, redisUrl string, fallbackPath string) (*TestClient, func()) { if ipcPath != "" { - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - ipcConfig.Apply(stackConfig) + builder.l2StackConfig.IPCPath = ipcPath } nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.Sequencer = false nodeConfig.DelayedSequencer.Enable = false + nodeConfig.BatchPoster.Enable = false execConfig := gethexec.ConfigDefaultTest() execConfig.Sequencer.Enable = false execConfig.Forwarder.RedisUrl = redisUrl - execConfig.ForwardingTargetImpl = fallbackPath + execConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, execConfig, stackConfig) + return builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfig, execConfig: execConfig}) } -func createSequencer( - ctx context.Context, t *testing.T, - first *arbnode.Node, - l1stack *node.Node, - l1info *BlockchainTestInfo, - l2InitData *statetransfer.ArbosInitializationInfo, - ipcPath string, - redisUrl string, -) (*ethclient.Client, *arbnode.Node) { - stackConfig := stackConfigForTest(t) - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - ipcConfig.Apply(stackConfig) +func createSequencer(t *testing.T, builder *NodeBuilder, ipcPath string, redisUrl string) (*TestClient, func()) { + builder.l2StackConfig.IPCPath = ipcPath nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.BatchPoster.Enable = true + nodeConfig.BatchPoster.Enable = false nodeConfig.SeqCoordinator.Enable = true nodeConfig.SeqCoordinator.RedisUrl = redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = ipcPath + nodeConfig.SeqCoordinator.MyUrl = ipcPath - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, gethexec.ConfigDefaultTest(), stackConfig) + return builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfig}) } // tmpPath returns file path with specified filename from temporary directory of the test. func tmpPath(t *testing.T, filename string) string { - return filepath.Join(t.TempDir(), filename) + t.Helper() + // create a unique temporary directory {name}, at most 10 characters long, with path $TMPDIR/{name} + tmpDir, err := os.MkdirTemp("", "") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + t.Cleanup(func() { + if err = os.RemoveAll(tmpDir); err != nil { + t.Errorf("Failed to cleanup temp dir: %v", err) + } + }) + return filepath.Join(tmpDir, filename) } // testNodes creates specified number of paths for ipc from temporary directory of the test.
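A note on the tmpPath rewrite above: it trades t.TempDir() for a bare os.MkdirTemp("", ""), which yields a short random name directly under $TMPDIR. The plausible reason, inferred rather than stated in the patch, is that these paths name Unix domain sockets for the .ipc endpoints, and a socket path must fit in sun_path (about 104 bytes on macOS, 108 on Linux), while t.TempDir() embeds the full test name and can exceed that. A self-contained sketch of the resulting path shape, standard library only:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.MkdirTemp("", "") creates something like /tmp/2672712463:
	// a name of at most ~10 random digits directly under $TMPDIR.
	tmpDir, err := os.MkdirTemp("", "")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	ipcPath := filepath.Join(tmpDir, "test.ipc")
	// A Unix socket path must fit in sun_path (~104 bytes on macOS,
	// ~108 on Linux); this stays comfortably under that limit.
	fmt.Println(ipcPath, "length:", len(ipcPath))
}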
@@ -239,34 +221,36 @@ func TestRedisForwarder(t *testing.T) { redisServer, redisUrl := initRedis(ctx, t, append(nodePaths, fbNodePath)) defer redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + builder := fallbackSequencer(ctx, t, &fallbackSequencerOpts{ ipcPath: fbNodePath, redisUrl: redisUrl, enableSecCoordinator: true, }) - defer requireClose(t, l1stack) - defer fallbackNode.StopAndWait() + cleanup := builder.Build(t) + defer cleanup() + fallbackNode, fallbackClient := builder.L2.ConsensusNode, builder.L2.Client - forwardingClient, forwardingNode := createForwardingNode(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fbNodePath) - defer forwardingNode.StopAndWait() + TestClientForwarding, cleanupForwarding := createForwardingNode(t, builder, "", redisUrl, fbNodePath) + defer cleanupForwarding() + forwardingClient := TestClientForwarding.Client var seqNodes []*arbnode.Node var seqClients []*ethclient.Client for _, path := range nodePaths { - client, node := createSequencer(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, path, redisUrl) - seqNodes = append(seqNodes, node) - seqClients = append(seqClients, client) + testClientSeq, _ := createSequencer(t, builder, path, redisUrl) + seqNodes = append(seqNodes, testClientSeq.ConsensusNode) + seqClients = append(seqClients, testClientSeq.Client) } defer stopNodes(seqNodes) for i := range seqClients { userA := user("A", i) - l2info.GenerateAccount(userA) - tx := l2info.PrepareTx("Owner", userA, l2info.TransferGas, big.NewInt(1e12+int64(l2info.TransferGas)*l2info.GasPrice.Int64()), nil) + builder.L2Info.GenerateAccount(userA) + tx := builder.L2Info.PrepareTx("Owner", userA, builder.L2Info.TransferGas, big.NewInt(1e12+int64(builder.L2Info.TransferGas)*builder.L2Info.GasPrice.Int64()), nil) err := fallbackClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -276,8 +260,8 @@ func TestRedisForwarder(t *testing.T) { } userA := user("A", i) userB := user("B", i) - l2info.GenerateAccount(userB) - tx := l2info.PrepareTx(userA, userB, l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount(userB) + tx := builder.L2Info.PrepareTx(userA, userB, builder.L2Info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } if err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10); err != nil { @@ -286,7 +270,7 @@ func TestRedisForwarder(t *testing.T) { _, err := EnsureTxSucceeded(ctx, seqClients[i], tx) Require(t, err) - l2balance, err := seqClients[i].BalanceAt(ctx, l2info.GetAddress(userB), nil) + l2balance, err := seqClients[i].BalanceAt(ctx, builder.L2Info.GetAddress(userB), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { @@ -307,29 +291,31 @@ func TestRedisForwarderFallbackNoRedis(t *testing.T) { redisServer, redisUrl := initRedis(ctx, t, nodePaths) redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + builder := fallbackSequencer(ctx, t, &fallbackSequencerOpts{ ipcPath: fallbackIpcPath, redisUrl: redisUrl, enableSecCoordinator: false, }) - defer requireClose(t, l1stack) - defer fallbackNode.StopAndWait() + cleanup := builder.Build(t) + defer cleanup() + fallbackClient := builder.L2.Client - forwardingClient, forwardingNode := createForwardingNode(ctx, t, fallbackNode, 
l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fallbackIpcPath) - defer forwardingNode.StopAndWait() + TestClientForwarding, cleanupForwarding := createForwardingNode(t, builder, "", redisUrl, fallbackIpcPath) + defer cleanupForwarding() + forwardingClient := TestClientForwarding.Client user := "User2" - l2info.GenerateAccount(user) - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount(user) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10) Require(t, err) - _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - l2balance, err := fallbackClient.BalanceAt(ctx, l2info.GetAddress(user), nil) + l2balance, err := fallbackClient.BalanceAt(ctx, builder.L2Info.GetAddress(user), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index d0a0092143..de965f958b 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -208,6 +208,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha bridgeAddr, l1Info.GetAddress("sequencer"), timeBounds, + big.NewInt(117964), ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -228,7 +229,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha } func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chainConfig *params.ChainConfig, l1Client arbutil.L1Interface, l2info *BlockchainTestInfo, rollupAddresses *chaininfo.RollupAddresses, initMsg *arbostypes.ParsedInitMessage, txOpts *bind.TransactOpts, signer signature.DataSignerFunc, fatalErrChan chan error) (*arbnode.Node, *gethexec.ExecutionNode) { - _, stack, l2ChainDb, l2ArbDb, l2Blockchain := createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMsg, nil) + _, stack, l2ChainDb, l2ArbDb, l2Blockchain := createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMsg, nil, nil) execNode, err := gethexec.CreateExecutionNode(ctx, stack, l2ChainDb, l2Blockchain, l1Client, gethexec.ConfigDefaultTest) Require(t, err) diff --git a/system_tests/infra_fee_test.go b/system_tests/infra_fee_test.go index a56e054563..9366fc204e 100644 --- a/system_tests/infra_fee_test.go +++ b/system_tests/infra_fee_test.go @@ -23,45 +23,46 @@ func TestInfraFee(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, true) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) ownerTxOpts.Context = ctx - ownerCallOpts := l2info.GetDefaultCallOpts("Owner", ctx) + ownerCallOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) - arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), client) + arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), builder.L2.Client) Require(t, err) - arbownerPublic, err := 
precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), client) + arbownerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), builder.L2.Client) Require(t, err) networkFeeAddr, err := arbownerPublic.GetNetworkFeeAccount(ownerCallOpts) Require(t, err) infraFeeAddr := common.BytesToAddress(crypto.Keccak256([]byte{3, 2, 6})) tx, err := arbowner.SetInfraFeeAccount(&ownerTxOpts, infraFeeAddr) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - _, simple := deploySimple(t, ctx, ownerTxOpts, client) + _, simple := builder.L2.DeploySimple(t, ownerTxOpts) - netFeeBalanceBefore, err := client.BalanceAt(ctx, networkFeeAddr, nil) + netFeeBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - infraFeeBalanceBefore, err := client.BalanceAt(ctx, infraFeeAddr, nil) + infraFeeBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) tx, err = simple.Increment(&ownerTxOpts) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) l2GasUsed := receipt.GasUsed - receipt.GasUsedForL1 expectedFunds := arbmath.BigMulByUint(arbmath.UintToBig(l2pricing.InitialBaseFeeWei), l2GasUsed) expectedBalanceAfter := arbmath.BigAdd(infraFeeBalanceBefore, expectedFunds) - netFeeBalanceAfter, err := client.BalanceAt(ctx, networkFeeAddr, nil) + netFeeBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - infraFeeBalanceAfter, err := client.BalanceAt(ctx, infraFeeAddr, nil) + infraFeeBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) if !arbmath.BigEquals(netFeeBalanceBefore, netFeeBalanceAfter) { diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go index 0e055adc5f..6707df1c64 100644 --- a/system_tests/initialization_test.go +++ b/system_tests/initialization_test.go @@ -62,14 +62,16 @@ func TestInitContract(t *testing.T) { l2info.ArbInitData.Accounts = append(l2info.ArbInitData.Accounts, accountInfo) expectedSums[accountAddress] = sum } - _, node, client := CreateTestL2WithConfig(t, ctx, l2info, nil, nil, true) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.L2Info = l2info + cleanup := builder.Build(t) + defer cleanup() for accountAddress, sum := range expectedSums { msg := ethereum.CallMsg{ To: &accountAddress, } - res, err := client.CallContract(ctx, msg, big.NewInt(0)) + res, err := builder.L2.Client.CallContract(ctx, msg, big.NewInt(0)) Require(t, err) resBig := new(big.Int).SetBytes(res) if resBig.Cmp(sum) != 0 { diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index e25b4a21ea..511a608e67 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -9,24 +9,18 @@ import ( "testing" "github.com/ethereum/go-ethereum/ethclient" - "github.com/offchainlabs/nitro/cmd/genericconf" ) func TestIpcRpc(t *testing.T) { ipcPath := filepath.Join(t.TempDir(), "test.ipc") - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - - stackConf := stackConfigForTest(t) - ipcConfig.Apply(stackConf) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, l2node, _, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, nil, nil, stackConf) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + 
builder.l2StackConfig.IPCPath = ipcPath + cleanup := builder.Build(t) + defer cleanup() _, err := ethclient.Dial(ipcPath) Require(t, err) diff --git a/system_tests/log_subscription_test.go b/system_tests/log_subscription_test.go index 5ee1732fb0..e4402533a6 100644 --- a/system_tests/log_subscription_test.go +++ b/system_tests/log_subscription_test.go @@ -19,21 +19,22 @@ func TestLogSubscription(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, client) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) logChan := make(chan types.Log, 128) - subscription, err := client.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, logChan) + subscription, err := builder.L2.Client.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, logChan) Require(t, err) defer subscription.Unsubscribe() tx, err := arbSys.WithdrawEth(&auth, common.Address{}) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) if len(receipt.Logs) != 1 { @@ -52,6 +53,6 @@ func TestLogSubscription(t *testing.T) { if !reflect.DeepEqual(receiptLog, subscriptionLog) { Fatal(t, "Receipt log", receiptLog, "is different than subscription log", subscriptionLog) } - _, err = client.BlockByHash(ctx, subscriptionLog.BlockHash) + _, err = builder.L2.Client.BlockByHash(ctx, subscriptionLog.BlockHash) Require(t, err) } diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index 48b28b29e4..e095adf6c1 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -18,27 +17,26 @@ func TestMeaninglessBatchReorg(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.Enable = false - l2Info, arbNode, l2Client, l1Info, l1Backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) - defer requireClose(t, l1stack) - defer arbNode.StopAndWait() - seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + cleanup := builder.Build(t) + defer cleanup() + + seqInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) Require(t, err) - seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx) + seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) tx, err := seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) Require(t, err) - batchReceipt, err := EnsureTxSucceeded(ctx, l1Client, tx) + batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) - execNode := getExecNode(t, arbNode) for i := 0; ; i++ { if i >= 500 { Fatal(t, "Failed to read batch from L1") } - msgNum, err := 
execNode.ExecEngine.HeadMessageNumber().Await(ctx) + msgNum, err := builder.L2.ExecNode.ExecEngine.HeadMessageNumber().Await(ctx) Require(t, err) if msgNum == 1 { break @@ -47,33 +45,33 @@ func TestMeaninglessBatchReorg(t *testing.T) { } time.Sleep(10 * time.Millisecond) } - metadata, err := arbNode.InboxTracker.GetBatchMetadata(1) + metadata, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(1) Require(t, err) originalBatchBlock := batchReceipt.BlockNumber.Uint64() if metadata.ParentChainBlock != originalBatchBlock { Fatal(t, "Posted batch in block", originalBatchBlock, "but metadata says L1 block was", metadata.ParentChainBlock) } - _, l2Receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2Info, l2Client, ctx) + _, l2Receipt := builder.L2.TransferBalance(t, "Owner", "Owner", common.Big1, builder.L2Info) // Make the reorg larger to force the miner to discard transactions. // The miner usually collects transactions from deleted blocks and puts them in the mempool. // However, this code doesn't run on reorgs larger than 64 blocks for performance reasons. // Therefore, we make a bunch of small blocks to prevent the code from running. for j := uint64(0); j < 70; j++ { - TransferBalance(t, "Faucet", "Faucet", common.Big1, l1Info, l1Client, ctx) + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big1, builder.L1Info) } - parentBlock := l1Backend.BlockChain().GetBlockByNumber(batchReceipt.BlockNumber.Uint64() - 1) - err = l1Backend.BlockChain().ReorgToOldBlock(parentBlock) + parentBlock := builder.L1.L1Backend.BlockChain().GetBlockByNumber(batchReceipt.BlockNumber.Uint64() - 1) + err = builder.L1.L1Backend.BlockChain().ReorgToOldBlock(parentBlock) Require(t, err) // Produce a new l1Block so that the batch ends up in a different l1Block than before - TransferBalance(t, "User", "User", common.Big1, l1Info, l1Client, ctx) + builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) Require(t, err) - newBatchReceipt, err := EnsureTxSucceeded(ctx, l1Client, tx) + newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) newBatchBlock := newBatchReceipt.BlockNumber.Uint64() @@ -87,7 +85,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { if i >= 500 { Fatal(t, "Failed to read batch reorg from L1") } - metadata, err = arbNode.InboxTracker.GetBatchMetadata(1) + metadata, err = builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(1) Require(t, err) if metadata.ParentChainBlock == newBatchBlock { break @@ -97,10 +95,10 @@ func TestMeaninglessBatchReorg(t *testing.T) { time.Sleep(10 * time.Millisecond) } - _, err = arbNode.InboxReader.GetSequencerMessageBytes(1).Await(ctx) + _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(1).Await(ctx) Require(t, err) - l2Header, err := l2Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) + l2Header, err := builder.L2.Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) Require(t, err) if l2Header.Hash() != l2Receipt.BlockHash { diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index cd9c9f9cec..edefe630ef 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -16,213 +16,10 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/solgen/go/mocksgen" 
+ "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" - "github.com/offchainlabs/nitro/solgen/go/precompilesgen" - "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/colors" - "github.com/offchainlabs/nitro/util/testhelpers" ) -func TestDeploy(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() - - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - auth.GasMargin = 0 // don't adjust, we want to see if the estimate alone is sufficient - - _, simple := deploySimple(t, ctx, auth, client) - - tx, err := simple.Increment(&auth) - Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, client, tx) - Require(t, err) - - counter, err := simple.Counter(&bind.CallOpts{}) - Require(t, err, "failed to get counter") - - if counter != 1 { - Fatal(t, "Unexpected counter value", counter) - } -} - -func TestEstimate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() - - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - auth.GasMargin = 0 // don't adjust, we want to see if the estimate alone is sufficient - - gasPrice := big.NewInt(params.GWei / 10) - - // set the gas price - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), client) - Require(t, err, "could not deploy ArbOwner contract") - tx, err := arbOwner.SetMinimumL2BaseFee(&auth, gasPrice) - Require(t, err, "could not set L2 gas price") - _, err = EnsureTxSucceeded(ctx, client, tx) - Require(t, err) - - // connect to arbGasInfo precompile - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), client) - Require(t, err, "could not deploy contract") - - // wait for price to come to equilibrium - equilibrated := false - numTriesLeft := 20 - for !equilibrated && numTriesLeft > 0 { - // make an empty block to let the gas price update - l2info.GasPrice = new(big.Int).Mul(l2info.GasPrice, big.NewInt(2)) - TransferBalance(t, "Owner", "Owner", common.Big0, l2info, client, ctx) - - // check if the price has equilibrated - _, _, _, _, _, setPrice, err := arbGasInfo.GetPricesInWei(&bind.CallOpts{}) - Require(t, err, "could not get L2 gas price") - if gasPrice.Cmp(setPrice) == 0 { - equilibrated = true - } - numTriesLeft-- - } - if !equilibrated { - Fatal(t, "L2 gas price did not converge", gasPrice) - } - - initialBalance, err := client.BalanceAt(ctx, auth.From, nil) - Require(t, err, "could not get balance") - - // deploy a test contract - _, tx, simple, err := mocksgen.DeploySimple(&auth, client) - Require(t, err, "could not deploy contract") - receipt, err := EnsureTxSucceeded(ctx, client, tx) - Require(t, err) - - header, err := client.HeaderByNumber(ctx, receipt.BlockNumber) - Require(t, err, "could not get header") - if header.BaseFee.Cmp(gasPrice) != 0 { - Fatal(t, "Header has wrong basefee", header.BaseFee, gasPrice) - } - - balance, err := client.BalanceAt(ctx, auth.From, nil) - Require(t, err, "could not get balance") - expectedCost := receipt.GasUsed * gasPrice.Uint64() - observedCost := initialBalance.Uint64() - balance.Uint64() - if expectedCost != observedCost { - Fatal(t, "Expected deployment to cost", expectedCost, "instead of", observedCost) - } - - tx, err = simple.Increment(&auth) - Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, 
client, tx) - Require(t, err) - - counter, err := simple.Counter(&bind.CallOpts{}) - Require(t, err, "failed to get counter") - - if counter != 1 { - Fatal(t, "Unexpected counter value", counter) - } -} - -func TestComponentEstimate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() - - l1BaseFee := new(big.Int).Set(arbostypes.DefaultInitialL1BaseFee) - l2BaseFee := GetBaseFee(t, client, ctx) - - colors.PrintGrey("l1 basefee ", l1BaseFee) - colors.PrintGrey("l2 basefee ", l2BaseFee) - - userBalance := big.NewInt(1e16) - maxPriorityFeePerGas := big.NewInt(0) - maxFeePerGas := arbmath.BigMulByUfrac(l2BaseFee, 3, 2) - - l2info.GenerateAccount("User") - TransferBalance(t, "Owner", "User", userBalance, l2info, client, ctx) - - from := l2info.GetAddress("User") - to := testhelpers.RandomAddress() - gas := uint64(100000000) - calldata := []byte{0x00, 0x12} - value := big.NewInt(4096) - - nodeAbi, err := node_interfacegen.NodeInterfaceMetaData.GetAbi() - Require(t, err) - - nodeMethod := nodeAbi.Methods["gasEstimateComponents"] - estimateCalldata := append([]byte{}, nodeMethod.ID...) - packed, err := nodeMethod.Inputs.Pack(to, false, calldata) - Require(t, err) - estimateCalldata = append(estimateCalldata, packed...) - - msg := ethereum.CallMsg{ - From: from, - To: &types.NodeInterfaceAddress, - Gas: gas, - GasFeeCap: maxFeePerGas, - GasTipCap: maxPriorityFeePerGas, - Value: value, - Data: estimateCalldata, - } - returnData, err := client.CallContract(ctx, msg, nil) - Require(t, err) - - outputs, err := nodeMethod.Outputs.Unpack(returnData) - Require(t, err) - if len(outputs) != 4 { - Fatal(t, "expected 4 outputs from gasEstimateComponents, got", len(outputs)) - } - - gasEstimate, _ := outputs[0].(uint64) - gasEstimateForL1, _ := outputs[1].(uint64) - baseFee, _ := outputs[2].(*big.Int) - l1BaseFeeEstimate, _ := outputs[3].(*big.Int) - - execNode := getExecNode(t, node) - tx := l2info.SignTxAs("User", &types.DynamicFeeTx{ - ChainID: execNode.ArbInterface.BlockChain().Config().ChainID, - Nonce: 0, - GasTipCap: maxPriorityFeePerGas, - GasFeeCap: maxFeePerGas, - Gas: gasEstimate, - To: &to, - Value: value, - Data: calldata, - }) - - l2Estimate := gasEstimate - gasEstimateForL1 - - colors.PrintBlue("Est. ", gasEstimate, " - ", gasEstimateForL1, " = ", l2Estimate) - - if !arbmath.BigEquals(l1BaseFeeEstimate, l1BaseFee) { - Fatal(t, l1BaseFeeEstimate, l1BaseFee) - } - if !arbmath.BigEquals(baseFee, l2BaseFee) { - Fatal(t, baseFee, l2BaseFee.Uint64()) - } - - Require(t, client.SendTransaction(ctx, tx)) - receipt, err := EnsureTxSucceeded(ctx, client, tx) - Require(t, err) - - l2Used := receipt.GasUsed - receipt.GasUsedForL1 - colors.PrintMint("True ", receipt.GasUsed, " - ", receipt.GasUsedForL1, " = ", l2Used) - - if l2Estimate != l2Used { - Fatal(t, l2Estimate, l2Used) - } -} - func callFindBatchContainig(t *testing.T, ctx context.Context, client *ethclient.Client, nodeAbi *abi.ABI, blockNum uint64) uint64 { findBatch := nodeAbi.Methods["findBatchContainingBlock"] callData := append([]byte{}, findBatch.ID...) 
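callFindBatchContainig above, like the relocated TestComponentEstimate, drives NodeInterface by hand: a 4-byte method ID plus ABI-packed arguments, sent as an eth_call to the virtual NodeInterface address that only the node resolves. A minimal sketch of that pattern, assuming a connected *ethclient.Client; the findBatchContaining wrapper itself is illustrative, not part of the patch:

package arbtest

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/offchainlabs/nitro/solgen/go/node_interfacegen"
)

func findBatchContaining(ctx context.Context, client *ethclient.Client, blockNum uint64) (uint64, error) {
	nodeAbi, err := node_interfacegen.NodeInterfaceMetaData.GetAbi()
	if err != nil {
		return 0, err
	}
	method := nodeAbi.Methods["findBatchContainingBlock"]
	packed, err := method.Inputs.Pack(blockNum) // ABI-encode the uint64 argument
	if err != nil {
		return 0, err
	}
	msg := ethereum.CallMsg{
		To:   &types.NodeInterfaceAddress,                       // virtual contract, resolved node-side
		Data: append(append([]byte{}, method.ID...), packed...), // selector + args
	}
	ret, err := client.CallContract(ctx, msg, nil) // nil block number = latest
	if err != nil {
		return 0, err
	}
	out, err := method.Outputs.Unpack(ret)
	if err != nil {
		return 0, err
	}
	batch, ok := out[0].(uint64)
	if !ok {
		return 0, fmt.Errorf("unexpected return type %T", out[0])
	}
	return batch, nil
}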
@@ -338,3 +135,63 @@ func TestFindBatch(t *testing.T) { } } } + +func TestL2BlockRangeForL1(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + user := builder.L1Info.GetDefaultTransactOpts("User", ctx) + + numTransactions := 200 + for i := 0; i < numTransactions; i++ { + builder.L2.TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), builder.L2Info) + } + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) + if err != nil { + t.Fatalf("Error creating node interface: %v", err) + } + + l1BlockNums := map[uint64]*[2]uint64{} + latestL2, err := builder.L2.Client.BlockNumber(ctx) + if err != nil { + t.Fatalf("Error querying most recent l2 block: %v", err) + } + for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ { + l1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, l2BlockNum) + if err != nil { + t.Fatalf("Error querying l1 block number for l2 block: %d, error: %v", l2BlockNum, err) + } + if _, ok := l1BlockNums[l1BlockNum]; !ok { + l1BlockNums[l1BlockNum] = &[2]uint64{l2BlockNum, l2BlockNum} + } + l1BlockNums[l1BlockNum][1] = l2BlockNum + } + + // Test success. + for l1BlockNum := range l1BlockNums { + rng, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum) + if err != nil { + t.Fatalf("Error getting l2 block range for l1 block: %d, error: %v", l1BlockNum, err) + } + expected := l1BlockNums[l1BlockNum] + if rng.FirstBlock != expected[0] || rng.LastBlock != expected[1] { + unexpectedL1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, rng.LastBlock) + if err != nil { + t.Fatalf("Error querying l1 block number for l2 block: %d, error: %v", rng.LastBlock, err) + } + // Handle the edge case when new l2 blocks are produced between when latestL2 was last calculated and now. + if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] || rng.FirstBlock != expected[0] { + t.Errorf("L2BlockRangeForL1(%d) = (%d %d) want (%d %d)", l1BlockNum, rng.FirstBlock, rng.LastBlock, expected[0], expected[1]) + } + } + } + // Test invalid case.
+ if _, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, 1e5); err == nil { + t.Fatalf("L2BlockRangeForL1 didn't fail for an invalid input") + } +} diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index 6b43cc83b0..d0ca0ccda3 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -35,14 +35,15 @@ func TestOutboxProofs(t *testing.T) { withdrawTopic := arbSysAbi.Events["L2ToL1Tx"].ID merkleTopic := arbSysAbi.Events["SendMerkleUpdate"].ID - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) - arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, client) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err) txnCount := int64(1 + rand.Intn(16)) @@ -71,7 +72,7 @@ func TestOutboxProofs(t *testing.T) { txns = append(txns, tx.Hash()) time.Sleep(4 * time.Millisecond) // Geth takes a few ms for the receipt to show up - _, err = client.TransactionReceipt(ctx, tx.Hash()) + _, err = builder.L2.Client.TransactionReceipt(ctx, tx.Hash()) if err == nil { merkleState, err := arbSys.SendMerkleTreeState(&bind.CallOpts{}) Require(t, err, "could not get merkle root") @@ -86,7 +87,7 @@ func TestOutboxProofs(t *testing.T) { for _, tx := range txns { var receipt *types.Receipt - receipt, err = client.TransactionReceipt(ctx, tx) + receipt, err = builder.L2.Client.TransactionReceipt(ctx, tx) Require(t, err, "No receipt for txn") if receipt.Status != types.ReceiptStatusSuccessful { @@ -187,7 +188,7 @@ func TestOutboxProofs(t *testing.T) { // in one lookup, query geth for all the data we need to construct a proof var logs []types.Log if len(query) > 0 { - logs, err = client.FilterLogs(ctx, ethereum.FilterQuery{ + logs, err = builder.L2.Client.FilterLogs(ctx, ethereum.FilterQuery{ Addresses: []common.Address{ types.ArbSysAddress, }, diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index ad08ff7471..10db09275b 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -21,10 +21,11 @@ func TestPurePrecompileMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - arbSys, err := precompilesgen.NewArbSys(common.HexToAddress("0x64"), client) + arbSys, err := precompilesgen.NewArbSys(common.HexToAddress("0x64"), builder.L2.Client) Require(t, err, "could not deploy ArbSys contract") chainId, err := arbSys.ArbChainID(&bind.CallOpts{}) Require(t, err, "failed to get the ChainID") @@ -37,10 +38,11 @@ func TestViewLogReverts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - arbDebug, err :=
precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "could not deploy ArbSys contract") err = arbDebug.EventsView(nil) @@ -53,11 +55,12 @@ func TestCustomSolidityErrors(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() callOpts := &bind.CallOpts{Context: ctx} - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "could not bind ArbDebug contract") customError := arbDebug.CustomRevert(callOpts, 1024) if customError == nil { @@ -69,7 +72,7 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, observedMessage) } - arbSys, err := precompilesgen.NewArbSys(arbos.ArbSysAddress, client) + arbSys, err := precompilesgen.NewArbSys(arbos.ArbSysAddress, builder.L2.Client) Require(t, err, "could not bind ArbSys contract") _, customError = arbSys.ArbBlockHash(callOpts, big.NewInt(1e9)) if customError == nil { @@ -86,11 +89,12 @@ func TestPrecompileErrorGasLeft(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := info.GetDefaultTransactOpts("Faucet", ctx) - _, _, simple, err := mocksgen.DeploySimple(&auth, client) + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + _, _, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) Require(t, err) assertNotAllGasConsumed := func(to common.Address, input []byte) { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go new file mode 100644 index 0000000000..8f587fcaf7 --- /dev/null +++ b/system_tests/recreatestate_rpc_test.go @@ -0,0 +1,461 @@ +package arbtest + +import ( + "context" + "errors" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/util" +) + +func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { + t.Helper() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig = execConfig + cleanup := builder.Build(t) + builder.L2Info.GenerateAccount("User2") + var txs []*types.Transaction + for i := uint64(0); i < txCount; i++ { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + } + for _, tx := range txs { + _, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + + return builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client, cleanup +} + +func fillHeaderCache(t 
*testing.T, bc *core.BlockChain, from, to uint64) { + t.Helper() + for i := from; i <= to; i++ { + header := bc.GetHeaderByNumber(i) + if header == nil { + Fatal(t, "internal test error - failed to get header while trying to fill headerCache, header:", i) + } + } +} + +func fillBlockCache(t *testing.T, bc *core.BlockChain, from, to uint64) { + t.Helper() + for i := from; i <= to; i++ { + block := bc.GetBlockByNumber(i) + if block == nil { + Fatal(t, "internal test error - failed to get block while trying to fill blockCache, block:", i) + } + } +} + +func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, from, to uint64) { + t.Helper() + for i := from; i <= to; i++ { + header := bc.GetHeaderByNumber(i) + if header == nil { + Fatal(t, "failed to get last block header") + } + hash := header.Root + err := db.Delete(hash.Bytes()) + Require(t, err) + } + for i := from; i <= to; i++ { + header := bc.GetHeaderByNumber(i) + _, err := bc.StateAt(header.Root) + if err == nil { + Fatal(t, "internal test error - failed to remove state from db") + } + expectedErr := &trie.MissingNodeError{} + if !errors.As(err, &expectedErr) { + Fatal(t, "internal test error - failed to remove state from db, err: ", err) + } + } +} + +func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + middleBlock := lastBlock / 2 + + expectedBalance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + + removeStatesFromDb(t, bc, db, middleBlock, lastBlock) + + balance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + if balance.Cmp(expectedBalance) != 0 { + Fatal(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) + } +} + +func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + defer cancelNode() + bc := 
+func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable the trie/Database.cleans cache so that states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + middleBlock := lastBlock / 2 + + expectedBalance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + + removeStatesFromDb(t, bc, db, middleBlock, lastBlock) + + balance, err := l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + if balance.Cmp(expectedBalance) != 0 { + Fatal(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) + } +} + +func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = int64(200) + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable the trie/Database.cleans cache so that states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + middleBlock := lastBlock / 2 + + removeStatesFromDb(t, bc, db, middleBlock, lastBlock) + + _, err = l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + if err == nil { + Fatal(t, "Didn't fail as expected") + } + if err.Error() != arbitrum.ErrDepthLimitExceeded.Error() { + Fatal(t, "Failed with unexpected error:", err) + } +} +
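+// TestRecreateStateForRPCMissingBlockParent deletes the header of the earliest block whose state was
+// removed, so that it is not served from the headerCache, and expects state recreation to fail with a
+// missing-parent error.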
+func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { + // HeaderChain.headerCache size limit is currently core.headerCacheLimit = 512 + var headerCacheLimit uint64 = 512 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable the trie/Database.cleans cache so that states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + if lastBlock < headerCacheLimit+4 { + Fatal(t, "Internal test error - not enough blocks produced during preparation, want:", headerCacheLimit+4, "have:", lastBlock) + } + + removeStatesFromDb(t, bc, db, lastBlock-4, lastBlock) + + headerToRemove := lastBlock - 4 + hash := rawdb.ReadCanonicalHash(db, headerToRemove) + rawdb.DeleteHeader(db, hash, headerToRemove) + + firstBlock := lastBlock - headerCacheLimit - 5 + fillHeaderCache(t, bc, firstBlock, firstBlock+headerCacheLimit) + + for i := lastBlock; i > lastBlock-3; i-- { + _, err = l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i)) + if err == nil { + hash := rawdb.ReadCanonicalHash(db, i) + Fatal(t, "Didn't fail to get balance at block:", i, " with hash:", hash, ", lastBlock:", lastBlock) + } + if !strings.Contains(err.Error(), "chain doesn't contain parent of block") { + Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", i, "lastBlock:", lastBlock) + } + } +} + +func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable the trie/Database.cleans cache so that states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + + genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum + removeStatesFromDb(t, bc, db, genesis, lastBlock) + + _, err = l2client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + if err == nil { + hash := rawdb.ReadCanonicalHash(db, lastBlock) + Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) + } + if !strings.Contains(err.Error(), "moved beyond genesis") { + Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) + } +} + +func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { + // BlockChain.blockCache size limit is currently core.blockCacheLimit = 256 + var blockCacheLimit uint64 = 256 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeConfig := gethexec.ConfigDefaultTest() + nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + nodeConfig.Sequencer.MaxBlockSpeed = 0 + nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + nodeConfig.Caching.Archive = true + // disable the trie/Database.cleans cache so that states removed from ChainDb won't be cached there + nodeConfig.Caching.TrieCleanCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + db := execNode.Backend.ChainDb() + + lastBlock, err := l2client.BlockNumber(ctx) + Require(t, err) + if lastBlock < blockCacheLimit+4 { + Fatal(t, "Internal test error - not enough blocks produced during preparation, want:", blockCacheLimit+4, "have:", lastBlock) + } + + removeStatesFromDb(t, bc, db, lastBlock-4, lastBlock) + + blockBodyToRemove := lastBlock - 1 + hash := rawdb.ReadCanonicalHash(db, blockBodyToRemove) + rawdb.DeleteBody(db, hash, blockBodyToRemove) + + firstBlock := lastBlock - blockCacheLimit - 4 + fillBlockCache(t, bc, firstBlock, firstBlock+blockCacheLimit) + + _, err = l2client.BalanceAt(ctx, 
GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + if err == nil { + hash := rawdb.ReadCanonicalHash(db, lastBlock) + Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) + } + if !strings.Contains(err.Error(), "block not found while recreating") { + Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) + } +} + +func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig *gethexec.CachingConfig, txCount int) { + maxRecreateStateDepth := int64(30 * 1000 * 1000) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctx1, cancel1 := context.WithCancel(ctx) + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.ConsensusServer.URL = "" + execConfig.Caching = *cacheConfig + + skipBlocks := execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving + skipGas := execConfig.Caching.MaxAmountOfGasToSkipStateSaving + + feedErrChan := make(chan error, 10) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + Require(t, execConfig.Validate()) + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan) + Require(t, err) + err = node.TxStreamer.AddFakeInitMessage() + Require(t, err) + Require(t, node.Start(ctx1)) + client := ClientForStack(t, stack) + + StartWatchChanErr(t, ctx, feedErrChan, node, execNode) + dataDir := node.Stack.DataDir() + + l2info.GenerateAccount("User2") + var txs []*types.Transaction + for i := 0; i < txCount; i++ { + tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := client.SendTransaction(ctx, tx) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want { + Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) + } + } + genesis := uint64(0) + lastBlock, err := client.BlockNumber(ctx) + Require(t, err) + if want := genesis + uint64(txCount); lastBlock < want { + Fatal(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) + } + expectedBalance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + + node.StopAndWait() + cancel1() + t.Log("stopped first node") + + l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan) + Require(t, err) + Require(t, node.Start(ctx)) + client = ClientForStack(t, stack) + defer 
node.StopAndWait() + bc := execNode.Backend.ArbInterface().BlockChain() + gas := skipGas + blocks := skipBlocks + for i := genesis + 1; i <= genesis+uint64(txCount); i++ { + block := bc.GetBlockByNumber(i) + if block == nil { + Fatal(t, "block not found for block number:", i) + continue + } + gas += block.GasUsed() + blocks++ + _, err := bc.StateAt(block.Root()) + if (skipBlocks == 0 && skipGas == 0) || (skipBlocks != 0 && blocks > skipBlocks) || (skipGas != 0 && gas > skipGas) { + if err != nil { + t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) + } + Require(t, err, "state not found, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash(), "err:", err) + gas = 0 + blocks = 0 + } else { + if err == nil { + t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) + Fatal(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + } + expectedErr := &trie.MissingNodeError{} + if !errors.As(err, &expectedErr) { + Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + } + } + } + for i := genesis + 1; i <= genesis+uint64(txCount); i += i % 10 { + _, err = client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i)) + if err != nil { + t.Log("skipBlocks:", skipBlocks, "skipGas:", skipGas) + } + Require(t, err) + } + + balance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) + Require(t, err) + if balance.Cmp(expectedBalance) != 0 { + Fatal(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) + } +} + +func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { + cacheConfig := gethexec.DefaultCachingConfig + cacheConfig.Archive = true + // test defaults + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 0 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 0 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 + cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) + + // one test block ~ 925000 gas + testBlockGas := uint64(925000) + skipBlockValues := []uint64{0, 1, 2, 3, 5, 21, 51, 100, 101} + var skipGasValues []uint64 + for _, i := range skipBlockValues { + skipGasValues = append(skipGasValues, i*testBlockGas) + } + for _, skipGas := range skipGasValues { + for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { + cacheConfig.MaxAmountOfGasToSkipStateSaving = skipGas + cacheConfig.MaxNumberOfBlocksToSkipStateSaving = uint32(skipBlocks) + testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 100) + } + } +} diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index 7e9cf9fa99..0c04b3395f 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -19,29 +19,28 @@ func TestReorgResequencing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer 
node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - execNode := getExecNode(t, node) - - startMsgCount, err := node.TxStreamer.GetMessageCount() + startMsgCount, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) - l2info.GenerateAccount("Intermediate") - l2info.GenerateAccount("User1") - l2info.GenerateAccount("User2") - l2info.GenerateAccount("User3") - l2info.GenerateAccount("User4") - TransferBalance(t, "Owner", "User1", big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Owner", "Intermediate", big.NewInt(params.Ether*3), l2info, client, ctx) - TransferBalance(t, "Intermediate", "User2", big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Intermediate", "User3", big.NewInt(params.Ether), l2info, client, ctx) + builder.L2Info.GenerateAccount("Intermediate") + builder.L2Info.GenerateAccount("User1") + builder.L2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User3") + builder.L2Info.GenerateAccount("User4") + builder.L2.TransferBalance(t, "Owner", "User1", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, "Owner", "Intermediate", big.NewInt(params.Ether*3), builder.L2Info) + builder.L2.TransferBalance(t, "Intermediate", "User2", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, "Intermediate", "User3", big.NewInt(params.Ether), builder.L2Info) // Intermediate does not have exactly 1 ether because of fees accountsWithBalance := []string{"User1", "User2", "User3"} verifyBalances := func(scenario string) { for _, account := range accountsWithBalance { - balance, err := client.BalanceAt(ctx, l2info.GetAddress(account), nil) + balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress(account), nil) Require(t, err) if balance.Int64() != params.Ether { Fatal(t, "expected account", account, "to have a balance of 1 ether but instead it has", balance, "wei "+scenario) @@ -50,15 +49,15 @@ func TestReorgResequencing(t *testing.T) { } verifyBalances("before reorg") - err = node.TxStreamer.ReorgTo(startMsgCount) + err = builder.L2.ConsensusNode.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = execNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) Require(t, err) verifyBalances("after empty reorg") - prevMessage, err := node.TxStreamer.GetMessage(startMsgCount - 1) + prevMessage, err := builder.L2.ConsensusNode.TxStreamer.GetMessage(startMsgCount - 1) Require(t, err) delayedIndexHash := common.BigToHash(big.NewInt(int64(prevMessage.DelayedMessagesRead))) newMessage := &arbostypes.L1IncomingMessage{ @@ -70,24 +69,24 @@ func TestReorgResequencing(t *testing.T) { RequestId: &delayedIndexHash, L1BaseFee: common.Big0, }, - L2msg: append(l2info.GetAddress("User4").Bytes(), math.U256Bytes(big.NewInt(params.Ether))...), + L2msg: append(builder.L2Info.GetAddress("User4").Bytes(), math.U256Bytes(big.NewInt(params.Ether))...), } - err = node.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ Message: newMessage, DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, }}) Require(t, err) - _, err = execNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) Require(t, err) accountsWithBalance = 
append(accountsWithBalance, "User4") verifyBalances("after reorg with new deposit") - err = node.TxStreamer.ReorgTo(startMsgCount) + err = builder.L2.ConsensusNode.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = execNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t).Await(ctx) Require(t, err) verifyBalances("after second empty reorg") diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 7b0c3a7563..3400af335d 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -6,20 +6,23 @@ package arbtest import ( "context" "math/big" + "strings" "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" @@ -29,64 +32,71 @@ import ( ) func retryableSetup(t *testing.T) ( - *BlockchainTestInfo, - *BlockchainTestInfo, - *ethclient.Client, - *ethclient.Client, + *NodeBuilder, *bridgegen.Inbox, - func(*types.Receipt) common.Hash, + func(*types.Receipt) *types.Transaction, context.Context, func(), ) { ctx, cancel := context.WithCancel(context.Background()) - l2info, l2node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.Build(t) - l2info.GenerateAccount("User2") - l2info.GenerateAccount("Beneficiary") - l2info.GenerateAccount("Burn") + builder.L2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("Beneficiary") + builder.L2Info.GenerateAccount("Burn") - delayedInbox, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) + delayedInbox, err := bridgegen.NewInbox(builder.L1Info.GetAddress("Inbox"), builder.L1.Client) Require(t, err) - delayedBridge, err := arbnode.NewDelayedBridge(l1client, l1info.GetAddress("Bridge"), 0) + delayedBridge, err := arbnode.NewDelayedBridge(builder.L1.Client, builder.L1Info.GetAddress("Bridge"), 0) Require(t, err) - lookupSubmitRetryableL2TxHash := func(l1Receipt *types.Receipt) common.Hash { + lookupL2Tx := func(l1Receipt *types.Receipt) *types.Transaction { messages, err := delayedBridge.LookupMessagesInRange(ctx, l1Receipt.BlockNumber, l1Receipt.BlockNumber, nil) Require(t, err) if len(messages) == 0 { - Fatal(t, "didn't find message for retryable submission") + Fatal(t, "didn't find message for submission") } var submissionTxs []*types.Transaction + msgTypes := map[uint8]bool{ + arbostypes.L1MessageType_SubmitRetryable: true, + arbostypes.L1MessageType_EthDeposit: true, + arbostypes.L1MessageType_L2Message: true, + } + txTypes := map[uint8]bool{ + types.ArbitrumSubmitRetryableTxType: true, + types.ArbitrumDepositTxType: true, + types.ArbitrumContractTxType: true, + } for _, message := range messages { - if message.Message.Header.Kind != 
arbostypes.L1MessageType_SubmitRetryable { + if !msgTypes[message.Message.Header.Kind] { continue } txs, err := arbos.ParseL2Transactions(message.Message, params.ArbitrumDevTestChainConfig().ChainID, nil) Require(t, err) for _, tx := range txs { - if tx.Type() == types.ArbitrumSubmitRetryableTxType { + if txTypes[tx.Type()] { submissionTxs = append(submissionTxs, tx) } } } if len(submissionTxs) != 1 { - Fatal(t, "expected 1 tx from retryable submission, found", len(submissionTxs)) + Fatal(t, "expected 1 tx from submission, found", len(submissionTxs)) } - - return submissionTxs[0].Hash() + return submissionTxs[0] } // burn some gas so that the faucet's Callvalue + Balance never exceeds a uint256 discard := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - TransferBalance(t, "Faucet", "Burn", discard, l2info, l2client, ctx) + builder.L2.TransferBalance(t, "Faucet", "Burn", discard, builder.L2Info) teardown := func() { // check the integrity of the RPC - blockNum, err := l2client.BlockNumber(ctx) + blockNum, err := builder.L2.Client.BlockNumber(ctx) Require(t, err, "failed to get L2 block number") for number := uint64(0); number < blockNum; number++ { - block, err := l2client.BlockByNumber(ctx, arbmath.UintToBig(number)) + block, err := builder.L2.Client.BlockByNumber(ctx, arbmath.UintToBig(number)) Require(t, err, "failed to get L2 block", number, "of", blockNum) if block.Number().Uint64() != number { Fatal(t, "block number mismatch", number, block.Number().Uint64()) @@ -95,19 +105,20 @@ func retryableSetup(t *testing.T) ( cancel() - l2node.StopAndWait() - requireClose(t, l1stack) + builder.L2.ConsensusNode.StopAndWait() + requireClose(t, builder.L1.Stack) } - return l2info, l1info, l2client, l1client, delayedInbox, lookupSubmitRetryableL2TxHash, ctx, teardown + return builder, delayedInbox, lookupL2Tx, ctx, teardown } func TestRetryableNoExist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, l2client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) _, err = arbRetryableTx.GetTimeout(&bind.CallOpts{}, common.Hash{}) if err.Error() != "execution reverted: error NoTicketWithID()" { @@ -117,20 +128,20 @@ func TestRetryableNoExist(t *testing.T) { func TestSubmitRetryableImmediateSuccess(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, lookupSubmitRetryableL2TxHash, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - user2Address := l2info.GetAddress("User2") - beneficiaryAddress := l2info.GetAddress("Beneficiary") + user2Address := builder.L2Info.GetAddress("User2") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) callValue := big.NewInt(1e6) - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err, "failed to deploy NodeInterface") - // estimate the gas needed to auto-redeem the retryable - usertxoptsL2 := l2info.GetDefaultTransactOpts("Faucet", ctx) + // estimate the gas needed to auto 
redeem the retryable + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) usertxoptsL2.NoSend = true usertxoptsL2.GasMargin = 0 tx, err := nodeInterface.EstimateRetryableTicket( @@ -147,8 +158,8 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { estimate := tx.Gas() colors.PrintBlue("estimate: ", estimate) - // submit & auto-redeem the retryable using the gas estimate - usertxoptsL1 := l1info.GetDefaultTransactOpts("Faucet", ctx) + // submit & auto redeem the retryable using the gas estimate + usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxoptsL1.Value = deposit l1tx, err := delayedInbox.CreateRetryableTicket( &usertxoptsL1, @@ -163,21 +174,21 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { ) Require(t, err) - l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) - if l1receipt.Status != types.ReceiptStatusSuccessful { - Fatal(t, "l1receipt indicated failure") + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) - receipt, err := WaitForTx(ctx, l2client, lookupSubmitRetryableL2TxHash(l1receipt), time.Second*5) + receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { Fatal(t) } - l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if !arbmath.BigEquals(l2balance, big.NewInt(1e6)) { @@ -187,18 +198,18 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { func TestSubmitRetryableFailThenRetry(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, lookupSubmitRetryableL2TxHash, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) - usertxopts := l1info.GetDefaultTransactOpts("Faucet", ctx) + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + usertxopts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxopts.Value = arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - simpleAddr, simple := deploySimple(t, ctx, ownerTxOpts, l2client) + simpleAddr, simple := builder.L2.DeploySimple(t, ownerTxOpts) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) - beneficiaryAddress := l2info.GetAddress("Beneficiary") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") l1tx, err := delayedInbox.CreateRetryableTicket( &usertxopts, simpleAddr, @@ -213,45 +224,42 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { ) Require(t, err) - l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) - if l1receipt.Status != types.ReceiptStatusSuccessful { - Fatal(t, "l1receipt indicated failure") + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) - receipt, err := WaitForTx(ctx, l2client, lookupSubmitRetryableL2TxHash(l1receipt), time.Second*5) + receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) Require(t, err) - if receipt.Status != types.ReceiptStatusSuccessful { - Fatal(t) - } if 
len(receipt.Logs) != 2 { Fatal(t, len(receipt.Logs)) } ticketId := receipt.Logs[0].Topics[1] firstRetryTxId := receipt.Logs[1].Topics[2] - // get receipt for the auto-redeem, make sure it failed - receipt, err = WaitForTx(ctx, l2client, firstRetryTxId, time.Second*5) + // get receipt for the auto redeem, make sure it failed + receipt, err = WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) Require(t, err) if receipt.Status != types.ReceiptStatusFailed { Fatal(t, receipt.GasUsed) } - arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) tx, err := arbRetryableTx.Redeem(&ownerTxOpts, ticketId) Require(t, err) - receipt, err = EnsureTxSucceeded(ctx, l2client, tx) + receipt, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) retryTxId := receipt.Logs[0].Topics[2] // check the receipt for the retry - receipt, err = WaitForTx(ctx, l2client, retryTxId, time.Second*1) + receipt, err = WaitForTx(ctx, builder.L2.Client, retryTxId, time.Second*1) Require(t, err) - if receipt.Status != 1 { + if receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, receipt.Status) } @@ -279,31 +287,38 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { func TestSubmissionGasCosts(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, _, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() + infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, builder) + elevateL2Basefee(t, ctx, builder) - usertxopts := l1info.GetDefaultTransactOpts("Faucet", ctx) + usertxopts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxopts.Value = arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - l2info.GenerateAccount("Refund") - l2info.GenerateAccount("Receive") - faucetAddress := util.RemapL1Address(l1info.GetAddress("Faucet")) - beneficiaryAddress := l2info.GetAddress("Beneficiary") - feeRefundAddress := l2info.GetAddress("Refund") - receiveAddress := l2info.GetAddress("Receive") + builder.L2Info.GenerateAccount("Refund") + builder.L2Info.GenerateAccount("Receive") + faucetAddress := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + feeRefundAddress := builder.L2Info.GetAddress("Refund") + receiveAddress := builder.L2Info.GetAddress("Receive") colors.PrintBlue("Faucet ", faucetAddress) colors.PrintBlue("Receive ", receiveAddress) colors.PrintBlue("Beneficiary ", beneficiaryAddress) colors.PrintBlue("Fee Refund ", feeRefundAddress) - fundsBeforeSubmit, err := l2client.BalanceAt(ctx, faucetAddress, nil) + fundsBeforeSubmit, err := builder.L2.Client.BalanceAt(ctx, faucetAddress, nil) + Require(t, err) + + infraBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) + Require(t, err) + networkBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) usefulGas := params.TxGas excessGasLimit := uint64(808) - maxSubmissionFee := big.NewInt(1e13) + maxSubmissionFee := big.NewInt(1e14) retryableGas := arbmath.UintToBig(usefulGas + excessGasLimit) // will only burn the intrinsic cost retryableL2CallValue := big.NewInt(1e4) retryableCallData := []byte{} @@ -321,32 +336,54 @@ func TestSubmissionGasCosts(t *testing.T) { ) Require(t, err) - l1receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := 
builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") + } + + waitForL1DelayBlocks(t, ctx, builder) + + submissionTxOuter := lookupL2Tx(l1Receipt) + submissionReceipt, err := builder.L2.EnsureTxSucceeded(submissionTxOuter) Require(t, err) - if l1receipt.Status != types.ReceiptStatusSuccessful { - Fatal(t, "l1receipt indicated failure") + if len(submissionReceipt.Logs) != 2 { + Fatal(t, "Unexpected number of logs:", len(submissionReceipt.Logs)) } + firstRetryTxId := submissionReceipt.Logs[1].Topics[2] + // get receipt for the auto redeem + redeemReceipt, err := WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) + Require(t, err) + if redeemReceipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "first retry tx failed") + } + redeemBlock, err := builder.L2.Client.HeaderByNumber(ctx, redeemReceipt.BlockNumber) + Require(t, err) - waitForL1DelayBlocks(t, ctx, l1client, l1info) - l2BaseFee := GetBaseFee(t, l2client, ctx) + l2BaseFee := redeemBlock.BaseFee excessGasPrice := arbmath.BigSub(gasFeeCap, l2BaseFee) excessWei := arbmath.BigMulByUint(l2BaseFee, excessGasLimit) excessWei.Add(excessWei, arbmath.BigMul(excessGasPrice, retryableGas)) - fundsAfterSubmit, err := l2client.BalanceAt(ctx, faucetAddress, nil) + fundsAfterSubmit, err := builder.L2.Client.BalanceAt(ctx, faucetAddress, nil) + Require(t, err) + beneficiaryFunds, err := builder.L2.Client.BalanceAt(ctx, beneficiaryAddress, nil) + Require(t, err) + refundFunds, err := builder.L2.Client.BalanceAt(ctx, feeRefundAddress, nil) Require(t, err) - beneficiaryFunds, err := l2client.BalanceAt(ctx, beneficiaryAddress, nil) + receiveFunds, err := builder.L2.Client.BalanceAt(ctx, receiveAddress, nil) Require(t, err) - refundFunds, err := l2client.BalanceAt(ctx, feeRefundAddress, nil) + + infraBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - receiveFunds, err := l2client.BalanceAt(ctx, receiveAddress, nil) + networkBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) colors.PrintBlue("CallGas ", retryableGas) colors.PrintMint("Gas cost ", arbmath.BigMul(retryableGas, l2BaseFee)) colors.PrintBlue("Payment ", usertxopts.Value) colors.PrintMint("Faucet before ", fundsBeforeSubmit) colors.PrintMint("Faucet after ", fundsAfterSubmit) // the retryable should pay the receiver the supplied callvalue @@ -385,13 +422,434 @@ func TestSubmissionGasCosts(t *testing.T) { colors.PrintRed("Off by ", arbmath.BigSub(expectedGasChange, diff)) Fatal(t, "Supplied gas was improperly deducted\n", fundsBeforeSubmit, "\n", fundsAfterSubmit) } + + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) + Require(t, err) + minimumBaseFee, err := arbGasInfo.GetMinimumGasPrice(&bind.CallOpts{Context: ctx}) + Require(t, err) + + expectedFee := arbmath.BigMulByUint(l2BaseFee, usefulGas) + expectedInfraFee := arbmath.BigMulByUint(minimumBaseFee, usefulGas) + expectedNetworkFee := arbmath.BigSub(expectedFee, expectedInfraFee) + + infraFee := arbmath.BigSub(infraBalanceAfter, infraBalanceBefore) + networkFee := arbmath.BigSub(networkBalanceAfter, networkBalanceBefore) + fee := arbmath.BigAdd(infraFee, networkFee) + + colors.PrintMint("paid infra fee: ", infraFee) + colors.PrintMint("paid network fee: ", networkFee) + colors.PrintMint("paid fee: ", fee) +
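+ // the infra fee account is credited at the chain's minimum gas price while the network fee account
+ // receives the rest of the base fee, so both balances should grow by exactly the amounts computed above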
+ if !arbmath.BigEquals(infraFee, expectedInfraFee) { + Fatal(t, "Unexpected infra fee paid, want:", expectedInfraFee, "have:", infraFee) + } + if !arbmath.BigEquals(networkFee, expectedNetworkFee) { + Fatal(t, "Unexpected network fee paid, want:", expectedNetworkFee, "have:", networkFee) + } } -func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient.Client, l1info *BlockchainTestInfo) { +func waitForL1DelayBlocks(t *testing.T, ctx context.Context, builder *NodeBuilder) { // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } } + +func TestDepositETH(t *testing.T) { + t.Parallel() + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + defer teardown() + + faucetAddr := builder.L1Info.GetAddress("Faucet") + + oldBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, nil) + if err != nil { + t.Fatalf("BalanceAt(%v) unexpected error: %v", faucetAddr, err) + } + + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + txOpts.Value = big.NewInt(13) + + l1tx, err := delayedInbox.DepositEth0(&txOpts) + if err != nil { + t.Fatalf("DepositEth0() unexpected error: %v", err) + } + + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + if err != nil { + t.Fatalf("EnsureTxSucceeded() unexpected error: %v", err) + } + if l1Receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("Got transaction status: %v, want: %v", l1Receipt.Status, types.ReceiptStatusSuccessful) + } + waitForL1DelayBlocks(t, ctx, builder) + + l2Receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) + if err != nil { + t.Fatalf("EnsureTxSucceeded unexpected error: %v", err) + } + newBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, l2Receipt.BlockNumber) + if err != nil { + t.Fatalf("BalanceAt(%v) unexpected error: %v", faucetAddr, err) + } + if got := new(big.Int); got.Sub(newBalance, oldBalance).Cmp(txOpts.Value) != 0 { + t.Errorf("Got transferred: %v, want: %v", got, txOpts.Value) + } +} + +func TestArbitrumContractTx(t *testing.T) { + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + defer teardown() + faucetL2Addr := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) + builder.L2.TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), builder.L2Info) + + l2TxOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + l2ContractAddr, _ := builder.L2.DeploySimple(t, l2TxOpts) + l2ContractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) + if err != nil { + t.Fatalf("Error parsing contract ABI: %v", err) + } + data, err := l2ContractABI.Pack("checkCalls", true, true, false, false, false, false) + if err != nil { + t.Fatalf("Error packing method's call data: %v", err) + } + unsignedTx := types.NewTx(&types.ArbitrumContractTx{ + ChainId: builder.L2Info.Signer.ChainID(), + From: faucetL2Addr, + GasFeeCap: builder.L2Info.GasPrice.Mul(builder.L2Info.GasPrice, big.NewInt(2)), + Gas: 1e6, + To: &l2ContractAddr, + Value: common.Big0, + Data: data, + }) + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx)
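+ // relay the same fields through the L1 delayed inbox; the node parses the delayed message into an
+ // L2 ArbitrumContractTx, which lookupL2Tx recovers below so the test can wait for its receipt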
+ l1tx, err := delayedInbox.SendContractTransaction( + &txOpts, + arbmath.UintToBig(unsignedTx.Gas()), + unsignedTx.GasFeeCap(), + *unsignedTx.To(), + unsignedTx.Value(), + unsignedTx.Data(), + ) + if err != nil { + t.Fatalf("Error sending unsigned transaction: %v", err) + } + receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) + } + waitForL1DelayBlocks(t, ctx, builder) + _, err = builder.L2.EnsureTxSucceeded(lookupL2Tx(receipt)) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) + } +} + +func TestL1FundedUnsignedTransaction(t *testing.T) { + t.Parallel() + ctx := context.Background() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + faucetL2Addr := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) + // Transfer balance to Faucet's corresponding L2 address, so that there is + // enough balance in its account for executing the L2 transaction. + builder.L2.TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), builder.L2Info) + + l2TxOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + contractAddr, _ := builder.L2.DeploySimple(t, l2TxOpts) + contractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) + if err != nil { + t.Fatalf("Error parsing contract ABI: %v", err) + } + data, err := contractABI.Pack("checkCalls", true, true, false, false, false, false) + if err != nil { + t.Fatalf("Error packing method's call data: %v", err) + } + nonce, err := builder.L2.Client.NonceAt(ctx, faucetL2Addr, nil) + if err != nil { + t.Fatalf("Error getting nonce at address: %v, error: %v", faucetL2Addr, err) + } + unsignedTx := types.NewTx(&types.ArbitrumUnsignedTx{ + ChainId: builder.L2Info.Signer.ChainID(), + From: faucetL2Addr, + Nonce: nonce, + GasFeeCap: builder.L2Info.GasPrice, + Gas: 1e6, + To: &contractAddr, + Value: common.Big0, + Data: data, + }) + + delayedInbox, err := bridgegen.NewInbox(builder.L1Info.GetAddress("Inbox"), builder.L1.Client) + if err != nil { + t.Fatalf("Error getting Go binding of L1 Inbox contract: %v", err) + } + + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + l1tx, err := delayedInbox.SendUnsignedTransaction( + &txOpts, + arbmath.UintToBig(unsignedTx.Gas()), + unsignedTx.GasFeeCap(), + arbmath.UintToBig(unsignedTx.Nonce()), + *unsignedTx.To(), + unsignedTx.Value(), + unsignedTx.Data(), + ) + if err != nil { + t.Fatalf("Error sending unsigned transaction: %v", err) + } + receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) + } + waitForL1DelayBlocks(t, ctx, builder) + receipt, err = builder.L2.EnsureTxSucceeded(unsignedTx) + if err != nil { + t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) + } + if receipt.Status != types.ReceiptStatusSuccessful { + t.Errorf("L2 transaction: %v has failed", receipt.TxHash) + } +} + +func TestRetryableSubmissionAndRedeemFees(t *testing.T) { + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + defer teardown() + infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, builder) + + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + simpleAddr, simple := builder.L2.DeploySimple(t, ownerTxOpts) + simpleABI, err := mocksgen.SimpleMetaData.GetAbi() + Require(t, err) + + 
elevateL2Basefee(t, ctx, builder) + + infraBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) + Require(t, err) + networkBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) + Require(t, err) + + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) + callValue := common.Big0 + usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL1.Value = deposit + baseFee := builder.L2.GetBaseFee(t) + l1tx, err := delayedInbox.CreateRetryableTicket( + &usertxoptsL1, + simpleAddr, + callValue, + big.NewInt(1e16), + beneficiaryAddress, + beneficiaryAddress, + // send enough L2 gas for intrinsic but not compute + big.NewInt(int64(params.TxGas+params.TxDataNonZeroGasEIP2028*4)), + big.NewInt(baseFee.Int64()*2), + simpleABI.Methods["incrementRedeem"].ID, + ) + Require(t, err) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) + Require(t, err) + if l1Receipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "l1Receipt indicated failure") + } + + waitForL1DelayBlocks(t, ctx, builder) + + submissionTxOuter := lookupL2Tx(l1Receipt) + submissionReceipt, err := builder.L2.EnsureTxSucceeded(submissionTxOuter) + Require(t, err) + if len(submissionReceipt.Logs) != 2 { + Fatal(t, len(submissionReceipt.Logs)) + } + ticketId := submissionReceipt.Logs[0].Topics[1] + firstRetryTxId := submissionReceipt.Logs[1].Topics[2] + // get receipt for the auto redeem, make sure it failed + autoRedeemReceipt, err := WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) + Require(t, err) + if autoRedeemReceipt.Status != types.ReceiptStatusFailed { + Fatal(t, "first retry tx shouldn't have succeeded") + } + + infraBalanceAfterSubmission, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) + Require(t, err) + networkBalanceAfterSubmission, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) + Require(t, err) + + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) + Require(t, err) + tx, err := arbRetryableTx.Redeem(&usertxoptsL2, ticketId) + Require(t, err) + redeemReceipt, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + retryTxId := redeemReceipt.Logs[0].Topics[2] + + // check the receipt for the retry + retryReceipt, err := WaitForTx(ctx, builder.L2.Client, retryTxId, time.Second*1) + Require(t, err) + if retryReceipt.Status != types.ReceiptStatusSuccessful { + Fatal(t, "retry failed") + } + + infraBalanceAfterRedeem, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) + Require(t, err) + networkBalanceAfterRedeem, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) + Require(t, err) + + // verify that the increment happened, so we know the retry succeeded + counter, err := simple.Counter(&bind.CallOpts{}) + Require(t, err) + + if counter != 1 { + Fatal(t, "Unexpected counter:", counter) + } + + if len(retryReceipt.Logs) != 1 { + Fatal(t, "Unexpected log count:", len(retryReceipt.Logs)) + } + parsed, err := simple.ParseRedeemedEvent(*retryReceipt.Logs[0]) + Require(t, err) + aliasedSender := util.RemapL1Address(usertxoptsL1.From) + if parsed.Caller != aliasedSender { + Fatal(t, "Unexpected caller", parsed.Caller, "expected", aliasedSender) + } + if parsed.Redeemer != usertxoptsL2.From { + Fatal(t, "Unexpected redeemer", parsed.Redeemer, "expected", usertxoptsL2.From) + } + + infraSubmissionFee := 
arbmath.BigSub(infraBalanceAfterSubmission, infraBalanceBefore) + networkSubmissionFee := arbmath.BigSub(networkBalanceAfterSubmission, networkBalanceBefore) + infraRedeemFee := arbmath.BigSub(infraBalanceAfterRedeem, infraBalanceAfterSubmission) + networkRedeemFee := arbmath.BigSub(networkBalanceAfterRedeem, networkBalanceAfterSubmission) + + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) + Require(t, err) + minimumBaseFee, err := arbGasInfo.GetMinimumGasPrice(&bind.CallOpts{Context: ctx}) + Require(t, err) + submissionBaseFee := builder.L2.GetBaseFeeAt(t, submissionReceipt.BlockNumber) + submissionTx, ok := submissionTxOuter.GetInner().(*types.ArbitrumSubmitRetryableTx) + if !ok { + Fatal(t, "inner tx isn't ArbitrumSubmitRetryableTx") + } + // submission + auto redeemed retry expected fees + retryableSubmissionFee := retryables.RetryableSubmissionFee(len(submissionTx.RetryData), submissionTx.L1BaseFee) + expectedSubmissionFee := arbmath.BigMulByUint(submissionBaseFee, autoRedeemReceipt.GasUsed) + expectedInfraSubmissionFee := arbmath.BigMulByUint(minimumBaseFee, autoRedeemReceipt.GasUsed) + expectedNetworkSubmissionFee := arbmath.BigAdd( + arbmath.BigSub(expectedSubmissionFee, expectedInfraSubmissionFee), + retryableSubmissionFee, + ) + + retryTxOuter, _, err := builder.L2.Client.TransactionByHash(ctx, retryTxId) + Require(t, err) + retryTx, ok := retryTxOuter.GetInner().(*types.ArbitrumRetryTx) + if !ok { + Fatal(t, "inner tx isn't ArbitrumRetryTx") + } + redeemBaseFee := builder.L2.GetBaseFeeAt(t, redeemReceipt.BlockNumber) + + t.Log("redeem base fee:", redeemBaseFee) + // redeem & retry expected fees + redeemGasUsed := redeemReceipt.GasUsed - redeemReceipt.GasUsedForL1 - retryTx.Gas + retryReceipt.GasUsed + expectedRedeemFee := arbmath.BigMulByUint(redeemBaseFee, redeemGasUsed) + expectedInfraRedeemFee := arbmath.BigMulByUint(minimumBaseFee, redeemGasUsed) + expectedNetworkRedeemFee := arbmath.BigSub(expectedRedeemFee, expectedInfraRedeemFee) + + t.Log("submission gas: ", submissionReceipt.GasUsed) + t.Log("auto redeemed retry gas:", autoRedeemReceipt.GasUsed) + t.Log("redeem gas: ", redeemReceipt.GasUsed) + t.Log("retry gas: ", retryReceipt.GasUsed) + colors.PrintMint("submission and auto redeemed retry - paid infra fee: ", infraSubmissionFee) + colors.PrintBlue("submission and auto redeemed retry - expected infra fee: ", expectedInfraSubmissionFee) + colors.PrintMint("submission and auto redeemed retry - paid network fee: ", networkSubmissionFee) + colors.PrintBlue("submission and auto redeemed retry - expected network fee: ", expectedNetworkSubmissionFee) + colors.PrintMint("redeem and retry - paid infra fee: ", infraRedeemFee) + colors.PrintBlue("redeem and retry - expected infra fee: ", expectedInfraRedeemFee) + colors.PrintMint("redeem and retry - paid network fee: ", networkRedeemFee) + colors.PrintBlue("redeem and retry - expected network fee: ", expectedNetworkRedeemFee) + if !arbmath.BigEquals(infraSubmissionFee, expectedInfraSubmissionFee) { + Fatal(t, "Unexpected infra fee paid by submission and auto redeem, want:", expectedInfraSubmissionFee, "have:", infraSubmissionFee) + } + if !arbmath.BigEquals(networkSubmissionFee, expectedNetworkSubmissionFee) { + Fatal(t, "Unexpected network fee paid by submission and auto redeem, want:", expectedNetworkSubmissionFee, "have:", networkSubmissionFee) + } + if !arbmath.BigEquals(infraRedeemFee, expectedInfraRedeemFee) { + Fatal(t, "Unexpected infra fee paid by redeem and retry, want:", 
expectedInfraRedeemFee, "have:", infraRedeemFee) + } + if !arbmath.BigEquals(networkRedeemFee, expectedNetworkRedeemFee) { + Fatal(t, "Unexpected network fee paid by redeem and retry, want:", expectedNetworkRedeemFee, "have:", networkRedeemFee) + } +} + +// elevateL2Basefee raises the L2 base fee by burning gas in excess of the speed limit +func elevateL2Basefee(t *testing.T, ctx context.Context, builder *NodeBuilder) { + baseFeeBefore := builder.L2.GetBaseFee(t) + colors.PrintBlue("Elevating base fee...") + arbostestabi, err := precompilesgen.ArbosTestMetaData.GetAbi() + Require(t, err) + _, err = precompilesgen.NewArbosTest(common.HexToAddress("0x69"), builder.L2.Client) + Require(t, err, "failed to bind ArbosTest") + + burnAmount := gethexec.ConfigDefaultTest().RPC.RPCGasCap + burnTarget := uint64(5 * l2pricing.InitialSpeedLimitPerSecondV6 * l2pricing.InitialBacklogTolerance) + for i := uint64(0); i < (burnTarget+burnAmount)/burnAmount; i++ { + burnArbGas := arbostestabi.Methods["burnArbGas"] + data, err := burnArbGas.Inputs.Pack(arbmath.UintToBig(burnAmount - builder.L2Info.TransferGas)) + Require(t, err) + input := append([]byte{}, burnArbGas.ID...) + input = append(input, data...) + to := common.HexToAddress("0x69") + tx := builder.L2Info.PrepareTxTo("Faucet", &to, burnAmount, big.NewInt(0), input) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + baseFee := builder.L2.GetBaseFee(t) + colors.PrintBlue("New base fee: ", baseFee, " diff:", baseFee.Uint64()-baseFeeBefore.Uint64()) +} + +func setupFeeAddresses(t *testing.T, ctx context.Context, builder *NodeBuilder) (common.Address, common.Address) { + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + ownerCallOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) + // make "Owner" a chain owner + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) + Require(t, err, "failed to bind ArbDebug") + tx, err := arbdebug.BecomeChainOwner(&ownerTxOpts) + Require(t, err, "failed to call BecomeChainOwner") + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), builder.L2.Client) + Require(t, err) + arbownerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), builder.L2.Client) + Require(t, err) + builder.L2Info.GenerateAccount("InfraFee") + builder.L2Info.GenerateAccount("NetworkFee") + networkFeeAddr := builder.L2Info.GetAddress("NetworkFee") + infraFeeAddr := builder.L2Info.GetAddress("InfraFee") + tx, err = arbowner.SetNetworkFeeAccount(&ownerTxOpts, networkFeeAddr) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + networkFeeAccount, err := arbownerPublic.GetNetworkFeeAccount(ownerCallOpts) + Require(t, err) + tx, err = arbowner.SetInfraFeeAccount(&ownerTxOpts, infraFeeAddr) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + infraFeeAccount, err := arbownerPublic.GetInfraFeeAccount(ownerCallOpts) + Require(t, err) + t.Log("Infra fee account: ", infraFeeAccount) + t.Log("Network fee account: ", networkFeeAccount) + return infraFeeAddr, networkFeeAddr +} diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index c14a1e6aea..886a0528c7 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -14,7 +14,6 @@ import ( "github.com/go-redis/redis/v8" "github.com/ethereum/go-ethereum/common" - 
"github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -47,28 +46,30 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL2Test() - nodeConfig.SeqCoordinator.Enable = true - nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.nodeConfig.SeqCoordinator.Enable = true + builder.nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) - l2Info := NewArbTestInfo(t, params.ArbitrumDevTestChainConfig().ChainID) + l2Info := builder.L2Info // stdio protocol makes sure forwarder initialization doesn't fail nodeNames := []string{"stdio://A", "stdio://B", "stdio://C", "stdio://D", "stdio://E"} - nodes := make([]*arbnode.Node, len(nodeNames)) + testNodes := make([]*TestClient, len(nodeNames)) // init DB to known state - initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + initRedisForTest(t, ctx, builder.nodeConfig.SeqCoordinator.RedisUrl, nodeNames) createStartNode := func(nodeNum int) { - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[nodeNum] - _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, nil, false) - nodes[nodeNum] = node + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] + builder.L2Info = l2Info + builder.Build(t) + testNodes[nodeNum] = builder.L2 } trySequencing := func(nodeNum int) bool { - node := nodes[nodeNum] + node := testNodes[nodeNum].ConsensusNode curMsgs, err := node.TxStreamer.GetMessageCountSync(t) Require(t, err) emptyMessage := arbostypes.MessageWithMetadata{ @@ -97,14 +98,15 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { // node(n) has higher prio than node(n+1), so should be impossible for more than one to succeed trySequencingEverywhere := func() int { succeeded := -1 - for nodeNum, node := range nodes { + for nodeNum, testNode := range testNodes { + node := testNode.ConsensusNode if node == nil { continue } if trySequencing(nodeNum) { if succeeded >= 0 { t.Fatal("sequnced succeeded in parallel", - "index1:", succeeded, "debug", nodes[succeeded].SeqCoordinator.DebugPrint(), + "index1:", succeeded, "debug", testNodes[succeeded].ConsensusNode.SeqCoordinator.DebugPrint(), "index2:", nodeNum, "debug", node.SeqCoordinator.DebugPrint(), "now", time.Now().UnixMilli()) } @@ -115,7 +117,8 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } waitForMsgEverywhere := func(msgNum arbutil.MessageIndex) { - for _, currentNode := range nodes { + for _, testNode := range testNodes { + currentNode := testNode.ConsensusNode if currentNode == nil { continue } @@ -128,7 +131,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { if attempts > 10 { Fatal(t, "timeout waiting for msg ", msgNum, " debug: ", currentNode.SeqCoordinator.DebugPrint()) } - <-time.After(nodeConfig.SeqCoordinator.UpdateInterval / 3) + <-time.After(builder.nodeConfig.SeqCoordinator.UpdateInterval / 3) } } } @@ -136,16 +139,16 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { var needsStop []*arbnode.Node killNode := func(nodeNum int) { if nodeNum%3 == 0 { - nodes[nodeNum].SeqCoordinator.PrepareForShutdown() - needsStop = append(needsStop, nodes[nodeNum]) + testNodes[nodeNum].ConsensusNode.SeqCoordinator.PrepareForShutdown() + needsStop = append(needsStop, testNodes[nodeNum].ConsensusNode) } else { - 
nodes[nodeNum].StopAndWait() + testNodes[nodeNum].ConsensusNode.StopAndWait() } - nodes[nodeNum] = nil + testNodes[nodeNum].ConsensusNode = nil } nodeForwardTarget := func(nodeNum int) int { - execNode := getExecNode(t, nodes[nodeNum]) + execNode := testNodes[nodeNum].ExecNode fwTarget := execNode.TxPublisher.(*gethexec.TxPreChecker).TransactionPublisher.(*gethexec.Sequencer).ForwardTarget() if fwTarget == "" { return -1 @@ -177,7 +180,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { t.Log("Starting other nodes") - for i := 1; i < len(nodes); i++ { + for i := 1; i < len(testNodes); i++ { createStartNode(i) } @@ -188,7 +191,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { for { // all remaining nodes know which is the chosen one - for i := currentSequencer + 1; i < len(nodes); i++ { + for i := currentSequencer + 1; i < len(testNodes); i++ { for attempts := 1; nodeForwardTarget(i) != currentSequencer; attempts++ { if attempts > 10 { t.Fatal("initial forward target not set") @@ -197,7 +200,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } } - // sequencing suceeds only on the leder + // sequencing succeeds only on the leader for i := arbutil.MessageIndex(0); i < messagesPerRound; i++ { if sequencer := trySequencingEverywhere(); sequencer != currentSequencer { Fatal(t, "unexpected sequencer. expected: ", currentSequencer, " got ", sequencer) @@ -205,7 +208,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { sequencedMesssages++ } - if currentSequencer == len(nodes)-1 { + if currentSequencer == len(testNodes)-1 { addNodes = true } if addNodes { @@ -232,7 +235,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } if sequencer == -1 || (addNodes && (sequencer == currentSequencer+1)) { - time.Sleep(nodeConfig.SeqCoordinator.LockoutDuration / 5) + time.Sleep(builder.nodeConfig.SeqCoordinator.LockoutDuration / 5) continue } if sequencer == currentSequencer { @@ -257,7 +260,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { waitForMsgEverywhere(sequencedMesssages) } - for nodeNum := range nodes { + for nodeNum := range testNodes { killNode(nodeNum) } for _, node := range needsStop { @@ -270,21 +273,19 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.SeqCoordinator.Enable = true - nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) - nodeConfig.BatchPoster.Enable = false + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.SeqCoordinator.Enable = true + builder.nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) + builder.nodeConfig.BatchPoster.Enable = false nodeNames := []string{"stdio://A", "stdio://B"} + initRedisForTest(t, ctx, builder.nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] - initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + cleanup := builder.Build(t) + defer cleanup() - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[0] - l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, params.ArbitrumDevTestChainConfig(), nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - - redisClient, err := redisutil.RedisClientFromURL(nodeConfig.SeqCoordinator.RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(builder.nodeConfig.SeqCoordinator.RedisUrl) Require(t, err) defer 
redisClient.Close() @@ -292,44 +293,45 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { for { err := redisClient.Get(ctx, redisutil.CHOSENSEQ_KEY).Err() if errors.Is(err, redis.Nil) { - time.Sleep(nodeConfig.SeqCoordinator.UpdateInterval) + time.Sleep(builder.nodeConfig.SeqCoordinator.UpdateInterval) continue } Require(t, err) break } - l2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - nodeConfigDup := *nodeConfig - nodeConfig = &nodeConfigDup + nodeConfigDup := *builder.nodeConfig + builder.nodeConfig = &nodeConfigDup - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[1] + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { - nodeConfig.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - nodeConfig.SeqCoordinator.Signing.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} + builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{builder.L2Info.GetAddress("User2").Hex()} } - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil, nil) - defer nodeB.StopAndWait() - tx := l2Info.PrepareTx("Owner", "User2", l2Info.TransferGas, big.NewInt(1e12), nil) + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: builder.nodeConfig}) + defer cleanupB() + + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err = clientA.SendTransaction(ctx, tx) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) if successCase { - _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := clientB.BalanceAt(ctx, l2Info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } } else { - _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second) if err == nil { Fatal(t, "tx received by node with different seq coordinator signing key") } diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index d70f47a146..f0e3dcffd7 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -15,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -24,12 +23,13 @@ func TestSequencerParallelNonces(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.execConfig.Sequencer.NonceFailureCacheExpiry = time.Minute + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("Destination") + builder.L2Info.GenerateAccount("Destination") wg := sync.WaitGroup{} for thread := 0; thread < 10; thread++ { @@ -37,11 +37,11 @@ func 
TestSequencerParallelNonces(t *testing.T) { go func() { defer wg.Done() for i := 0; i < 10; i++ { - tx := l2info.PrepareTx("Owner", "Destination", l2info.TransferGas, common.Big1, nil) + tx := builder.L2Info.PrepareTx("Owner", "Destination", builder.L2Info.TransferGas, common.Big1, nil) // Sleep a random amount of time up to 20 milliseconds time.Sleep(time.Millisecond * time.Duration(rand.Intn(20))) t.Log("Submitting transaction with nonce", tx.Nonce()) - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) t.Log("Got response for transaction with nonce", tx.Nonce()) } @@ -49,8 +49,8 @@ func TestSequencerParallelNonces(t *testing.T) { } wg.Wait() - addr := l2info.GetAddress("Destination") - balance, err := client.BalanceAt(ctx, addr, nil) + addr := builder.L2Info.GetAddress("Destination") + balance, err := builder.L2.Client.BalanceAt(ctx, addr, nil) Require(t, err) if !arbmath.BigEquals(balance, big.NewInt(100)) { Fatal(t, "Unexpected user balance", balance) @@ -62,15 +62,16 @@ func TestSequencerNonceTooHigh(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() - l2info.GetInfoWithPrivKey("Owner").Nonce++ + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ before := time.Now() - tx := l2info.PrepareTx("Owner", "Owner", l2info.TransferGas, common.Big0, nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "No error when nonce was too high") } @@ -78,7 +79,7 @@ func TestSequencerNonceTooHigh(t *testing.T) { Fatal(t, "Unexpected transaction error", err) } elapsed := time.Since(before) - if elapsed > 2*config.Sequencer.NonceFailureCacheExpiry { + if elapsed > 2*builder.execConfig.Sequencer.NonceFailureCacheExpiry { Fatal(t, "Sequencer took too long to respond with nonce too high") } } @@ -88,19 +89,20 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.NonceFailureCacheSize = 5 - config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.execConfig.Sequencer.NonceFailureCacheSize = 5 + builder.execConfig.Sequencer.NonceFailureCacheExpiry = time.Minute + cleanup := builder.Build(t) + defer cleanup() count := 15 var completed uint64 for i := 0; i < count; i++ { - l2info.GetInfoWithPrivKey("Owner").Nonce++ - tx := l2info.PrepareTx("Owner", "Owner", l2info.TransferGas, common.Big0, nil) + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ + tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) go func() { - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "No error when nonce was too high") } @@ -110,7 +112,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { for wait := 9; wait >= 0; wait-- { got := 
int(atomic.LoadUint64(&completed)) - expected := count - config.Sequencer.NonceFailureCacheSize + expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize if got == expected { break } diff --git a/system_tests/seq_pause_test.go b/system_tests/seq_pause_test.go index 3817768517..6ce464d8da 100644 --- a/system_tests/seq_pause_test.go +++ b/system_tests/seq_pause_test.go @@ -16,13 +16,13 @@ func TestSequencerPause(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info1, nodeA, client := CreateTestL2(t, ctx) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() const numUsers = 100 - execA := getExecNode(t, nodeA) - prechecker, ok := execA.TxPublisher.(*gethexec.TxPreChecker) + prechecker, ok := builder.L2.ExecNode.TxPublisher.(*gethexec.TxPreChecker) if !ok { t.Error("prechecker not found on node") } @@ -35,15 +35,15 @@ func TestSequencerPause(t *testing.T) { for num := 0; num < numUsers; num++ { userName := fmt.Sprintf("My_User_%d", num) - l2info1.GenerateAccount(userName) + builder.L2Info.GenerateAccount(userName) users = append(users, userName) } for _, userName := range users { - tx := l2info1.PrepareTx("Owner", userName, l2info1.TransferGas, big.NewInt(1e16), nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("Owner", userName, builder.L2Info.TransferGas, big.NewInt(1e16), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -52,7 +52,7 @@ func TestSequencerPause(t *testing.T) { var txs types.Transactions for _, userName := range users { - tx := l2info1.PrepareTx(userName, "Owner", l2info1.TransferGas, big.NewInt(2), nil) + tx := builder.L2Info.PrepareTx(userName, "Owner", builder.L2Info.TransferGas, big.NewInt(2), nil) txs = append(txs, tx) } @@ -63,7 +63,7 @@ func TestSequencerPause(t *testing.T) { }(tx) } - _, err := EnsureTxSucceededWithTimeout(ctx, client, txs[0], time.Second) + _, err := builder.L2.EnsureTxSucceededWithTimeout(txs[0], time.Second) if err == nil { t.Error("tx passed while sequencer paused") } @@ -71,7 +71,7 @@ func TestSequencerPause(t *testing.T) { sequencer.Activate() for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, client, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } } diff --git a/system_tests/seq_reject_test.go b/system_tests/seq_reject_test.go index 34a14c660e..76bdfc2612 100644 --- a/system_tests/seq_reject_test.go +++ b/system_tests/seq_reject_test.go @@ -17,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/colors" @@ -28,21 +27,21 @@ func TestSequencerRejection(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() feedErrChan := make(chan error, 10) - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() - - clientNodeConfig := arbnode.ConfigDefaultL2Test() - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = 
*newBroadcastClientConfigTest(port) - - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) - defer nodeB.StopAndWait() - - auth := l2info1.GetDefaultTransactOpts("Owner", ctx) - simpleAddr, _ := deploySimple(t, ctx, auth, client1) + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + port := builderSeq.L2.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + cleanup := builder.Build(t) + defer cleanup() + + auth := builderSeq.L2Info.GetDefaultTransactOpts("Owner", ctx) + simpleAddr, _ := builderSeq.L2.DeploySimple(t, auth) simpleAbi, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) noopId := simpleAbi.Methods["noop"].ID @@ -51,7 +50,7 @@ func TestSequencerRejection(t *testing.T) { // Generate the accounts before hand to avoid races for user := 0; user < 9; user++ { name := fmt.Sprintf("User%v", user) - l2info1.GenerateAccount(name) + builderSeq.L2Info.GenerateAccount(name) } wg := sync.WaitGroup{} @@ -59,24 +58,24 @@ func TestSequencerRejection(t *testing.T) { for user := 0; user < 9; user++ { user := user name := fmt.Sprintf("User%v", user) - tx := l2info1.PrepareTx("Owner", name, l2info1.TransferGas, big.NewInt(params.Ether), nil) + tx := builderSeq.L2Info.PrepareTx("Owner", name, builderSeq.L2Info.TransferGas, big.NewInt(params.Ether), nil) - err := client1.SendTransaction(ctx, tx) + err := builderSeq.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client2, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) wg.Add(1) go func() { defer wg.Done() - info := l2info1.GetInfoWithPrivKey(name) + info := builderSeq.L2Info.GetInfoWithPrivKey(name) txData := &types.DynamicFeeTx{ To: &simpleAddr, - Gas: l2info1.TransferGas + 10000, - GasFeeCap: arbmath.BigMulByUint(l2info1.GasPrice, 100), + Gas: builderSeq.L2Info.TransferGas + 10000, + GasFeeCap: arbmath.BigMulByUint(builderSeq.L2Info.GasPrice, 100), Value: common.Big0, } for atomic.LoadInt32(&stopBackground) == 0 { @@ -92,8 +91,8 @@ func TestSequencerRejection(t *testing.T) { txData.Nonce = 1 << 32 expectedErr = "nonce too high" } - tx = l2info1.SignTxAs(name, txData) - err = client1.SendTransaction(ctx, tx) + tx = builderSeq.L2Info.SignTxAs(name, txData) + err = builderSeq.L2.Client.SendTransaction(ctx, tx) if err != nil && (expectedErr == "" || !strings.Contains(err.Error(), expectedErr)) { Require(t, err, "failed to send tx for user", user) } @@ -102,7 +101,7 @@ func TestSequencerRejection(t *testing.T) { } for i := 100; i >= 0; i-- { - block, err := client1.BlockNumber(ctx) + block, err := builderSeq.L2.Client.BlockNumber(ctx) Require(t, err) if block >= 200 { break @@ -120,11 +119,11 @@ func TestSequencerRejection(t *testing.T) { atomic.StoreInt32(&stopBackground, 1) wg.Wait() - header1, err := client1.HeaderByNumber(ctx, nil) + header1, err := builderSeq.L2.Client.HeaderByNumber(ctx, nil) Require(t, err) for i := 100; i >= 0; i-- { - header2, err := client2.HeaderByNumber(ctx, header1.Number) + header2, err := builder.L2.Client.HeaderByNumber(ctx, header1.Number) if err != nil { select { case err := <-feedErrChan: @@ -132,7 +131,7 @@ func 
TestSequencerRejection(t *testing.T) { case <-time.After(time.Millisecond * 100): } if i == 0 { - client2Block, _ := client2.BlockNumber(ctx) + client2Block, _ := builder.L2.Client.BlockNumber(ctx) Fatal(t, "client2 failed to reach client1 block ", header1.Number, ", only reached block", client2Block) } continue diff --git a/system_tests/seq_whitelist_test.go b/system_tests/seq_whitelist_test.go index 36e309a5d7..efa30171ac 100644 --- a/system_tests/seq_whitelist_test.go +++ b/system_tests/seq_whitelist_test.go @@ -9,31 +9,30 @@ import ( "testing" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/execution/gethexec" ) func TestSequencerWhitelist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.SenderWhitelist = GetTestAddressForAccountName(t, "Owner").String() + "," + GetTestAddressForAccountName(t, "User").String() - l2info, l2node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, true) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig.Sequencer.SenderWhitelist = GetTestAddressForAccountName(t, "Owner").String() + "," + GetTestAddressForAccountName(t, "User").String() + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User") - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User") + builder.L2Info.GenerateAccount("User2") // Owner is on the whitelist - TransferBalance(t, "Owner", "User", big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Owner", "User2", big.NewInt(params.Ether), l2info, client, ctx) + builder.L2.TransferBalance(t, "Owner", "User", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, "Owner", "User2", big.NewInt(params.Ether), builder.L2Info) // User is on the whitelist - TransferBalance(t, "User", "User2", big.NewInt(params.Ether/10), l2info, client, ctx) + builder.L2.TransferBalance(t, "User", "User2", big.NewInt(params.Ether/10), builder.L2Info) // User2 is *not* on the whitelist, therefore this should fail - tx := l2info.PrepareTx("User2", "User", l2info.TransferGas, big.NewInt(params.Ether/10), nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("User2", "User", builder.L2Info.TransferGas, big.NewInt(params.Ether/10), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "transaction from user not on whitelist accepted") } diff --git a/system_tests/seqcompensation_test.go b/system_tests/seqcompensation_test.go index 362acf6a30..156ced6bfc 100644 --- a/system_tests/seqcompensation_test.go +++ b/system_tests/seqcompensation_test.go @@ -18,19 +18,19 @@ func TestSequencerCompensation(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + TestClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) - err := l2clientA.SendTransaction(ctx, tx) + tx := 
builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // give the inbox reader a bit of time to pick up the delayed message @@ -38,22 +38,22 @@ func TestSequencerCompensation(t *testing.T) { // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err = WaitForTx(ctx, l2clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, TestClientB.Client, tx.Hash(), time.Second*5) Require(t, err) // clientB sees balance means sequencer message was sent - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := TestClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { Fatal(t, "Unexpected balance:", l2balance) } - initialSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) + initialSeqBalance, err := TestClientB.Client.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) Require(t, err) if initialSeqBalance.Sign() != 0 { Fatal(t, "Unexpected initial sequencer balance:", initialSeqBalance) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index bb3c6e9fcd..611f15b00a 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -13,7 +13,6 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/broadcastclient" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" @@ -28,9 +27,9 @@ func newBroadcasterConfigTest() *wsbroadcastserver.BroadcasterConfig { func newBroadcastClientConfigTest(port int) *broadcastclient.Config { return &broadcastclient.Config{ - URLs: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, + URL: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, Timeout: 200 * time.Millisecond, - Verifier: signature.VerifierConfig{ + Verify: signature.VerifierConfig{ Dangerous: signature.DangerousVerifierConfig{ AcceptMissing: true, }, @@ -43,30 +42,33 @@ func TestSequencerFeed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() - clientNodeConfig := arbnode.ConfigDefaultL2Test() - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + seqInfo, seqNode, seqClient := builderSeq.L2Info, builderSeq.L2.ConsensusNode, builderSeq.L2.Client - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) - defer nodeB.StopAndWait() + port := 
seqNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() + client := builder.L2.Client - l2info1.GenerateAccount("User2") + seqInfo.GenerateAccount("User2") - tx := l2info1.PrepareTx("Owner", "User2", l2info1.TransferGas, big.NewInt(1e12), nil) + tx := seqInfo.PrepareTx("Owner", "User2", seqInfo.TransferGas, big.NewInt(1e12), nil) - err := client1.SendTransaction(ctx, tx) + err := seqClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) Require(t, err) - _, err = WaitForTx(ctx, client2, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := client2.BalanceAt(ctx, l2info1.GetAddress("User2"), nil) + l2balance, err := client.BalanceAt(ctx, seqInfo.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) @@ -78,19 +80,20 @@ func TestRelayedSequencerFeed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + seqInfo, seqNode, seqClient := builderSeq.L2Info, builderSeq.L2.ConsensusNode, builderSeq.L2.Client - bigChainId, err := client1.ChainID(ctx) + bigChainId, err := seqClient.ChainID(ctx) Require(t, err) config := relay.ConfigDefault - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + port := seqNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port config.Node.Feed.Input = *newBroadcastClientConfigTest(port) config.Node.Feed.Output = *newBroadcasterConfigTest() - config.L2.ChainId = bigChainId.Uint64() + config.Chain.ID = bigChainId.Uint64() feedErrChan := make(chan error, 10) currentRelay, err := relay.NewRelay(&config, feedErrChan) @@ -99,25 +102,28 @@ func TestRelayedSequencerFeed(t *testing.T) { Require(t, err) defer currentRelay.StopAndWait() - clientNodeConfig := arbnode.ConfigDefaultL2Test() port = currentRelay.GetListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - _, nodeC, client3 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) - defer nodeC.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() + node, exec, client := builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client + StartWatchChanErr(t, ctx, feedErrChan, node, exec) - l2info1.GenerateAccount("User2") + seqInfo.GenerateAccount("User2") - tx := l2info1.PrepareTx("Owner", "User2", l2info1.TransferGas, big.NewInt(1e12), nil) + tx := seqInfo.PrepareTx("Owner", "User2", seqInfo.TransferGas, big.NewInt(1e12), nil) - err = client1.SendTransaction(ctx, tx) + err = seqClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) 
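Stepping back from the hunk above: the relay test builds a three-hop feed topology with the renamed config fields (`broadcastclient.Config.URL` and `.Verify`, relay `Chain.ID`). A minimal sketch, assuming the arbtest helpers this diff introduces (`NewNodeBuilder`, `newBroadcasterConfigTest`, `newBroadcastClientConfigTest`):

```go
// Sequencer node broadcasting its feed on an ephemeral port.
builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false)
builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest()
cleanupSeq := builderSeq.Build(t)
defer cleanupSeq()
seqPort := builderSeq.L2.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port

chainId, err := builderSeq.L2.Client.ChainID(ctx)
Require(t, err)

// Relay: consumes the sequencer feed and re-broadcasts it.
relayConfig := relay.ConfigDefault
relayConfig.Node.Feed.Input = *newBroadcastClientConfigTest(seqPort) // Config.URL, renamed from URLs
relayConfig.Node.Feed.Output = *newBroadcasterConfigTest()
relayConfig.Chain.ID = chainId.Uint64() // renamed from config.L2.ChainId
feedErrChan := make(chan error, 10)
currentRelay, err := relay.NewRelay(&relayConfig, feedErrChan)
Require(t, err)
defer currentRelay.StopAndWait() // (the relay start call is elided between the hunks above)

// Follower node: reads the relay's output instead of the sequencer's.
relayPort := currentRelay.GetListenerAddr().(*net.TCPAddr).Port
builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(relayPort)
builder.takeOwnership = false
cleanup := builder.Build(t)
defer cleanup()
```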
Require(t, err) - _, err = WaitForTx(ctx, client3, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := client3.BalanceAt(ctx, l2info1.GetAddress("User2"), nil) + l2balance, err := client.BalanceAt(ctx, seqInfo.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) @@ -135,20 +141,26 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigA.BatchPoster.Enable = true nodeConfigA.Feed.Output.Enable = false - l2infoA, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack, "unable to close l1stack") - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = nodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + l2clientA := builder.L2.Client + + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) // The lying sequencer nodeConfigC := arbnode.ConfigDefaultL1Test() nodeConfigC.BatchPoster.Enable = false nodeConfigC.DataAvailability = nodeConfigA.DataAvailability - nodeConfigC.DataAvailability.AggregatorConfig.Enable = false + nodeConfigC.DataAvailability.RPCAggregator.Enable = false nodeConfigC.Feed.Output = *newBroadcasterConfigTest() - l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, gethexec.ConfigDefaultTest(), nil) - defer nodeC.StopAndWait() + testClientC, cleanupC := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigC}) + defer cleanupC() + l2clientC, nodeC := testClientC.Client, testClientC.ConsensusNode port := nodeC.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port @@ -157,16 +169,17 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigB.Feed.Output.Enable = false nodeConfigB.Feed.Input = *newBroadcastClientConfigTest(port) nodeConfigB.DataAvailability = nodeConfigA.DataAvailability - nodeConfigB.DataAvailability.AggregatorConfig.Enable = false - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil, nil) - defer nodeB.StopAndWait() + nodeConfigB.DataAvailability.RPCAggregator.Enable = false + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigB}) + defer cleanupB() + l2clientB := testClientB.Client - l2infoA.GenerateAccount("FraudUser") - l2infoA.GenerateAccount("RealUser") + builder.L2Info.GenerateAccount("FraudUser") + builder.L2Info.GenerateAccount("RealUser") - fraudTx := l2infoA.PrepareTx("Owner", "FraudUser", l2infoA.TransferGas, big.NewInt(1e12), nil) - l2infoA.GetInfoWithPrivKey("Owner").Nonce -= 1 // Use same l2info object for different l2s - realTx := l2infoA.PrepareTx("Owner", "RealUser", l2infoA.TransferGas, big.NewInt(1e12), nil) + fraudTx := builder.L2Info.PrepareTx("Owner", "FraudUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce -= 1 // Use same l2info object for different l2s + realTx := builder.L2Info.PrepareTx("Owner", "RealUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) for i := 0; i < 10; i++ { err := l2clientC.SendTransaction(ctx, fraudTx) @@ -179,7 +192,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { } } - _, err := 
EnsureTxSucceeded(ctx, l2clientC, fraudTx) + _, err := testClientC.EnsureTxSucceeded(fraudTx) if err != nil { t.Fatal("error ensuring fraud transaction succeeded:", err) } @@ -189,7 +202,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { if err != nil { t.Fatal("error waiting for tx:", err) } - l2balance, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("FraudUser"), nil) + l2balance, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("FraudUser"), nil) if err != nil { t.Fatal("error getting balance:", err) } @@ -203,7 +216,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { t.Fatal("error sending real transaction:", err) } - _, err = EnsureTxSucceeded(ctx, l2clientA, realTx) + _, err = builder.L2.EnsureTxSucceeded(realTx) if err != nil { t.Fatal("error ensuring real transaction succeeded:", err) } @@ -213,7 +226,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { if err != nil { t.Fatal("error waiting for transaction to get to node b:", err) } - l2balanceFraudAcct, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("FraudUser"), nil) + l2balanceFraudAcct, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("FraudUser"), nil) if err != nil { t.Fatal("error getting fraud balance:", err) } @@ -221,7 +234,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { t.Fatal("Unexpected balance (fraud acct should be empty) was:", l2balanceFraudAcct) } - l2balanceRealAcct, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("RealUser"), nil) + l2balanceRealAcct, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("RealUser"), nil) if err != nil { t.Fatal("error getting real balance:", err) } diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index feabc74bbb..69aeab0c83 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -6,15 +6,19 @@ package arbtest import ( "bytes" "context" + "errors" "fmt" "math/big" "math/rand" "testing" "time" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" @@ -38,28 +42,128 @@ type blockTestState struct { const seqInboxTestIters = 40 +func encodeAddBatch(seqABI *abi.ABI, seqNum *big.Int, message []byte, afterDelayedMsgRead *big.Int, gasRefunder common.Address) ([]byte, error) { + method, ok := seqABI.Methods["addSequencerL2BatchFromOrigin0"] + if !ok { + return nil, errors.New("failed to find the addSequencerL2BatchFromOrigin0 method") + } + inputData, err := method.Inputs.Pack( + seqNum, + message, + afterDelayedMsgRead, + gasRefunder, + new(big.Int).SetUint64(uint64(1)), + new(big.Int).SetUint64(uint64(1)), + ) + if err != nil { + return nil, err + } + fullData := append([]byte{}, method.ID...) + fullData = append(fullData, inputData...)
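+ // fullData is now raw calldata in the standard Solidity ABI layout: the
+ // 4-byte selector (method.ID, the first four bytes of the keccak256 hash
+ // of the method's canonical signature) followed by the ABI-encoded
+ // arguments. Packing it by hand lets the test pass these exact bytes to
+ // eth_createAccessList below instead of going through a bound contract.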
+ return fullData, nil +} +func diffAccessList(accessed, al types.AccessList) string { + m := make(map[common.Address]map[common.Hash]bool) + for i := 0; i < len(al); i++ { + if _, ok := m[al[i].Address]; !ok { + m[al[i].Address] = make(map[common.Hash]bool) + } + for _, slot := range al[i].StorageKeys { + m[al[i].Address][slot] = true + } + } + + diff := "" + for i := 0; i < len(accessed); i++ { + addr := accessed[i].Address + if _, ok := m[addr]; !ok { + diff += fmt.Sprintf("contract address: %q wasn't accessed\n", addr) + continue + } + for j := 0; j < len(accessed[i].StorageKeys); j++ { + slot := accessed[i].StorageKeys[j] + if _, ok := m[addr][slot]; !ok { + diff += fmt.Sprintf("storage slot: %v for contract: %v wasn't accessed\n", slot, addr) + } + } + } + return diff +} + +func deployGasRefunder(ctx context.Context, t *testing.T, builder *NodeBuilder) common.Address { + t.Helper() + abi, err := bridgegen.GasRefunderMetaData.GetAbi() + if err != nil { + t.Fatalf("Error getting gas refunder abi: %v", err) + } + fauOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + addr, tx, _, err := bind.DeployContract(&fauOpts, *abi, common.FromHex(bridgegen.GasRefunderBin), builder.L1.Client) + if err != nil { + t.Fatalf("Error getting gas refunder contract deployment transaction: %v", err) + } + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { + t.Fatalf("Error deploying gas refunder contract: %v", err) + } + tx = builder.L1Info.PrepareTxTo("Faucet", &addr, 30000, big.NewInt(9223372036854775807), nil) + if err := builder.L1.Client.SendTransaction(ctx, tx); err != nil { + t.Fatalf("Error sending gas refunder funding transaction: %v", err) + } + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { + t.Fatalf("Error funding gas refunder") + } + contract, err := bridgegen.NewGasRefunder(addr, builder.L1.Client) + if err != nil { + t.Fatalf("Error getting gas refunder contract binding: %v", err) + } + tx, err = contract.AllowContracts(&fauOpts, []common.Address{builder.L1Info.GetAddress("SequencerInbox")}) + if err != nil { + t.Fatalf("Error creating transaction for altering allowlist in refunder: %v", err) + } + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { + t.Fatalf("Error adding sequencer inbox in gas refunder allowlist: %v", err) + } + + tx, err = contract.AllowRefundees(&fauOpts, []common.Address{builder.L1Info.GetAddress("Sequencer")}) + if err != nil { + t.Fatalf("Error creating transaction for altering allowlist in refunder: %v", err) + } + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { + t.Fatalf("Error adding sequencer in gas refunder allowlist: %v", err) + } + return addr +} + func testSequencerInboxReaderImpl(t *testing.T, validator bool) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.InboxReader.HardReorg = true + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.InboxReader.HardReorg = true if validator { - conf.BlockValidator.Enable = true + builder.nodeConfig.BlockValidator.Enable = true } - l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil, nil) - execNode := getExecNode(t, arbNode) - l2Backend := execNode.Backend - defer requireClose(t, l1stack) - defer arbNode.StopAndWait() + builder.isSequencer = false + cleanup := builder.Build(t) + defer cleanup() + + l2Backend := builder.L2.ExecNode.Backend - l1BlockChain := l1backend.BlockChain() + l1BlockChain := 
builder.L1.L1Backend.BlockChain() - seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) + rpcC, err := builder.L1.Stack.Attach() + if err != nil { + t.Fatalf("Error connecting to l1 node: %v", err) + } + gethClient := gethclient.New(rpcC) + + seqInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) Require(t, err) - seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx) + seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) + + gasRefunderAddr := deployGasRefunder(ctx, t, builder) - ownerAddress := l2Info.GetAddress("Owner") + ownerAddress := builder.L2Info.GetAddress("Owner") var startL2BlockNumber uint64 = 0 startState, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber) @@ -92,10 +196,15 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { } var faucetTxs []*types.Transaction for _, acct := range accounts { - l1Info.GenerateAccount(acct) - faucetTxs = append(faucetTxs, l1Info.PrepareTx("Faucet", acct, 30000, big.NewInt(1e16), nil)) + builder.L1Info.GenerateAccount(acct) + faucetTxs = append(faucetTxs, builder.L1Info.PrepareTx("Faucet", acct, 30000, big.NewInt(1e16), nil)) + } + builder.L1.SendWaitTestTransactions(t, faucetTxs) + + seqABI, err := bridgegen.SequencerInboxMetaData.GetAbi() + if err != nil { + t.Fatalf("Error getting sequencer inbox abi: %v", err) } - SendWaitTestTransactions(t, ctx, l1Client, faucetTxs) for i := 1; i < seqInboxTestIters; i++ { if i%10 == 0 { @@ -107,7 +216,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { // The miner usually collects transactions from deleted blocks and puts them in the mempool. // However, this code doesn't run on reorgs larger than 64 blocks for performance reasons. // Therefore, we make a bunch of small blocks to prevent the code from running. - padAddr := l1Info.GetAddress("ReorgPadding") + padAddr := builder.L1Info.GetAddress("ReorgPadding") for j := uint64(0); j < 70; j++ { rawTx := &types.DynamicFeeTx{ To: &padAddr, @@ -116,12 +225,12 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { Value: new(big.Int), Nonce: j, } - tx := l1Info.SignTxAs("ReorgPadding", rawTx) - Require(t, l1Client.SendTransaction(ctx, tx)) - _, _ = EnsureTxSucceeded(ctx, l1Client, tx) + tx := builder.L1Info.SignTxAs("ReorgPadding", rawTx) + Require(t, builder.L1.Client.SendTransaction(ctx, tx)) + _, _ = builder.L1.EnsureTxSucceeded(tx) } reorgTargetNumber := blockStates[reorgTo].l1BlockNumber - currentHeader, err := l1Client.HeaderByNumber(ctx, nil) + currentHeader, err := builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 { Fatal(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber) @@ -136,10 +245,10 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { // Sometimes, this causes it to drop the next tx. // To work around this, we create a sacrificial tx, which may or may not succeed. // Whichever happens, by the end of this block, the miner will have processed the reorg. 
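The hunks below also add a cross-check of the data poster's precomputed access list against what the L1 node itself derives. Condensed, and assuming the helpers defined earlier in this file (`encodeAddBatch`, `diffAccessList`, `gethClient`) plus the `arbnode.AccessList` helper this diff starts using, the added check amounts to:

```go
// Build the exact calldata the batch poster would send, ask the L1 node
// for an access list via eth_createAccessList, and verify that every
// address and storage slot in the precomputed list was actually accessed.
data, err := encodeAddBatch(seqABI, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr)
Require(t, err)
inbox := builder.L1Info.GetAddress("SequencerInbox")
wantAL, _, _, err := gethClient.CreateAccessList(ctx, ethereum.CallMsg{
	From: seqOpts.From,
	To:   &inbox,
	Data: data,
})
Require(t, err)
precomputed := arbnode.AccessList(&arbnode.AccessListOpts{
	SequencerInboxAddr:       inbox,
	BridgeAddr:               builder.L1Info.GetAddress("Bridge"),
	DataPosterAddr:           seqOpts.From,
	GasRefunderAddr:          gasRefunderAddr,
	SequencerInboxAccs:       len(blockStates),
	AfterDelayedMessagesRead: 1,
})
if d := diffAccessList(precomputed, *wantAL); d != "" {
	t.Errorf("access list mismatch:\n%s", d)
}
```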
- tx := l1Info.PrepareTx(fmt.Sprintf("ReorgSacrifice%v", i/10), "Faucet", 30000, big.NewInt(0), nil) - err = l1Client.SendTransaction(ctx, tx) + tx := builder.L1Info.PrepareTx(fmt.Sprintf("ReorgSacrifice%v", i/10), "Faucet", 30000, big.NewInt(0), nil) + err = builder.L1.Client.SendTransaction(ctx, tx) Require(t, err) - _, _ = WaitForTx(ctx, l1Client, tx.Hash(), time.Second) + _, _ = WaitForTx(ctx, builder.L1.Client, tx.Hash(), time.Second) } else { state := blockStates[len(blockStates)-1] newBalances := make(map[common.Address]*big.Int) @@ -167,10 +276,10 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { var dest common.Address if j == 0 && amount.Cmp(reserveAmount) >= 0 { name := accountName(len(state.accounts)) - if !l2Info.HasAccount(name) { - l2Info.GenerateAccount(name) + if !builder.L2Info.HasAccount(name) { + builder.L2Info.GenerateAccount(name) } - dest = l2Info.GetAddress(name) + dest = builder.L2Info.GetAddress(name) state.accounts = append(state.accounts, dest) state.balances[dest] = big.NewInt(0) } else { @@ -185,7 +294,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { Nonce: state.nonces[source], } state.nonces[source]++ - tx := l2Info.SignTxAs(accountName(sourceNum), rawTx) + tx := builder.L2Info.SignTxAs(accountName(sourceNum), rawTx) txData, err := tx.MarshalBinary() Require(t, err) var segment []byte @@ -205,7 +314,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { seqNonce := len(blockStates) - 1 for j := 0; ; j++ { - haveNonce, err := l1Client.PendingNonceAt(ctx, seqOpts.From) + haveNonce, err := builder.L1.Client.PendingNonceAt(ctx, seqOpts.From) Require(t, err) if haveNonce == uint64(seqNonce) { break @@ -217,23 +326,60 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { } seqOpts.Nonce = big.NewInt(int64(seqNonce)) var tx *types.Transaction + before, err := builder.L1.Client.BalanceAt(ctx, seqOpts.From, nil) + if err != nil { + t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) + } + + data, err := encodeAddBatch(seqABI, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) + if err != nil { + t.Fatalf("Error encoding batch data: %v", err) + } + si := builder.L1Info.GetAddress("SequencerInbox") + wantAL, _, _, err := gethClient.CreateAccessList(ctx, ethereum.CallMsg{ + From: seqOpts.From, + To: &si, + Data: data, + }) + if err != nil { + t.Fatalf("Error creating access list: %v", err) + } + accessed := arbnode.AccessList(&arbnode.AccessListOpts{ + SequencerInboxAddr: builder.L1Info.GetAddress("SequencerInbox"), + BridgeAddr: builder.L1Info.GetAddress("Bridge"), + DataPosterAddr: seqOpts.From, + GasRefunderAddr: gasRefunderAddr, + SequencerInboxAccs: len(blockStates), + AfterDelayedMessagesRead: 1, + }) + if diff := diffAccessList(accessed, *wantAL); diff != "" { + t.Errorf("Access list mismatch:\n%s\n", diff) + } if i%5 == 0 { - tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else { - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), common.Address{}) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) } Require(t, err) - 
txRes, err := EnsureTxSucceeded(ctx, l1Client, tx) + txRes, err := builder.L1.EnsureTxSucceeded(tx) if err != nil { // Geth's clique miner is finicky. // Unfortunately this is so rare that I haven't had an opportunity to test this workaround. // Specifically, I suspect there's a race where it thinks there's no txs to put in the new block, // if a new tx arrives at the same time as it tries to create a block. // Resubmit the transaction in an attempt to get the miner going again. - _ = l1Client.SendTransaction(ctx, tx) - txRes, err = EnsureTxSucceeded(ctx, l1Client, tx) + _ = builder.L1.Client.SendTransaction(ctx, tx) + txRes, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) } + after, err := builder.L1.Client.BalanceAt(ctx, seqOpts.From, nil) + if err != nil { + t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) + } + txCost := txRes.EffectiveGasPrice.Uint64() * txRes.GasUsed + if diff := before.Int64() - after.Int64(); diff >= int64(txCost) { + t.Errorf("Transaction: %v was not refunded, balance diff: %v, cost: %v", tx.Hash(), diff, txCost) + } state.l2BlockNumber += uint64(numMessages) state.l1BlockNumber = txRes.BlockNumber.Uint64() @@ -268,9 +414,9 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if validator && i%15 == 0 { for i := 0; ; i++ { - expectedPos, err := execNode.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) + expectedPos, err := builder.L2.ExecNode.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) Require(t, err) - lastValidated := arbNode.BlockValidator.Validated(t) + lastValidated := builder.L2.ConsensusNode.BlockValidator.Validated(t) if lastValidated == expectedPos+1 { break } else if i >= 1000 { diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 05e9b6cbeb..6b120e3ec0 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -16,51 +16,41 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/l2pricing" - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/validator/valnode" ) -func makeBackgroundTxs(ctx context.Context, l2info *BlockchainTestInfo, l2clientA arbutil.L1Interface, l2clientB arbutil.L1Interface, faultyStaker bool) error { +func makeBackgroundTxs(ctx context.Context, builder *NodeBuilder) error { for i := uint64(0); ctx.Err() == nil; i++ { - l2info.Accounts["BackgroundUser"].Nonce = i - tx := l2info.PrepareTx("BackgroundUser", "BackgroundUser", l2info.TransferGas, common.Big0, nil) - err := l2clientA.SendTransaction(ctx, tx) + builder.L2Info.Accounts["BackgroundUser"].Nonce = i + tx := builder.L2Info.PrepareTx("BackgroundUser", 
"BackgroundUser", builder.L2Info.TransferGas, common.Big0, nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err != nil { return err } - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) if err != nil { return err } - if faultyStaker { - // Create a different transaction for the second node - l2info.Accounts["BackgroundUser"].Nonce = i - tx = l2info.PrepareTx("BackgroundUser", "BackgroundUser", l2info.TransferGas, common.Big1, nil) - err = l2clientB.SendTransaction(ctx, tx) - if err != nil { - return err - } - _, err = EnsureTxSucceeded(ctx, l2clientB, tx) - if err != nil { - return err - } - } } return nil } @@ -70,23 +60,32 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs - l2chainConfig := params.ArbitrumDevTestChainConfig() - l2info := NewBlockChainTestInfo( + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.L2Info = NewBlockChainTestInfo( t, - types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), transferGas, ) - _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, l2chainConfig, nil, l2info) - defer requireClose(t, l1stack) - defer l2nodeA.StopAndWait() - execNodeA := getExecNode(t, l2nodeA) + cleanupA := builder.Build(t) + defer cleanupA() + + l2nodeA := builder.L2.ConsensusNode + execNodeA := builder.L2.ExecNode if faultyStaker { - l2info.GenerateGenesisAccount("FaultyAddr", common.Big1) + builder.L2Info.GenerateGenesisAccount("FaultyAddr", common.Big1) } - l2clientB, l2nodeB := Create2ndNodeWithConfig(t, ctx, l2nodeA, l1stack, l1info, &l2info.ArbInitData, arbnode.ConfigDefaultL1Test(), gethexec.ConfigDefaultTest(), nil) - defer l2nodeB.StopAndWait() - execNodeB := getExecNode(t, l2nodeB) + config := arbnode.ConfigDefaultL1Test() + config.Sequencer = false + config.DelayedSequencer.Enable = false + config.BatchPoster.Enable = false + builder.execConfig.Sequencer.Enable = false + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: config}) + defer cleanupB() + + l2nodeB := testClientB.ConsensusNode + execNodeB := testClientB.ExecNode nodeAGenesis := execNodeA.Backend.APIBackend().CurrentHeader().Hash() nodeBGenesis := execNodeB.Backend.APIBackend().CurrentHeader().Hash() @@ -100,44 +99,61 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } } - BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000)), l1info, l2info, l1client, l2clientA, ctx) + builder.BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000))) - deployAuth := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + deployAuth := builder.L1Info.GetDefaultTransactOpts("RollupOwner", ctx) balance := big.NewInt(params.Ether) balance.Mul(balance, big.NewInt(100)) - l1info.GenerateAccount("ValidatorA") - TransferBalance(t, "Faucet", "ValidatorA", balance, l1info, l1client, ctx) - l1authA := l1info.GetDefaultTransactOpts("ValidatorA", ctx) + builder.L1Info.GenerateAccount("ValidatorA") + builder.L1.TransferBalance(t, "Faucet", "ValidatorA", balance, builder.L1Info) + l1authA := 
builder.L1Info.GetDefaultTransactOpts("ValidatorA", ctx) - l1info.GenerateAccount("ValidatorB") - TransferBalance(t, "Faucet", "ValidatorB", balance, l1info, l1client, ctx) - l1authB := l1info.GetDefaultTransactOpts("ValidatorB", ctx) + builder.L1Info.GenerateAccount("ValidatorB") + builder.L1.TransferBalance(t, "Faucet", "ValidatorB", balance, builder.L1Info) + l1authB := builder.L1Info.GetDefaultTransactOpts("ValidatorB", ctx) - valWalletAddrAPtr, err := staker.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true) + valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true) Require(t, err) valWalletAddrA := *valWalletAddrAPtr - valWalletAddrCheck, err := staker.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true) + valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true) Require(t, err) if valWalletAddrA == *valWalletAddrCheck { Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String()) } - rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, l1client) + rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, builder.L1.Client) Require(t, err) - tx, err := rollup.SetValidator(&deployAuth, []common.Address{valWalletAddrA, l1authB.From}, []bool{true, true}) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + + upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l2nodeA.DeployInfo.UpgradeExecutor, builder.L1.Client) + Require(t, err, "unable to bind upgrade executor") + rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI)) + Require(t, err, "unable to parse rollup ABI") + + setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From}, []bool{true, true}) + Require(t, err, "unable to generate setValidator calldata") + tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata) + Require(t, err, "unable to set validators") + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - tx, err = rollup.SetMinimumAssertionPeriod(&deployAuth, big.NewInt(1)) + setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1)) + Require(t, err, "unable to generate setMinimumAssertionPeriod calldata") + tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata) + Require(t, err, "unable to set minimum assertion period") + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + + validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client) Require(t, err) - valConfig := staker.L1ValidatorConfig{} + valConfig := staker.TestL1ValidatorConfig - valWalletA, err := staker.NewContractValidatorWallet(nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}) + dpA, err := arbnode.StakerDataposter(ctx, rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + if err != nil { + t.Fatalf("Error creating validator dataposter: %v", err) + } + 
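The pattern above recurs throughout this refactor: admin actions no longer call the rollup contract directly but are ABI-packed and routed through the upgrade executor. A condensed sketch, using only bindings that appear in this diff:

```go
// Route an admin call through the UpgradeExecutor: pack the target
// method's calldata, then have the executor perform the call against
// the rollup proxy.
rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI))
Require(t, err)
calldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1))
Require(t, err)
tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, calldata)
Require(t, err)
_, err = builder.L1.EnsureTxSucceeded(tx)
Require(t, err)
```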
valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas }) Require(t, err) if honestStakerInactive { valConfig.Strategy = "Defensive" @@ -169,6 +185,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) nil, statelessA, nil, + nil, l2nodeA.DeployInfo.ValidatorUtils, nil, ) @@ -180,7 +197,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err) - valWalletB, err := staker.NewEoaValidatorWallet(l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB) + dpB, err := arbnode.StakerDataposter(ctx, rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + if err != nil { + t.Fatalf("Error creating validator dataposter: %v", err) + } + valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB, func() uint64 { return 0 }) Require(t, err) valConfig.Strategy = "MakeNodes" statelessB, err := staker.NewStatelessBlockValidator( @@ -204,6 +225,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) nil, statelessB, nil, + nil, l2nodeB.DeployInfo.ValidatorUtils, nil, ) @@ -214,9 +236,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) err = valWalletB.Initialize(ctx) Require(t, err) } - - valWalletC, err := staker.NewContractValidatorWallet(nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, nil, 0, func(common.Address) {}) - Require(t, err) + valWalletC := validatorwallet.NewNoOp(builder.L1.Client, l2nodeA.DeployInfo.Rollup) valConfig.Strategy = "Watchtower" stakerC, err := staker.NewStaker( l2nodeA.L1Reader, @@ -226,6 +246,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) nil, statelessA, nil, + nil, l2nodeA.DeployInfo.ValidatorUtils, nil, ) @@ -237,18 +258,12 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) err = stakerC.Initialize(ctx) Require(t, err) - l2info.GenerateAccount("BackgroundUser") - tx = l2info.PrepareTx("Faucet", "BackgroundUser", l2info.TransferGas, balance, nil) - err = l2clientA.SendTransaction(ctx, tx) + builder.L2Info.GenerateAccount("BackgroundUser") + tx = builder.L2Info.PrepareTx("Faucet", "BackgroundUser", builder.L2Info.TransferGas, balance, nil) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - if faultyStaker { - err = l2clientB.SendTransaction(ctx, tx) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientB, tx) - Require(t, err) - } // Continually make L2 transactions in a background thread backgroundTxsCtx, cancelBackgroundTxs := context.WithCancel(ctx) @@ -259,7 +274,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) })() go (func() { defer close(backgroundTxsShutdownChan) - err := makeBackgroundTxs(backgroundTxsCtx, l2info, l2clientA, l2clientB, faultyStaker) + err := makeBackgroundTxs(backgroundTxsCtx, builder) if !errors.Is(err, context.Canceled) { log.Warn("error making background txs", "err", err) } @@ -302,23 +317,28 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if !challengeMangerTimedOut { // Upgrade 
the ChallengeManager contract to an implementation which says challenges are always timed out - mockImpl, _, _, err := mocksgen.DeployTimedOutChallengeManager(&deployAuth, l1client) + mockImpl, tx, _, err := mocksgen.DeployTimedOutChallengeManager(&deployAuth, builder.L1.Client) Require(t, err) + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + managerAddr := valWalletA.ChallengeManagerAddress() // 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103 proxyAdminSlot := common.BigToHash(arbmath.BigSub(crypto.Keccak256Hash([]byte("eip1967.proxy.admin")).Big(), common.Big1)) - proxyAdminBytes, err := l1client.StorageAt(ctx, managerAddr, proxyAdminSlot, nil) + proxyAdminBytes, err := builder.L1.Client.StorageAt(ctx, managerAddr, proxyAdminSlot, nil) Require(t, err) proxyAdminAddr := common.BytesToAddress(proxyAdminBytes) if proxyAdminAddr == (common.Address{}) { Fatal(t, "failed to get challenge manager proxy admin") } - proxyAdmin, err := mocksgen.NewProxyAdminForBinding(proxyAdminAddr, l1client) + proxyAdminABI, err := abi.JSON(strings.NewReader(mocksgen.ProxyAdminForBindingABI)) + Require(t, err) + upgradeCalldata, err := proxyAdminABI.Pack("upgrade", managerAddr, mockImpl) Require(t, err) - tx, err := proxyAdmin.Upgrade(&deployAuth, managerAddr, mockImpl) + tx, err = upgradeExecutor.ExecuteCall(&deployAuth, proxyAdminAddr, upgradeCalldata) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) challengeMangerTimedOut = true @@ -338,13 +358,13 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err, "Staker", stakerName, "failed to act") if tx != nil { - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err, "EnsureTxSucceeded failed for staker", stakerName, "tx") } if faultyStaker { - challengeAddr, err := rollup.CurrentChallenge(&bind.CallOpts{}, valWalletAddrA) + conflictInfo, err := validatorUtils.FindStakerConflict(&bind.CallOpts{}, l2nodeA.DeployInfo.Rollup, l1authA.From, l1authB.From, big.NewInt(1024)) Require(t, err) - if challengeAddr != 0 { + if staker.ConflictType(conflictInfo.Ty) == staker.CONFLICT_TYPE_FOUND { cancelBackgroundTxs() } } @@ -357,6 +377,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if isHonestZombie { Fatal(t, "staker A became a zombie") } + fmt.Printf("watchtower staker acting:\n") watchTx, err := stakerC.Act(ctx) if err != nil && !strings.Contains(err.Error(), "catch up") { Require(t, err, "watchtower staker failed to act") @@ -373,7 +394,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) } for j := 0; j < 5; j++ { - TransferBalance(t, "Faucet", "Faucet", common.Big0, l1info, l1client, ctx) + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big0, builder.L1Info) } } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index a8209499df..b14215fbf0 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -174,7 +174,7 @@ func FuzzStateTransition(f *testing.F) { binary.BigEndian.PutUint64(seqBatch[32:40], uint64(len(delayedMessages))) if compressSeqMsg { seqBatch = append(seqBatch, arbstate.BrotliMessageHeaderByte) - seqMsgCompressed, err := arbcompress.CompressFast(seqMsg) + seqMsgCompressed, err := arbcompress.CompressLevel(seqMsg, 0) if err != nil { panic(fmt.Sprintf("failed to compress sequencer message: %v", err)) } diff 
--git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index 2e3317907b..a270cca76b 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -13,23 +13,24 @@ import ( func TestTransfer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, l2node, client := CreateTestL2(t, ctx) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - bal, err := client.BalanceAt(ctx, l2info.GetAddress("Owner"), nil) + bal, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("Owner"), nil) Require(t, err) fmt.Println("Owner balance is: ", bal) - bal2, err := client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + bal2, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if bal2.Cmp(big.NewInt(1e12)) != 0 { Fatal(t, "Unexpected recipient balance: ", bal2) diff --git a/system_tests/triedb_race_test.go b/system_tests/triedb_race_test.go new file mode 100644 index 0000000000..6d9415df83 --- /dev/null +++ b/system_tests/triedb_race_test.go @@ -0,0 +1,80 @@ +package arbtest + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func TestTrieDBCommitRace(t *testing.T) { + _ = testhelpers.InitTestLog(t, log.LvlError) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + builder.execConfig.Sequencer.MaxBlockSpeed = 0 + builder.execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder.execConfig.Caching.Archive = true + builder.execConfig.Caching.BlockCount = 127 + builder.execConfig.Caching.BlockAge = 0 + builder.execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 127 + builder.execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain() + + var wg sync.WaitGroup + quit := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + default: + builder.L2.TransferBalance(t, "Faucet", "User2", common.Big1, builder.L2Info) + case <-quit: + return + } + } + }() + api := builder.L2.ExecNode.Backend.APIBackend() + blockNumber := 1 + for i := 0; i < 5; i++ { + var roots []common.Hash + for len(roots) < 1024 { + select { + default: + block, err := api.BlockByNumber(ctx, rpc.BlockNumber(blockNumber)) + if err == nil && block != nil { + root := block.Root() + if statedb, err := bc.StateAt(root); err == nil { + err := statedb.Database().TrieDB().Reference(root, common.Hash{}) + Require(t, err) + roots = append(roots, root) + } + blockNumber += 1 + } + case <-quit: + 
return + } + } + t.Log("dereferencing...") + for _, root := range roots { + err := bc.TrieDB().Dereference(root) + Require(t, err) + time.Sleep(1) + } + } + close(quit) + wg.Wait() +} diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 38e04a832e..c8e348cffb 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -20,24 +20,27 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() + + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) - defer nodeB.StopAndWait() + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{dasConfig: &l1NodeConfigBDataAvailability}) + defer cleanupB() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err := l2clientA.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // give the inbox reader a bit of time to pick up the delayed message @@ -45,15 +48,15 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { // sending l1 messages creates l1 blocks.. 
make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err = WaitForTx(ctx, l2clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index f8f41a3ba0..09203e3bcd 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -42,32 +42,36 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + builder.Build(t) + defer requireClose(t, builder.L1.Stack) - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) - defer nodeB.StopAndWait() + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{dasConfig: &l1NodeConfigBDataAvailability}) + defer cleanupB() - l2info.GenerateAccount("DelayedFaucet") - l2info.GenerateAccount("DelayedReceiver") - l2info.GenerateAccount("DirectReceiver") + builder.L2Info.GenerateAccount("DelayedFaucet") + builder.L2Info.GenerateAccount("DelayedReceiver") + builder.L2Info.GenerateAccount("DirectReceiver") - l2info.GenerateAccount("ErrorTxSender") + builder.L2Info.GenerateAccount("ErrorTxSender") - SendWaitTestTransactions(t, ctx, l2client, []*types.Transaction{ - l2info.PrepareTx("Faucet", "ErrorTxSender", l2info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(l2info.TransferGas)), nil), + builder.L2.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L2Info.PrepareTx("Faucet", "ErrorTxSender", builder.L2Info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(builder.L2Info.TransferGas)), nil), }) delayedMsgsToSendMax := big.NewInt(int64(largeLoops * avgDelayedMessagesPerLoop * 10)) delayedFaucetNeeds := new(big.Int).Mul(new(big.Int).Add(fundsPerDelayed, new(big.Int).SetUint64(l2pricing.InitialBaseFeeWei*100000)), delayedMsgsToSendMax) - SendWaitTestTransactions(t, ctx, l2client, []*types.Transaction{ - l2info.PrepareTx("Faucet", "DelayedFaucet", l2info.TransferGas, delayedFaucetNeeds, nil), + builder.L2.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L2Info.PrepareTx("Faucet", "DelayedFaucet", builder.L2Info.TransferGas, delayedFaucetNeeds, nil), 
}) - delayedFaucetBalance, err := l2client.BalanceAt(ctx, l2info.GetAddress("DelayedFaucet"), nil) + delayedFaucetBalance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedFaucet"), nil) Require(t, err) if delayedFaucetBalance.Cmp(delayedFaucetNeeds) != 0 { @@ -85,17 +89,17 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { randNum := rand.Int() % avgTotalL1MessagesPerLoop var l1tx *types.Transaction if randNum < avgDelayedMessagesPerLoop { - delayedTx := l2info.PrepareTx("DelayedFaucet", "DelayedReceiver", 30001, fundsPerDelayed, nil) - l1tx = WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000) + delayedTx := builder.L2Info.PrepareTx("DelayedFaucet", "DelayedReceiver", 30001, fundsPerDelayed, nil) + l1tx = WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000) delayedTxs = append(delayedTxs, delayedTx) delayedTransfers++ } else { - l1tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) + l1tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) } l1Txs = append(l1Txs, l1tx) } // adding multiple messages in the same AddLocal to get them in the same L1 block - errs := l1backend.TxPool().AddLocals(l1Txs) + errs := builder.L1.L1Backend.TxPool().AddLocals(l1Txs) for _, err := range errs { if err != nil { Fatal(t, err) @@ -104,26 +108,26 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { l2TxsThisTime := rand.Int() % (avgL2MsgsPerLoop * 2) l2Txs := make([]*types.Transaction, 0, l2TxsThisTime) for len(l2Txs) < l2TxsThisTime { - l2Txs = append(l2Txs, l2info.PrepareTx("Faucet", "DirectReceiver", l2info.TransferGas, fundsPerDirect, nil)) + l2Txs = append(l2Txs, builder.L2Info.PrepareTx("Faucet", "DirectReceiver", builder.L2Info.TransferGas, fundsPerDirect, nil)) } - SendWaitTestTransactions(t, ctx, l2client, l2Txs) + builder.L2.SendWaitTestTransactions(t, l2Txs) directTransfers += int64(l2TxsThisTime) if len(l1Txs) > 0 { - _, err := EnsureTxSucceeded(ctx, l1client, l1Txs[len(l1Txs)-1]) + _, err := builder.L1.EnsureTxSucceeded(l1Txs[len(l1Txs)-1]) if err != nil { Fatal(t, err) } } // create bad tx on delayed inbox - l2info.GetInfoWithPrivKey("ErrorTxSender").Nonce = 10 - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - WrapL2ForDelayed(t, l2info.PrepareTx("ErrorTxSender", "DelayedReceiver", 30002, delayedFaucetNeeds, nil), l1info, "User", 100000), + builder.L2Info.GetInfoWithPrivKey("ErrorTxSender").Nonce = 10 + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + WrapL2ForDelayed(t, builder.L2Info.PrepareTx("ErrorTxSender", "DelayedReceiver", 30002, delayedFaucetNeeds, nil), builder.L1Info, "User", 100000), }) extrBlocksThisTime := rand.Int() % (avgExtraBlocksPerLoop * 2) for i := 0; i < extrBlocksThisTime; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } } @@ -137,45 +141,45 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { for i := 0; i < finalPropagateLoops; i++ { var tx *types.Transaction for j := 0; j < 30; j++ { - tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) - err := l1client.SendTransaction(ctx, tx) + tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) + err := builder.L1.Client.SendTransaction(ctx, tx) if err != nil { Fatal(t, err) } - _, err = EnsureTxSucceeded(ctx, l1client, tx) 
+ _, err = builder.L1.EnsureTxSucceeded(tx) if err != nil { Fatal(t, err) } } } - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, delayedTxs[len(delayedTxs)-1], time.Second*10) + _, err = builder.L2.EnsureTxSucceededWithTimeout(delayedTxs[len(delayedTxs)-1], time.Second*10) Require(t, err, "Failed waiting for Tx on main node") - _, err = EnsureTxSucceededWithTimeout(ctx, l2clientB, delayedTxs[len(delayedTxs)-1], time.Second*10) + _, err = testClientB.EnsureTxSucceededWithTimeout(delayedTxs[len(delayedTxs)-1], time.Second*10) Require(t, err, "Failed waiting for Tx on secondary node") - delayedBalance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("DelayedReceiver"), nil) + delayedBalance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedReceiver"), nil) Require(t, err) - directBalance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("DirectReceiver"), nil) + directBalance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DirectReceiver"), nil) Require(t, err) delayedExpectd := new(big.Int).Mul(fundsPerDelayed, big.NewInt(delayedTransfers)) directExpectd := new(big.Int).Mul(fundsPerDirect, big.NewInt(directTransfers)) if (delayedBalance.Cmp(delayedExpectd) != 0) || (directBalance.Cmp(directExpectd) != 0) { t.Error("delayed balance", delayedBalance, "expected", delayedExpectd, "transfers", delayedTransfers) t.Error("direct balance", directBalance, "expected", directExpectd, "transfers", directTransfers) - ownerBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("Owner"), nil) - delayedFaucetBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("DelayedFaucet"), nil) + ownerBalance, _ := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("Owner"), nil) + delayedFaucetBalance, _ := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedFaucet"), nil) t.Error("owner balance", ownerBalance, "delayed faucet", delayedFaucetBalance) Fatal(t, "Unexpected balance") } - nodeA.StopAndWait() + builder.L2.ConsensusNode.StopAndWait() - if nodeB.BlockValidator != nil { - lastBlockHeader, err := l2clientB.HeaderByNumber(ctx, nil) + if testClientB.ConsensusNode.BlockValidator != nil { + lastBlockHeader, err := testClientB.Client.HeaderByNumber(ctx, nil) Require(t, err) timeout := getDeadlineTimeout(t, time.Minute*30) // messageindex is same as block number here - if !nodeB.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlockHeader.Number.Uint64()), timeout) { + if !testClientB.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlockHeader.Number.Uint64()), timeout) { Fatal(t, "did not validate all blocks") } } diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index 25245f0f2e..0da793a8c5 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -33,12 +33,13 @@ var sendRootKey = common.HexToHash("0x55667788") var batchNumKey = common.HexToHash("0x99aabbcc") var posInBatchKey = common.HexToHash("0xddeeff") -func globalstateFromTestPreimages(preimages map[common.Hash][]byte) validator.GoGlobalState { +func globalstateFromTestPreimages(preimages map[arbutil.PreimageType]map[common.Hash][]byte) validator.GoGlobalState { + keccakPreimages := preimages[arbutil.Keccak256PreimageType] return validator.GoGlobalState{ - Batch: new(big.Int).SetBytes(preimages[batchNumKey]).Uint64(), - PosInBatch: new(big.Int).SetBytes(preimages[posInBatchKey]).Uint64(), - BlockHash: common.BytesToHash(preimages[blockHashKey]), - SendRoot: 
common.BytesToHash(preimages[sendRootKey]), + Batch: new(big.Int).SetBytes(keccakPreimages[batchNumKey]).Uint64(), + PosInBatch: new(big.Int).SetBytes(keccakPreimages[posInBatchKey]).Uint64(), + BlockHash: common.BytesToHash(keccakPreimages[blockHashKey]), + SendRoot: common.BytesToHash(keccakPreimages[sendRootKey]), } } @@ -208,7 +209,9 @@ func TestValidationServerAPI(t *testing.T) { valInput := validator.ValidationInput{ StartState: startState, - Preimages: globalstateToTestPreimages(endState), + Preimages: map[arbutil.PreimageType]map[common.Hash][]byte{ + arbutil.Keccak256PreimageType: globalstateToTestPreimages(endState), + }, } valRun := client.Launch(&valInput, wasmRoot) res, err := valRun.Await(ctx) @@ -272,7 +275,9 @@ func TestValidationClientRoom(t *testing.T) { valInput := validator.ValidationInput{ StartState: startState, - Preimages: globalstateToTestPreimages(endState), + Preimages: map[arbutil.PreimageType]map[common.Hash][]byte{ + arbutil.Keccak256PreimageType: globalstateToTestPreimages(endState), + }, } valRuns := make([]validator.ValidationRun, 0, 4) @@ -324,7 +329,7 @@ func TestExecutionKeepAlive(t *testing.T) { defer cancel() _, validationDefault := createMockValidationNode(t, ctx, nil) shortTimeoutConfig := server_arb.DefaultArbitratorSpawnerConfig - shortTimeoutConfig.ExecRunTimeout = time.Second + shortTimeoutConfig.ExecutionRunTimeout = time.Second _, validationShortTO := createMockValidationNode(t, ctx, &shortTimeoutConfig) configFetcher := StaticFetcherFrom(t, &rpcclient.TestClientConfig) diff --git a/util/arbmath/math.go b/util/arbmath/math.go index a9758db1c0..467ee58a14 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -175,6 +175,11 @@ func BigAddByUint(augend *big.Int, addend uint64) *big.Int { return new(big.Int).Add(augend, UintToBig(addend)) } +// BigSubByUint subtracts a uint from a huge +func BigSubByUint(minuend *big.Int, subtrahend uint64) *big.Int { + return new(big.Int).Sub(minuend, UintToBig(subtrahend)) +} + // BigMulByFrac multiply a huge by a rational func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int { value = new(big.Int).Set(value) diff --git a/util/contracts/address_verifier.go b/util/contracts/address_verifier.go new file mode 100644 index 0000000000..eb2f862210 --- /dev/null +++ b/util/contracts/address_verifier.go @@ -0,0 +1,99 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package contracts + +import ( + "context" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/solgen/go/bridgegen" +) + +type AddressVerifier struct { + seqInboxCaller *bridgegen.SequencerInboxCaller + cache map[common.Address]bool + cacheExpiry time.Time + mutex sync.Mutex +} + +// Note that we only cache positive instances, not negative ones. That's because we're willing to accept the +// consequences of a false positive (accepting a Store from a recently retired batch poster), but we don't want +// to accept the consequences of a false negative (rejecting a Store from a recently added batch poster).
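
Note on the caching pattern introduced in address_verifier.go: only affirmative answers are stored, and instead of per-entry TTLs the whole map is dropped once addressVerifierLifetime elapses, so a revoked address is forgotten within one lifetime at worst. A minimal, self-contained sketch of the same idea, assuming a hypothetical lookup callback in place of the on-chain IsBatchPoster/IsSequencer calls (all names here are illustrative, not part of the patch):

```go
package cache

import (
	"context"
	"sync"
	"time"
)

// positiveCache remembers only successful lookups; a single expiry
// timestamp lets the whole map be discarded at once.
type positiveCache struct {
	mutex    sync.Mutex
	cache    map[string]bool
	expiry   time.Time
	lifetime time.Duration
	lookup   func(ctx context.Context, key string) (bool, error)
}

func (c *positiveCache) check(ctx context.Context, key string) (bool, error) {
	c.mutex.Lock()
	if time.Now().After(c.expiry) {
		// Wholesale flush, mirroring flushCache_locked below.
		c.cache = make(map[string]bool)
		c.expiry = time.Now().Add(c.lifetime)
	}
	if c.cache[key] {
		c.mutex.Unlock()
		return true, nil
	}
	// Release the lock before the slow lookup so concurrent checks
	// for other keys are not serialized behind it.
	c.mutex.Unlock()

	ok, err := c.lookup(ctx, key)
	if err != nil || !ok {
		return false, err // negative results are deliberately not cached
	}
	c.mutex.Lock()
	c.cache[key] = true
	c.mutex.Unlock()
	return true, nil
}
```

The trade-off matches the comment above: a stale positive (a just-retired poster still accepted) is tolerable for up to one lifetime, while a cached stale negative would wrongly reject a newly added poster.
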
+ +var addressVerifierLifetime = time.Hour + +func NewAddressVerifier(seqInboxCaller *bridgegen.SequencerInboxCaller) *AddressVerifier { + return &AddressVerifier{ + seqInboxCaller: seqInboxCaller, + cache: make(map[common.Address]bool), + cacheExpiry: time.Now().Add(addressVerifierLifetime), + } +} + +func (av *AddressVerifier) IsBatchPosterOrSequencer(ctx context.Context, addr common.Address) (bool, error) { + av.mutex.Lock() + if time.Now().After(av.cacheExpiry) { + if err := av.flushCache_locked(ctx); err != nil { + av.mutex.Unlock() + return false, err + } + } + if av.cache[addr] { + av.mutex.Unlock() + return true, nil + } + av.mutex.Unlock() + + result, err := av.seqInboxCaller.IsBatchPoster(&bind.CallOpts{Context: ctx}, addr) + if err != nil { + return false, err + } + if !result { + var err error + result, err = av.seqInboxCaller.IsSequencer(&bind.CallOpts{Context: ctx}, addr) + if err != nil { + return false, err + } + } + if result { + av.mutex.Lock() + av.cache[addr] = true + av.mutex.Unlock() + return true, nil + } + return result, nil +} + +func (av *AddressVerifier) FlushCache(ctx context.Context) error { + av.mutex.Lock() + defer av.mutex.Unlock() + return av.flushCache_locked(ctx) +} + +func (av *AddressVerifier) flushCache_locked(ctx context.Context) error { + av.cache = make(map[common.Address]bool) + av.cacheExpiry = time.Now().Add(addressVerifierLifetime) + return nil +} + +func NewMockAddressVerifier(validAddr common.Address) *MockAddressVerifier { + return &MockAddressVerifier{ + validAddr: validAddr, + } +} + +type MockAddressVerifier struct { + validAddr common.Address +} + +func (bpv *MockAddressVerifier) IsBatchPosterOrSequencer(_ context.Context, addr common.Address) (bool, error) { + return addr == bpv.validAddr, nil +} + +type AddressVerifierInterface interface { + IsBatchPosterOrSequencer(ctx context.Context, addr common.Address) (bool, error) +} diff --git a/util/contracts/batch_poster_verifier.go b/util/contracts/batch_poster_verifier.go deleted file mode 100644 index da9354ed3e..0000000000 --- a/util/contracts/batch_poster_verifier.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package contracts - -import ( - "context" - "sync" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/solgen/go/bridgegen" -) - -type BatchPosterVerifier struct { - seqInboxCaller *bridgegen.SequencerInboxCaller - cache map[common.Address]bool - cacheExpiry time.Time - mutex sync.Mutex -} - -// Note that we only cache positive instances, not negative ones. That's because we're willing to accept the -// consequences of a false positive (accepting a Store from a recently retired batch poster), but we don't want -// to accept the consequences of a false negative (rejecting a Store from a recently added batch poster). 
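
The deletion that follows removes the AddressVerifier's direct predecessor; beyond the rename, the behavioral change is that callers now accept sequencers as well as batch posters through the single IsBatchPosterOrSequencer call. A hypothetical call site, sketched under the assumption that a *bridgegen.SequencerInboxCaller is already constructed (the requireApprovedSigner helper is illustrative, not from the patch):

```go
package example

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
	"github.com/offchainlabs/nitro/util/contracts"
)

// requireApprovedSigner shows the shape of the migration: code that
// previously asked BatchPosterVerifier.IsBatchPoster now asks the
// AddressVerifier about either role in one call.
func requireApprovedSigner(ctx context.Context, seqInbox *bridgegen.SequencerInboxCaller, addr common.Address) error {
	verifier := contracts.NewAddressVerifier(seqInbox)
	ok, err := verifier.IsBatchPosterOrSequencer(ctx, addr)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("signer is neither a batch poster nor a sequencer")
	}
	return nil
}
```
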
- -var batchPosterVerifierLifetime = time.Hour - -func NewBatchPosterVerifier(seqInboxCaller *bridgegen.SequencerInboxCaller) *BatchPosterVerifier { - return &BatchPosterVerifier{ - seqInboxCaller: seqInboxCaller, - cache: make(map[common.Address]bool), - cacheExpiry: time.Now().Add(batchPosterVerifierLifetime), - } -} - -func (bpv *BatchPosterVerifier) IsBatchPoster(ctx context.Context, addr common.Address) (bool, error) { - bpv.mutex.Lock() - if time.Now().After(bpv.cacheExpiry) { - if err := bpv.flushCache_locked(ctx); err != nil { - bpv.mutex.Unlock() - return false, err - } - } - if bpv.cache[addr] { - bpv.mutex.Unlock() - return true, nil - } - bpv.mutex.Unlock() - - isBatchPoster, err := bpv.seqInboxCaller.IsBatchPoster(&bind.CallOpts{Context: ctx}, addr) - if err != nil { - return false, err - } - if isBatchPoster { - bpv.mutex.Lock() - bpv.cache[addr] = true - bpv.mutex.Unlock() - } - return isBatchPoster, nil -} - -func (bpv *BatchPosterVerifier) FlushCache(ctx context.Context) error { - bpv.mutex.Lock() - defer bpv.mutex.Unlock() - return bpv.flushCache_locked(ctx) -} - -func (bpv *BatchPosterVerifier) flushCache_locked(ctx context.Context) error { - bpv.cache = make(map[common.Address]bool) - bpv.cacheExpiry = time.Now().Add(batchPosterVerifierLifetime) - return nil -} - -func NewMockBatchPosterVerifier(validAddr common.Address) *MockBatchPosterVerifier { - return &MockBatchPosterVerifier{ - validAddr: validAddr, - } -} - -type MockBatchPosterVerifier struct { - validAddr common.Address -} - -func (bpv *MockBatchPosterVerifier) IsBatchPoster(_ context.Context, addr common.Address) (bool, error) { - return addr == bpv.validAddr, nil -} - -type BatchPosterVerifierInterface interface { - IsBatchPoster(ctx context.Context, addr common.Address) (bool, error) -} diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index a4113447a7..0b867336c0 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -18,18 +18,21 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) +type ArbSysInterface interface { + ArbBlockNumber(*bind.CallOpts) (*big.Int, error) +} + type HeaderReader struct { stopwaiter.StopWaiter config ConfigFetcher client arbutil.L1Interface isParentChainArbitrum bool - arbSys *precompilesgen.ArbSys + arbSys ArbSysInterface chanMutex sync.RWMutex // All fields below require the chanMutex @@ -41,15 +44,15 @@ type HeaderReader struct { lastPendingCallBlockNr uint64 requiresPendingCallUpdates int - safe cachedBlockNumber - finalized cachedBlockNumber + safe cachedHeader + finalized cachedHeader } -type cachedBlockNumber struct { +type cachedHeader struct { mutex sync.Mutex rpcBlockNum *big.Int headWhenCached *types.Header - blockNumber uint64 + header *types.Header } type Config struct { @@ -79,6 +82,7 @@ func AddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".poll-only", DefaultConfig.PollOnly, "do not attempt to subscribe to header events") f.Bool(prefix+".use-finality-data", DefaultConfig.UseFinalityData, "use l1 data about finalized/safe blocks") f.Duration(prefix+".poll-interval", DefaultConfig.PollInterval, "interval when polling endpoint") + f.Duration(prefix+".subscribe-err-interval", DefaultConfig.SubscribeErrInterval, "interval for 
resubscribing after a subscription error") f.Duration(prefix+".tx-timeout", DefaultConfig.TxTimeout, "timeout when waiting for a transaction") f.Duration(prefix+".old-header-timeout", DefaultConfig.OldHeaderTimeout, "warns if the latest l1 block is at least this old") } @@ -92,19 +96,18 @@ var TestConfig = Config{ UseFinalityData: false, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher) (*HeaderReader, error) { +func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { isParentChainArbitrum := false - var arbSys *precompilesgen.ArbSys - codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) - if err != nil { - return nil, err - } - if len(codeAt) != 0 { - isParentChainArbitrum = true - arbSys, err = precompilesgen.NewArbSys(types.ArbSysAddress, client) + var arbSys ArbSysInterface + if arbSysPrecompile != nil { + codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) if err != nil { return nil, err } + if len(codeAt) != 0 { + isParentChainArbitrum = true + arbSys = arbSysPrecompile + } } return &HeaderReader{ client: client, @@ -113,8 +116,8 @@ func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher) arbSys: arbSys, outChannels: make(map[chan<- *types.Header]struct{}), outChannelsBehind: make(map[chan<- *types.Header]struct{}), - safe: cachedBlockNumber{rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, - finalized: cachedBlockNumber{rpcBlockNum: big.NewInt(rpc.FinalizedBlockNumber.Int64())}, + safe: cachedHeader{rpcBlockNum: big.NewInt(rpc.SafeBlockNumber.Int64())}, + finalized: cachedHeader{rpcBlockNum: big.NewInt(rpc.FinalizedBlockNumber.Int64())}, }, nil } @@ -313,7 +316,7 @@ func (s *HeaderReader) logIfHeaderIsOld() { headerTime := time.Since(l1Timetamp) if headerTime >= s.config().OldHeaderTimeout { s.setError(fmt.Errorf("latest header is at least %v old", headerTime)) - log.Warn( + log.Error( "latest L1 block is old", "l1Block", storedHeader.Number, "l1Timestamp", l1Timetamp, "age", headerTime, ) @@ -396,47 +399,79 @@ func headerIndicatesFinalitySupport(header *types.Header) bool { return false } -func (s *HeaderReader) getCached(ctx context.Context, c *cachedBlockNumber) (uint64, error) { +func HeadersEqual(ha, hb *types.Header) bool { + if (ha == nil) != (hb == nil) { + return false + } + return (ha == nil && hb == nil) || ha.Hash() == hb.Hash() +} + +func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.Header, error) { c.mutex.Lock() defer c.mutex.Unlock() currentHead, err := s.LastHeader(ctx) if err != nil { - return 0, err + return nil, err } - if currentHead == c.headWhenCached { - return c.blockNumber, nil + if HeadersEqual(currentHead, c.headWhenCached) { + return c.header, nil } if !s.config().UseFinalityData || !headerIndicatesFinalitySupport(currentHead) { - return 0, ErrBlockNumberNotSupported + return nil, ErrBlockNumberNotSupported } header, err := s.client.HeaderByNumber(ctx, c.rpcBlockNum) if err != nil { - return 0, err + return nil, err } - c.blockNumber = header.Number.Uint64() - return c.blockNumber, nil + c.header = header + c.headWhenCached = currentHead + return c.header, nil +} + +func (s *HeaderReader) LatestSafeBlockHeader(ctx context.Context) (*types.Header, error) { + header, err := s.getCached(ctx, &s.safe) + if errors.Is(err, ErrBlockNumberNotSupported) { + return nil, fmt.Errorf("%w: safe block not found", ErrBlockNumberNotSupported) + } + return header, err } func (s *HeaderReader)
LatestSafeBlockNr(ctx context.Context) (uint64, error) { - blockNum, err := s.getCached(ctx, &s.safe) + header, err := s.LatestSafeBlockHeader(ctx) + if err != nil { + return 0, err + } + return header.Number.Uint64(), nil +} + +func (s *HeaderReader) LatestFinalizedBlockHeader(ctx context.Context) (*types.Header, error) { + header, err := s.getCached(ctx, &s.finalized) if errors.Is(err, ErrBlockNumberNotSupported) { - err = errors.New("safe block not found") + return nil, fmt.Errorf("%w: finalized block not found", ErrBlockNumberNotSupported) } - return blockNum, err + return header, err } func (s *HeaderReader) LatestFinalizedBlockNr(ctx context.Context) (uint64, error) { - blockNum, err := s.getCached(ctx, &s.finalized) - if errors.Is(err, ErrBlockNumberNotSupported) { - err = errors.New("finalized block not found") + header, err := s.LatestFinalizedBlockHeader(ctx) + if err != nil { + return 0, err } - return blockNum, err + return header.Number.Uint64(), nil } func (s *HeaderReader) Client() arbutil.L1Interface { return s.client } +func (s *HeaderReader) UseFinalityData() bool { + return s.config().UseFinalityData +} + +func (s *HeaderReader) IsParentChainArbitrum() bool { + return s.isParentChainArbitrum +} + func (s *HeaderReader) Start(ctxIn context.Context) { s.StopWaiter.Start(ctxIn, s) s.LaunchThread(s.broadcastLoop) diff --git a/util/jsonapi/preimages.go b/util/jsonapi/preimages.go index d669b7046e..a0dfe8579f 100644 --- a/util/jsonapi/preimages.go +++ b/util/jsonapi/preimages.go @@ -16,8 +16,8 @@ type PreimagesMapJson struct { Map map[common.Hash][]byte } -func NewPreimagesMapJson(inner map[common.Hash][]byte) PreimagesMapJson { - return PreimagesMapJson{inner} +func NewPreimagesMapJson(inner map[common.Hash][]byte) *PreimagesMapJson { + return &PreimagesMapJson{inner} } func (m *PreimagesMapJson) MarshalJSON() ([]byte, error) { diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 0ee92fef17..357dfb2e93 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -76,6 +76,32 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, return current, nil } +// GetPriorities returns the priority list of sequencers +func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { + prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return []string{}, err + } + prioritiesList := strings.Split(prioritiesString, ",") + return prioritiesList, nil +} + +// GetLiveliness returns a map whose keys are sequencers that have their liveliness set to OK +func (rc *RedisCoordinator) GetLiveliness(ctx context.Context) ([]string, error) { + livelinessList, _, err := rc.Client.Scan(ctx, 0, WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() + if err != nil { + return []string{}, err + } + for i, elem := range livelinessList { + url := strings.TrimPrefix(elem, WANTS_LOCKOUT_KEY_PREFIX) + livelinessList[i] = url + } + return livelinessList, nil +} + func MessageKeyFor(pos arbutil.MessageIndex) string { return fmt.Sprintf("%s%d", MESSAGE_KEY_PREFIX, pos) } diff --git a/util/signature/sign_verify.go b/util/signature/sign_verify.go index 9a594ccbeb..5ed852bfbc 100644 --- a/util/signature/sign_verify.go +++ b/util/signature/sign_verify.go @@ -31,6 +31,12 @@ func SignVerifyConfigAddOptions(prefix string, f *flag.FlagSet) { } var DefaultSignVerifyConfig = SignVerifyConfig{ + 
ECDSA: DefultFeedVerifierConfig, + SymmetricFallback: false, + SymmetricSign: false, + Symmetric: EmptySimpleHmacConfig, +} +var TestSignVerifyConfig = SignVerifyConfig{ ECDSA: VerifierConfig{ AcceptSequencer: true, }, @@ -39,7 +45,7 @@ var DefaultSignVerifyConfig = SignVerifyConfig{ Symmetric: TestSimpleHmacConfig, } -func NewSignVerify(config *SignVerifyConfig, signerFunc DataSignerFunc, bpValidator contracts.BatchPosterVerifierInterface) (*SignVerify, error) { +func NewSignVerify(config *SignVerifyConfig, signerFunc DataSignerFunc, bpValidator contracts.AddressVerifierInterface) (*SignVerify, error) { var fallback *SimpleHmac if config.SymmetricFallback { var err error diff --git a/util/signature/sign_verify_test.go b/util/signature/sign_verify_test.go index 8ecb6e5ccc..916fc03a20 100644 --- a/util/signature/sign_verify_test.go +++ b/util/signature/sign_verify_test.go @@ -17,7 +17,7 @@ func TestSignVerifyModes(t *testing.T) { signingAddr := crypto.PubkeyToAddress(privateKey.PublicKey) dataSigner := DataSignerFromPrivateKey(privateKey) - config := DefaultSignVerifyConfig + config := TestSignVerifyConfig config.SymmetricFallback = false config.SymmetricSign = false config.ECDSA.AcceptSequencer = false @@ -25,14 +25,14 @@ func TestSignVerifyModes(t *testing.T) { signVerifyECDSA, err := NewSignVerify(&config, dataSigner, nil) Require(t, err) - configSymmetric := DefaultSignVerifyConfig + configSymmetric := TestSignVerifyConfig configSymmetric.SymmetricFallback = true configSymmetric.SymmetricSign = true configSymmetric.ECDSA.AcceptSequencer = false signVerifySymmetric, err := NewSignVerify(&configSymmetric, nil, nil) Require(t, err) - configFallback := DefaultSignVerifyConfig + configFallback := TestSignVerifyConfig configFallback.SymmetricFallback = true configFallback.SymmetricSign = false configFallback.ECDSA.AllowedAddresses = []string{signingAddr.Hex()} diff --git a/util/signature/simple_hmac.go b/util/signature/simple_hmac.go index b1c683742b..4899b5c22c 100644 --- a/util/signature/simple_hmac.go +++ b/util/signature/simple_hmac.go @@ -58,7 +58,7 @@ func SimpleHmacDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { func SimpleHmacConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".signing-key", EmptySimpleHmacConfig.SigningKey, "a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it") - f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.SigningKey, "a fallback key used for message verification") + f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.FallbackVerificationKey, "a fallback key used for message verification") SimpleHmacDangerousConfigAddOptions(prefix+".dangerous", f) } diff --git a/util/signature/verifier.go b/util/signature/verifier.go index fb0aae9e1e..c2f6529ec6 100644 --- a/util/signature/verifier.go +++ b/util/signature/verifier.go @@ -19,7 +19,7 @@ import ( type Verifier struct { config *VerifierConfig authorizedMap map[common.Address]struct{} - bpValidator contracts.BatchPosterVerifierInterface + addrVerifier contracts.AddressVerifierInterface } type VerifierConfig struct { @@ -37,7 +37,7 @@ var ErrMissingSignature = fmt.Errorf("%w: signature not found", ErrSignatureNotV var ErrSignerNotApproved = fmt.Errorf("%w: signer not approved", ErrSignatureNotVerified) func FeedVerifierConfigAddOptions(prefix string, f *flag.FlagSet) { - f.StringArray(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a list of allowed addresses") + 
f.StringSlice(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a list of allowed addresses") f.Bool(prefix+".accept-sequencer", DefultFeedVerifierConfig.AcceptSequencer, "accept verified message from sequencer") DangerousFeedVerifierConfigAddOptions(prefix+".dangerous", f) } @@ -62,19 +62,19 @@ var TestingFeedVerifierConfig = VerifierConfig{ }, } -func NewVerifier(config *VerifierConfig, bpValidator contracts.BatchPosterVerifierInterface) (*Verifier, error) { +func NewVerifier(config *VerifierConfig, addrVerifier contracts.AddressVerifierInterface) (*Verifier, error) { authorizedMap := make(map[common.Address]struct{}, len(config.AllowedAddresses)) for _, addrString := range config.AllowedAddresses { addr := common.HexToAddress(addrString) authorizedMap[addr] = struct{}{} } - if bpValidator == nil && !config.Dangerous.AcceptMissing && config.AcceptSequencer { + if addrVerifier == nil && !config.Dangerous.AcceptMissing && config.AcceptSequencer { return nil, errors.New("cannot read batch poster addresses") } return &Verifier{ config: config, authorizedMap: authorizedMap, - bpValidator: bpValidator, + addrVerifier: addrVerifier, }, nil } @@ -107,20 +107,20 @@ func (v *Verifier) verifyClosure(ctx context.Context, sig []byte, hash common.Ha return nil } - if v.config.Dangerous.AcceptMissing && v.bpValidator == nil { + if v.config.Dangerous.AcceptMissing && v.addrVerifier == nil { return nil } - if !v.config.AcceptSequencer || v.bpValidator == nil { + if !v.config.AcceptSequencer || v.addrVerifier == nil { return ErrSignerNotApproved } - batchPoster, err := v.bpValidator.IsBatchPoster(ctx, addr) + batchPosterOrSequencer, err := v.addrVerifier.IsBatchPosterOrSequencer(ctx, addr) if err != nil { return err } - if !batchPoster { + if !batchPosterOrSequencer { return ErrSignerNotApproved } diff --git a/util/signature/verifier_test.go b/util/signature/verifier_test.go index f6644f1238..38c4bbe891 100644 --- a/util/signature/verifier_test.go +++ b/util/signature/verifier_test.go @@ -82,7 +82,7 @@ func TestVerifierBatchPoster(t *testing.T) { signingAddr := crypto.PubkeyToAddress(privateKey.PublicKey) dataSigner := DataSignerFromPrivateKey(privateKey) - bpVerifier := contracts.NewMockBatchPosterVerifier(signingAddr) + bpVerifier := contracts.NewMockAddressVerifier(signingAddr) config := TestingFeedVerifierConfig config.AcceptSequencer = true verifier, err := NewVerifier(&config, bpVerifier) diff --git a/validator/server_api/json.go b/validator/server_api/json.go index 95108757d7..2029741989 100644 --- a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/validator" ) @@ -20,20 +21,24 @@ type ValidationInputJson struct { Id uint64 HasDelayedMsg bool DelayedMsgNr uint64 - PreimagesB64 jsonapi.PreimagesMapJson + PreimagesB64 map[arbutil.PreimageType]*jsonapi.PreimagesMapJson BatchInfo []BatchInfoJson DelayedMsgB64 string StartState validator.GoGlobalState } func ValidationInputToJson(entry *validator.ValidationInput) *ValidationInputJson { + jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) + for ty, preimages := range entry.Preimages { + jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) + } res := &ValidationInputJson{ Id: entry.Id, HasDelayedMsg: entry.HasDelayedMsg, DelayedMsgNr: entry.DelayedMsgNr, DelayedMsgB64: 
base64.StdEncoding.EncodeToString(entry.DelayedMsg), StartState: entry.StartState, - PreimagesB64: jsonapi.NewPreimagesMapJson(entry.Preimages), + PreimagesB64: jsonPreimagesMap, } for _, binfo := range entry.BatchInfo { encData := base64.StdEncoding.EncodeToString(binfo.Data) @@ -43,12 +48,16 @@ func ValidationInputToJson(entry *validator.ValidationInput) *ValidationInputJso } func ValidationInputFromJson(entry *ValidationInputJson) (*validator.ValidationInput, error) { + preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) + for ty, jsonPreimages := range entry.PreimagesB64 { + preimages[ty] = jsonPreimages.Map + } valInput := &validator.ValidationInput{ Id: entry.Id, HasDelayedMsg: entry.HasDelayedMsg, DelayedMsgNr: entry.DelayedMsgNr, StartState: entry.StartState, - Preimages: entry.PreimagesB64.Map, + Preimages: preimages, } delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) if err != nil { diff --git a/validator/server_api/valiation_api.go b/validator/server_api/valiation_api.go index 9e5191ec81..ca5aafcee2 100644 --- a/validator/server_api/valiation_api.go +++ b/validator/server_api/valiation_api.go @@ -91,7 +91,7 @@ func (a *ExecServerAPI) LatestWasmModuleRoot(ctx context.Context) (common.Hash, } func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { - oldestKept := time.Now().Add(-1 * a.config().ExecRunTimeout) + oldestKept := time.Now().Add(-1 * a.config().ExecutionRunTimeout) a.runIdLock.Lock() defer a.runIdLock.Unlock() for id, entry := range a.runs { @@ -99,7 +99,7 @@ func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { delete(a.runs, id) } } - return a.config().ExecRunTimeout / 5 + return a.config().ExecutionRunTimeout / 5 } func (a *ExecServerAPI) Start(ctx_in context.Context) { diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index 3101362782..e59659c7af 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -7,7 +7,7 @@ package server_arb #cgo CFLAGS: -g -Wall -I../../target/include/ #include "arbitrator.h" -ResolvedPreimage preimageResolverC(size_t context, const uint8_t* hash); +ResolvedPreimage preimageResolverC(size_t context, uint8_t preimageType, const uint8_t* hash); */ import "C" import ( @@ -21,6 +21,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/validator" ) @@ -49,7 +51,7 @@ type ArbitratorMachine struct { // Assert that ArbitratorMachine implements MachineInterface var _ MachineInterface = (*ArbitratorMachine)(nil) -var preimageResolvers sync.Map +var preimageResolvers containers.SyncMap[int64, GoPreimageResolver] var lastPreimageResolverId int64 // atomic // Any future calls to this machine will result in a panic @@ -335,10 +337,10 @@ func (m *ArbitratorMachine) AddDelayedInboxMessage(index uint64, data []byte) er } } -type GoPreimageResolver = func(common.Hash) ([]byte, error) +type GoPreimageResolver = func(arbutil.PreimageType, common.Hash) ([]byte, error) //export preimageResolver -func preimageResolver(context C.size_t, ptr unsafe.Pointer) C.ResolvedPreimage { +func preimageResolver(context C.size_t, ty C.uint8_t, ptr unsafe.Pointer) C.ResolvedPreimage { var hash common.Hash input := (*[1 << 30]byte)(ptr)[:32] copy(hash[:], input) @@ -348,14 +350,7 @@ func preimageResolver(context C.size_t, ptr unsafe.Pointer) C.ResolvedPreimage { len: -1, } } - 
resolverFunc, ok := resolver.(GoPreimageResolver) - if !ok { - log.Warn("preimage resolver has wrong type") - return C.ResolvedPreimage{ - len: -1, - } - } - preimage, err := resolverFunc(hash) + preimage, err := resolver(arbutil.PreimageType(ty), hash) if err != nil { log.Error("preimage resolution failed", "err", err) return C.ResolvedPreimage{ diff --git a/validator/server_arb/preimage_resolver.go b/validator/server_arb/preimage_resolver.go index 24c040c716..cd4ea40e28 100644 --- a/validator/server_arb/preimage_resolver.go +++ b/validator/server_arb/preimage_resolver.go @@ -7,10 +7,10 @@ package server_arb #cgo CFLAGS: -g -Wall -I../../target/include/ #include "arbitrator.h" -extern ResolvedPreimage preimageResolver(size_t context, const uint8_t* hash); +extern ResolvedPreimage preimageResolver(size_t context, uint8_t preimageType, const uint8_t* hash); -ResolvedPreimage preimageResolverC(size_t context, const uint8_t* hash) { - return preimageResolver(context, hash); +ResolvedPreimage preimageResolverC(size_t context, uint8_t preimageType, const uint8_t* hash) { + return preimageResolver(context, preimageType, hash); } */ import "C" diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index f9d0705f59..ab04942871 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -13,6 +13,7 @@ import ( flag "github.com/spf13/pflag" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" @@ -23,24 +24,24 @@ import ( ) type ArbitratorSpawnerConfig struct { - Workers int `koanf:"workers" reload:"hot"` - OutputPath string `koanf:"output-path" reload:"hot"` - Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only - ExecRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` + Workers int `koanf:"workers" reload:"hot"` + OutputPath string `koanf:"output-path" reload:"hot"` + Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only + ExecutionRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` } type ArbitratorSpawnerConfigFecher func() *ArbitratorSpawnerConfig var DefaultArbitratorSpawnerConfig = ArbitratorSpawnerConfig{ - Workers: 0, - OutputPath: "./target/output", - Execution: DefaultMachineCacheConfig, - ExecRunTimeout: time.Minute * 15, + Workers: 0, + OutputPath: "./target/output", + Execution: DefaultMachineCacheConfig, + ExecutionRunTimeout: time.Minute * 15, } func ArbitratorSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".workers", DefaultArbitratorSpawnerConfig.Workers, "number of concurrent validation threads") - f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecRunTimeout, "timeout before discarding execution run") + f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecutionRunTimeout, "timeout before discarding execution run") f.String(prefix+".output-path", DefaultArbitratorSpawnerConfig.OutputPath, "path to write machines to") MachineCacheConfigConfigAddOptions(prefix+".execution", f) } @@ -81,9 +82,9 @@ func (s *ArbitratorSpawner) Name() string { } func (v *ArbitratorSpawner) loadEntryToMachine(ctx context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error { - resolver := func(hash common.Hash) ([]byte, error) { + resolver := func(ty 
arbutil.PreimageType, hash common.Hash) ([]byte, error) { // Check if it's a known preimage - if preimage, ok := entry.Preimages[hash]; ok { + if preimage, ok := entry.Preimages[ty][hash]; ok { return preimage, nil } return nil, errors.New("preimage not found") @@ -242,19 +243,25 @@ func (v *ArbitratorSpawner) writeToFile(ctx context.Context, input *validator.Va return err } defer preimageFile.Close() - for _, data := range input.Preimages { - if ctx.Err() != nil { - return ctx.Err() - } - lenbytes := make([]byte, 8) - binary.LittleEndian.PutUint64(lenbytes, uint64(len(data))) - _, err := preimageFile.Write(lenbytes) + for ty, preimages := range input.Preimages { + _, err = preimageFile.Write([]byte{byte(ty)}) if err != nil { return err } - _, err = preimageFile.Write(data) - if err != nil { - return err + for _, data := range preimages { + if ctx.Err() != nil { + return ctx.Err() + } + lenbytes := make([]byte, 8) + binary.LittleEndian.PutUint64(lenbytes, uint64(len(data))) + _, err := preimageFile.Write(lenbytes) + if err != nil { + return err + } + _, err = preimageFile.Write(data) + if err != nil { + return err + } } } diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go index 394dae9c9d..f763ce3ea0 100644 --- a/validator/server_jit/jit_machine.go +++ b/validator/server_jit/jit_machine.go @@ -59,10 +59,8 @@ func (machine *JitMachine) close() { } } -type GoPreimageResolver = func(common.Hash) ([]byte, error) - func (machine *JitMachine) prove( - ctxIn context.Context, entry *validator.ValidationInput, resolver GoPreimageResolver, + ctxIn context.Context, entry *validator.ValidationInput, ) (validator.GoGlobalState, error) { ctx, cancel := context.WithCancel(ctxIn) defer cancel() // ensure our cleanup functions run when we're done @@ -144,7 +142,6 @@ func (machine *JitMachine) prove( const successByte = 0x0 const failureByte = 0x1 - const preimageByte = 0x2 const anotherByte = 0x3 const readyByte = 0x4 @@ -185,17 +182,25 @@ func (machine *JitMachine) prove( } // send known preimages - knownPreimages := entry.Preimages - if err := writeUint64(uint64(len(knownPreimages))); err != nil { + preimageTypes := entry.Preimages + if err := writeUint64(uint64(len(preimageTypes))); err != nil { return state, err } - for hash, preimage := range knownPreimages { - if err := writeExact(hash[:]); err != nil { + for ty, preimages := range preimageTypes { + if err := writeUint8(uint8(ty)); err != nil { return state, err } - if err := writeBytes(preimage); err != nil { + if err := writeUint64(uint64(len(preimages))); err != nil { return state, err } + for hash, preimage := range preimages { + if err := writeExact(hash[:]); err != nil { + return state, err + } + if err := writeBytes(preimage); err != nil { + return state, err + } + } } // signal that we are done sending global state @@ -232,28 +237,6 @@ func (machine *JitMachine) prove( return state, err } switch kind[0] { - case preimageByte: - hash, err := readHash() - if err != nil { - return state, err - } - preimage, err := resolver(hash) - if err != nil { - log.Error("Failed to resolve preimage for jit", "hash", hash) - if err := writeUint8(failureByte); err != nil { - return state, err - } - continue - } - - // send the preimage - if err := writeUint8(successByte); err != nil { - return state, err - } - if err := writeBytes(preimage); err != nil { - return state, err - } - case failureByte: length, err := readUint64() if err != nil { diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go index 
6de006b182..ff1749506a 100644 --- a/validator/server_jit/spawner.go +++ b/validator/server_jit/spawner.go @@ -2,7 +2,6 @@ package server_jit import ( "context" - "errors" "fmt" "runtime" "sync/atomic" @@ -70,14 +69,7 @@ func (v *JitSpawner) execute( return validator.GoGlobalState{}, fmt.Errorf("unabled to get WASM machine: %w", err) } - resolver := func(hash common.Hash) ([]byte, error) { - // Check if it's a known preimage - if preimage, ok := entry.Preimages[hash]; ok { - return preimage, nil - } - return nil, errors.New("preimage not found") - } - state, err := machine.prove(ctx, entry, resolver) + state, err := machine.prove(ctx, entry) return state, err } diff --git a/validator/validation_entry.go b/validator/validation_entry.go index 7f3fb2d577..fed1940f1f 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -2,6 +2,7 @@ package validator import ( "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" ) type BatchInfo struct { @@ -13,7 +14,7 @@ type ValidationInput struct { Id uint64 HasDelayedMsg bool DelayedMsgNr uint64 - Preimages map[common.Hash][]byte + Preimages map[arbutil.PreimageType]map[common.Hash][]byte BatchInfo []BatchInfo DelayedMsg []byte StartState GoGlobalState diff --git a/validator/valnode/valnode.go b/validator/valnode/valnode.go index ea887d3c39..ea9980e547 100644 --- a/validator/valnode/valnode.go +++ b/validator/valnode/valnode.go @@ -71,6 +71,19 @@ type ValidationNode struct { jitSpawner *server_jit.JitSpawner } +func EnsureValidationExposedViaAuthRPC(stackConf *node.Config) { + found := false + for _, module := range stackConf.AuthModules { + if module == server_api.Namespace { + found = true + break + } + } + if !found { + stackConf.AuthModules = append(stackConf.AuthModules, server_api.Namespace) + } +} + func CreateValidationNode(configFetcher ValidationConfigFetcher, stack *node.Node, fatalErrChan chan error) (*ValidationNode, error) { config := configFetcher() locator, err := server_common.NewMachineLocator(config.Wasm.RootPath) diff --git a/wavmio/higher.go b/wavmio/higher.go index 35aca061d2..81fa4a5e3e 100644 --- a/wavmio/higher.go +++ b/wavmio/higher.go @@ -6,7 +6,10 @@ package wavmio -import "github.com/ethereum/go-ethereum/common" +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" +) const INITIAL_CAPACITY = 128 const QUERY_SIZE = 32 @@ -61,9 +64,9 @@ func AdvanceInboxMessage() { setGlobalStateU64(IDX_INBOX_POSITION, pos+1) } -func ResolvePreImage(hash common.Hash) ([]byte, error) { +func ResolveTypedPreimage(ty arbutil.PreimageType, hash common.Hash) ([]byte, error) { return readBuffer(func(offset uint32, buf []byte) uint32 { - return resolvePreImage(hash[:], offset, buf) + return resolveTypedPreimage(uint8(ty), hash[:], offset, buf) }), nil } diff --git a/wavmio/raw.go b/wavmio/raw.go index 37b9961a70..f0462cbbe3 100644 --- a/wavmio/raw.go +++ b/wavmio/raw.go @@ -12,4 +12,4 @@ func getGlobalStateU64(idx uint64) uint64 func setGlobalStateU64(idx uint64, val uint64) func readInboxMessage(msgNum uint64, offset uint32, output []byte) uint32 func readDelayedInboxMessage(seqNum uint64, offset uint32, output []byte) uint32 -func resolvePreImage(hash []byte, offset uint32, output []byte) uint32 +func resolveTypedPreimage(ty uint8, hash []byte, offset uint32, output []byte) uint32 diff --git a/wavmio/raw.s b/wavmio/raw.s index ded91c7ec2..7347d13394 100644 --- a/wavmio/raw.s +++ b/wavmio/raw.s @@ -30,6 +30,6 @@ TEXT ·readDelayedInboxMessage(SB), NOSPLIT, 
$0 CallImport RET -TEXT ·resolvePreImage(SB), NOSPLIT, $0 +TEXT ·resolveTypedPreimage(SB), NOSPLIT, $0 CallImport RET diff --git a/wavmio/stub.go b/wavmio/stub.go index 05698429f5..0893f35250 100644 --- a/wavmio/stub.go +++ b/wavmio/stub.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" ) // source for arrayFlags: https://stackoverflow.com/questions/28322997/how-to-get-a-list-of-values-into-a-flag-in-golang @@ -134,7 +135,7 @@ func AdvanceInboxMessage() { seqAdvanced++ } -func ResolvePreImage(hash common.Hash) ([]byte, error) { +func ResolveTypedPreimage(ty arbutil.PreimageType, hash common.Hash) ([]byte, error) { val, ok := preimages[hash] if !ok { return []byte{}, errors.New("preimage not found") diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 913eae81f3..d51b368400 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -23,6 +23,7 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" ) @@ -32,6 +33,8 @@ var ( HTTPHeaderFeedClientVersion = textproto.CanonicalMIMEHeaderKey("Arbitrum-Feed-Client-Version") HTTPHeaderRequestedSequenceNumber = textproto.CanonicalMIMEHeaderKey("Arbitrum-Requested-Sequence-Number") HTTPHeaderChainId = textproto.CanonicalMIMEHeaderKey("Arbitrum-Chain-Id") + upgradeToWSTimer = metrics.NewRegisteredTimer("arb/feed/clients/upgrade/duration", nil) + startWithHeaderTimer = metrics.NewRegisteredTimer("arb/feed/clients/start/duration", nil) ) const ( @@ -60,6 +63,7 @@ type BroadcasterConfig struct { EnableCompression bool `koanf:"enable-compression" reload:"hot"` // if reloaded to false will cause disconnection of clients with enabled compression on next broadcast RequireCompression bool `koanf:"require-compression" reload:"hot"` // if reloaded to true will cause disconnection of clients with disabled compression on next broadcast LimitCatchup bool `koanf:"limit-catchup" reload:"hot"` + MaxCatchup int `koanf:"max-catchup" reload:"hot"` ConnectionLimits ConnectionLimiterConfig `koanf:"connection-limits" reload:"hot"` ClientDelay time.Duration `koanf:"client-delay" reload:"hot"` } @@ -83,7 +87,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".port", DefaultBroadcasterConfig.Port, "port to bind the relay feed output to") f.Duration(prefix+".ping", DefaultBroadcasterConfig.Ping, "duration for ping interval") f.Duration(prefix+".client-timeout", DefaultBroadcasterConfig.ClientTimeout, "duration to wait before timing out connections to client") - f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size") + f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size for HTTP to WS upgrade") f.Int(prefix+".workers", DefaultBroadcasterConfig.Workers, "number of threads to reserve for HTTP to WS upgrade") f.Int(prefix+".max-send-queue", DefaultBroadcasterConfig.MaxSendQueue, "maximum number of messages allowed to accumulate before client is disconnected") f.Bool(prefix+".require-version", DefaultBroadcasterConfig.RequireVersion, "don't connect if client version not present") @@ -93,6 +97,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable-compression", DefaultBroadcasterConfig.EnableCompression, "enable per message deflate compression 
support") f.Bool(prefix+".require-compression", DefaultBroadcasterConfig.RequireCompression, "require clients to use compression") f.Bool(prefix+".limit-catchup", DefaultBroadcasterConfig.LimitCatchup, "only supply catchup buffer if requested sequence number is reasonable") + f.Int(prefix+".max-catchup", DefaultBroadcasterConfig.MaxCatchup, "the maximum size of the catchup buffer (-1 means unlimited)") ConnectionLimiterConfigAddOptions(prefix+".connection-limits", f) f.Duration(prefix+".client-delay", DefaultBroadcasterConfig.ClientDelay, "delay the first messages sent to each client by this amount") } @@ -117,6 +122,7 @@ var DefaultBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, } @@ -141,6 +147,7 @@ var DefaultTestBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, } @@ -197,7 +204,11 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { HTTPHeaderChainId: []string{strconv.FormatUint(s.chainId, 10)}, }) - return s.StartWithHeader(ctx, header) + startTime := time.Now() + err := s.StartWithHeader(ctx, header) + elapsed := time.Since(startTime) + startWithHeaderTimer.Update(elapsed) + return err } func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.HandshakeHeader) error { @@ -312,7 +323,10 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands } // Zero-copy upgrade to WebSocket connection. + startTime := time.Now() _, err = upgrader.Upgrade(conn) + elapsed := time.Since(startTime) + upgradeToWSTimer.Update(elapsed) if err != nil { if err.Error() != "" {
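
A note on the wsbroadcastserver changes above: the two new timers follow go-ethereum's usual metrics pattern, registering once at package scope and feeding time.Since(start) around the measured section. A small self-contained sketch of that shape (the metric name and the timeSection helper are illustrative, not from the patch):

```go
package example

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// Registered on the default registry at package init, like
// arb/feed/clients/upgrade/duration above.
var sectionTimer = metrics.NewRegisteredTimer("arb/example/section/duration", nil)

// timeSection records how long op takes, using the same
// start / time.Since / Update shape the patch wraps around
// upgrader.Upgrade(conn) and StartWithHeader.
func timeSection(op func() error) error {
	start := time.Now()
	err := op()
	sectionTimer.Update(time.Since(start))
	return err
}
```

Because NewRegisteredTimer attaches to the default registry, the new measurements surface wherever the node already exports its metrics, with no extra plumbing beyond the Update calls.
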