diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69731435bb..4d97b5bfd5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - test-mode: [defaults, race, challenge, stylus] + test-mode: [defaults, race, challenge, stylus, long] steps: - name: Checkout @@ -167,8 +167,14 @@ jobs: if: matrix.test-mode == 'stylus' run: | packages=`go list ./...` - stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run=TestProgramArbitrator > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramArbitrator" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + - name: run long stylus tests + if: matrix.test-mode == 'long' + run: | + packages=`go list ./...` + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + - name: Archive detailed run log uses: actions/upload-artifact@v3 with: diff --git a/arbitrator/arbutil/src/evm/api.rs b/arbitrator/arbutil/src/evm/api.rs index f84f92ad96..093e7f2984 100644 --- a/arbitrator/arbutil/src/evm/api.rs +++ b/arbitrator/arbutil/src/evm/api.rs @@ -153,7 +153,7 @@ pub trait EvmApi<D: DataReader>: Send + 'static { ) -> (eyre::Result<Bytes20>, u32, u64); /// Returns the EVM return data. - /// Analogous to `vm.RETURNDATA`. + /// Analogous to `vm.RETURNDATACOPY`. fn get_return_data(&self) -> D; /// Emits an EVM log with the given number of topics and data, the first bytes of which should be the topic data.
diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index 18f9ecec09..f6c3e9fe8f 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -627,9 +627,9 @@ impl<'a> WasmBinary<'a> { ink_left: ink_left.as_u32(), ink_status: ink_status.as_u32(), depth_left: depth_left.as_u32(), - init_cost: init.try_into()?, - cached_init_cost: cached_init.try_into()?, - asm_estimate: asm_estimate.try_into()?, + init_cost: init.try_into().wrap_err("init cost too high")?, + cached_init_cost: cached_init.try_into().wrap_err("cached cost too high")?, + asm_estimate: asm_estimate.try_into().wrap_err("asm estimate too large")?, footprint, user_main, }) diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index fd7e22e1b2..5466c7f790 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -1729,7 +1729,7 @@ impl Machine { pub fn jump_into_func(&mut self, module: u32, func: u32, mut args: Vec<Value>) -> Result<()> { let Some(source_module) = self.modules.get(module as usize) else { - bail!("no module at offest {}", module.red()) + bail!("no module at offset {}", module.red()) }; let Some(source_func) = source_module.funcs.get(func as usize) else { bail!( diff --git a/arbitrator/stylus/tests/multicall/Cargo.lock b/arbitrator/stylus/tests/multicall/Cargo.lock index 67b375d746..ca70689bf7 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.lock +++ b/arbitrator/stylus/tests/multicall/Cargo.lock @@ -17,16 +17,29 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e416903084d3392ebd32d94735c395d6709415b76c7728e594d3f996f2b03e65" dependencies = [ + "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more", "hex-literal", "itoa", + "proptest", "ruint", + "serde", "tiny-keccak", ] +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "arrayvec", + "bytes", +] + [[package]] name = "alloy-sol-macro" version = "0.3.1" @@ -51,20 +64,48 @@ dependencies = [ "alloy-primitives", "alloy-sol-macro", "const-hex", + "serde", ] +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + [[package]] name = "block-buffer" version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -98,7 +145,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "268f52aae268980d03dd9544c1ea591965b2735b038d6998d6e4ab37c8c24445" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "hex", "serde", @@ -184,6 +231,28 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "generic-array" version = "0.14.7" @@ -194,6 +263,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "getrandom" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi", +] + [[package]] name = "heck" version = "0.4.1" @@ -241,9 +321,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libm" @@ -251,18 +331,44 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memory_units" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" + +[[package]] +name = "mini-alloc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9993556d3850cdbd0da06a3dc81297edcfa050048952d84d75e8b944e8f5af" +dependencies = [ + "cfg-if 1.0.0", + "wee_alloc", +] + [[package]] name = "multicall" version = "0.1.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "hex", + "mini-alloc", "stylus-sdk", + "wee_alloc", ] [[package]] @@ -296,15 +402,26 @@ version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ - "bitflags", + "bit-set", + "bitflags 1.3.2", "byteorder", + "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", + "regex-syntax 0.6.29", + "rusty-fork", + "tempfile", "unarray", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.29" @@ -320,6 +437,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ + "libc", + "rand_chacha", "rand_core", ] @@ -338,6 +457,9 @@ name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] [[package]] name = "rand_xorshift" @@ -357,7 +479,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax", + "regex-syntax 0.7.4", ] [[package]] @@ -368,9 +490,15 @@ checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.4", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.7.4" @@ -406,6 +534,31 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "semver" version = "1.0.17" @@ -417,6 +570,20 @@ name = "serde" version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] [[package]] name = "sha3" @@ -434,7 +601,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "convert_case 0.6.0", "lazy_static", "proc-macro2", @@ -451,7 +618,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "derivative", "hex", "keccak-const", @@ -492,6 +659,18 @@ dependencies = [ "syn 2.0.25", ] +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand", + "rustix", + 
"windows-sys", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -537,6 +716,121 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wee_alloc" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "memory_units", + "winapi", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + [[package]] name = "zeroize" version = "1.6.0" diff --git a/arbitrator/stylus/tests/multicall/Cargo.toml b/arbitrator/stylus/tests/multicall/Cargo.toml index 2ab5728d12..3bc48c6826 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.toml +++ b/arbitrator/stylus/tests/multicall/Cargo.toml @@ -4,8 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] +alloy-primitives = "0.3.1" +alloy-sol-types = "0.3.1" +mini-alloc = "0.4.2" stylus-sdk = { path = "../../../langs/rust/stylus-sdk", features = ["reentrant"] } hex = "0.4.3" +wee_alloc = "0.4.5" [profile.release] codegen-units = 1 diff --git a/arbitrator/stylus/tests/multicall/src/main.rs b/arbitrator/stylus/tests/multicall/src/main.rs index 1f255cd99c..fd6929b8f1 100644 --- a/arbitrator/stylus/tests/multicall/src/main.rs +++ b/arbitrator/stylus/tests/multicall/src/main.rs @@ -3,13 +3,28 @@ #![no_main] +extern crate alloc; + use stylus_sdk::{ + storage::{StorageCache, GlobalStorage}, alloy_primitives::{Address, B256}, + alloy_sol_types::sol, call::RawCall, console, + evm, prelude::*, }; +use wee_alloc::WeeAlloc; + +#[global_allocator] +static ALLOC: WeeAlloc = WeeAlloc::INIT; + +sol!{ + event Called(address addr, uint8 count, bool success, bytes return_data); + event Storage(bytes32 slot, bytes32 data, bool write); +} + #[entrypoint] fn user_main(input: Vec) -> Result, Vec> { let mut input = input.as_slice(); @@ -19,7 +34,7 @@ fn user_main(input: Vec) -> Result, Vec> { // combined output of all calls let mut output = vec![]; - console!("Calling {count} contract(s)"); + console!("Performing {count} action(s)"); for _ in 0..count { let length = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; input = &input[4..]; @@ -30,35 +45,75 @@ fn user_main(input: Vec) -> Result, Vec> { let kind = curr[0]; curr = &curr[1..]; - let mut value = None; - if kind == 0 { - value = Some(B256::try_from(&curr[..32]).unwrap()); - curr = &curr[32..]; - } + if kind & 0xf0 == 0 { + // caller + let mut value = None; + if kind & 0x3 == 0 { + value = Some(B256::try_from(&curr[..32]).unwrap()); + curr = &curr[32..]; + }; - let addr = Address::try_from(&curr[..20]).unwrap(); - let data = &curr[20..]; - match value { - Some(value) if !value.is_zero() => console!( - "Calling {addr} with {} bytes and value {} {kind}", - data.len(), - hex::encode(value) - ), - _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), - } + let addr = Address::try_from(&curr[..20]).unwrap(); + let data = &curr[20..]; + match value { + Some(value) if !value.is_zero() => console!( + "Calling {addr} with {} bytes and value {} {kind}", + data.len(), + hex::encode(value) + ), + _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), + } - let raw_call = match kind { - 0 => RawCall::new_with_value(value.unwrap_or_default().into()), - 1 => RawCall::new_delegate(), - 2 => RawCall::new_static(), - x => panic!("unknown call kind {x}"), - }; - let return_data = unsafe { raw_call.call(addr, data)? 
}; - - if !return_data.is_empty() { - console!("Contract {addr} returned {} bytes", return_data.len()); + let raw_call = match kind & 0x3 { + 0 => RawCall::new_with_value(value.unwrap_or_default().into()), + 1 => RawCall::new_delegate(), + 2 => RawCall::new_static(), + x => panic!("unknown call kind {x}"), + }; + let (success, return_data) = match unsafe { raw_call.call(addr, data) } { + Ok(return_data) => (true, return_data), + Err(revert_data) => { + if kind & 0x4 == 0 { + return Err(revert_data) + } + (false, vec![]) + }, + }; + + if !return_data.is_empty() { + console!("Contract {addr} returned {} bytes", return_data.len()); + } + if kind & 0x8 != 0 { + evm::log(Called { addr, count, success, return_data: return_data.clone() }) + } + output.extend(return_data); + } else if kind & 0xf0 == 0x10 { + // storage + let slot = B256::try_from(&curr[..32]).unwrap(); + curr = &curr[32..]; + let data; + let write; + if kind & 0x7 == 0 { + console!("writing slot {}", curr.len()); + data = B256::try_from(&curr[..32]).unwrap(); + write = true; + unsafe { StorageCache::set_word(slot.into(), data.into()) }; + StorageCache::flush(); + } else if kind & 0x7 == 1{ + console!("reading slot"); + write = false; + data = StorageCache::get_word(slot.into()); + output.extend(data.clone()); + } else { + panic!("unknown storage kind {kind}") + } + if kind & 0x8 != 0 { + console!("slot: {}, data: {}, write {write}", slot, data); + evm::log(Storage { slot: slot.into(), data: data.into(), write }) + } + } else { + panic!("unknown action {kind}") } - output.extend(return_data); input = next; } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 0a9a45cc1e..ee00cdc618 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -38,10 +38,10 @@ import ( "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" - "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" @@ -98,7 +98,7 @@ type BatchPoster struct { bridgeAddr common.Address gasRefunderAddr common.Address building *buildingBatch - daWriter das.DataAvailabilityServiceWriter + dapWriter daprovider.Writer dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -129,7 +129,7 @@ const ( type BatchPosterConfig struct { Enable bool `koanf:"enable"` - DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` + DisableDapFallbackStoreDataOnChain bool `koanf:"disable-dap-fallback-store-data-on-chain" reload:"hot"` // Max batch size. MaxSize int `koanf:"max-size" reload:"hot"` // Maximum 4844 blob enabled batch size. 
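Stepping back to the multicall change above: the program now dispatches on a `kind` bitfield, where the high nibble selects the action class (0x00 = call, 0x10 = storage), `kind & 0x3` picks the call flavor (0 = call with value, 1 = delegatecall, 2 = staticcall), bit 0x4 tolerates reverts, and bit 0x8 emits a log. A minimal Go sketch of how a harness might frame such actions follows; the helper names are hypothetical and the leading count byte is parsed above the diffed hunk, so treat this as an illustration of the wire format rather than nitro's actual test encoders:

```go
package main

import "encoding/binary"

// frameAction prefixes one action with its big-endian u32 length,
// matching the u32::from_be_bytes(input[..4]) framing parsed above.
func frameAction(buf, action []byte) []byte {
	buf = binary.BigEndian.AppendUint32(buf, uint32(len(action)))
	return append(buf, action...)
}

// staticCall builds a call action with kind&0x3 == 2 (staticcall), so no
// 32-byte value word is included; only plain calls (kind&0x3 == 0) carry one.
// Or-ing in 0x8 would additionally emit the Called log.
func staticCall(addr [20]byte, calldata []byte) []byte {
	action := append([]byte{0x02}, addr[:]...)
	return append(action, calldata...)
}

// storageWrite builds a storage action (high nibble 0x10, kind&0x7 == 0):
// a 32-byte slot followed by the 32-byte word to store.
func storageWrite(slot, word [32]byte) []byte {
	action := append([]byte{0x10}, slot[:]...)
	return append(action, word[:]...)
}

func main() {
	var addr [20]byte
	var slot, word [32]byte
	input := []byte{2} // leading action count, read before the diffed hunk
	input = frameAction(input, staticCall(addr, nil))
	input = frameAction(input, storageWrite(slot, word))
	_ = input // this buffer becomes the program's calldata
}
```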
@@ -189,7 +189,7 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") - f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") + f.Bool(prefix+".disable-dap-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDapFallbackStoreDataOnChain, "If unable to batch to DA provider, disable fallback storing data on chain") f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") @@ -214,7 +214,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, - DisableDasFallbackStoreDataOnChain: false, + DisableDapFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxSize: 100000, Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 2000, @@ -277,7 +277,7 @@ type BatchPosterOpts struct { Config BatchPosterConfigFetcher DeployInfo *chaininfo.RollupAddresses TransactOpts *bind.TransactOpts - DAWriter das.DataAvailabilityServiceWriter + DAPWriter daprovider.Writer ParentChainID *big.Int } @@ -323,7 +323,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e seqInboxAddr: opts.DeployInfo.SequencerInbox, gasRefunderAddr: opts.Config().gasRefunder, bridgeAddr: opts.DeployInfo.Bridge, - daWriter: opts.DAWriter, + dapWriter: opts.DAPWriter, redisLock: redisLock, } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) @@ -883,7 +883,7 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { } compressedBytes := s.compressedBuffer.Bytes() fullMsg := make([]byte, 1, len(compressedBytes)+1) - fullMsg[0] = arbstate.BrotliMessageHeaderByte + fullMsg[0] = daprovider.BrotliMessageHeaderByte fullMsg = append(fullMsg, compressedBytes...) 
return fullMsg, nil } @@ -1063,7 +1063,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } var use4844 bool config := b.config() - if config.Post4844Blobs && b.daWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.Post4844Blobs && b.dapWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) if err != nil { return false, err @@ -1246,7 +1246,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, nil } - if b.daWriter != nil { + if b.dapWriter != nil { if !b.redisLock.AttemptLock(ctx) { return false, errAttemptLockFailed } @@ -1258,17 +1258,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) { return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } - - cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled - if errors.Is(err, das.BatchToDasFailed) { - if config.DisableDasFallbackStoreDataOnChain { - return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled") - } - log.Warn("Falling back to storing data on chain", "err", err) - } else if err != nil { + sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain) + if err != nil { return false, err - } else { - sequencerMsg = das.Serialize(cert) } } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 614711249b..7bc18a2121 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -592,7 +592,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) - if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + if lastTx != nil && numBlobs > 0 && lastTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { // Increase the non-blob fee cap to the minimum rbf increase newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) @@ -665,6 +665,14 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } + // Ensure we bid at least 1 wei to prevent division by zero + if newBaseFeeCap.Sign() == 0 { + newBaseFeeCap = big.NewInt(1) + } + if newBlobFeeCap.Sign() == 0 { + newBlobFeeCap = big.NewInt(1) + } + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } @@ -844,21 +852,25 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - if precedingTx != nil && // precedingTx == nil -> the 
actual preceding tx was already confirmed - (precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent) { - latestBlockNumber, err := p.client.BlockNumber(ctx) - if err != nil { - return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) - } - prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) - if err != nil { - return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) - } + if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed + var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 + if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { + latestBlockNumber, err = p.client.BlockNumber(ctx) + if err != nil { + return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) + } + prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + if err != nil { + return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) + } - if precedingTx.FullTx.Nonce() > reorgResistantNonce { - log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) - return nil + if precedingTx.FullTx.Nonce() > reorgResistantNonce { + log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) + return nil + } + } else { + log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) } } } @@ -930,8 +942,8 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa } newTx := *prevTx - if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || - (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { + if (prevTx.FullTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease) || + (prevTx.FullTx.BlobGasFeeCap() != nil && prevTx.FullTx.BlobGasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index beb2656e2b..9ad984ae6c 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil, nil) + 
tracker, err := NewInboxTracker(db, streamer, nil) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index b758e95e62..ba1b875ec8 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -20,6 +20,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" m "github.com/offchainlabs/nitro/broadcaster/message" @@ -37,23 +38,17 @@ type InboxTracker struct { txStreamer *TransactionStreamer mutex sync.Mutex validator *staker.BlockValidator - das arbstate.DataAvailabilityReader - blobReader arbstate.BlobReader + dapReaders []daprovider.Reader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) { - // We support a nil txStreamer for the pruning code - if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { - return nil, errors.New("data availability service required but unconfigured") - } +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader) (*InboxTracker, error) { tracker := &InboxTracker{ db: db, txStreamer: txStreamer, - das: das, - blobReader: blobReader, + dapReaders: dapReaders, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -302,7 +297,14 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting message %v: %w", seqNum, err) } - feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum) + + msgResult, err := t.txStreamer.ResultAtCount(seqNum) + var blockHash *common.Hash + if err == nil { + blockHash = &msgResult.BlockHash + } + + feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, blockHash) if err != nil { return fmt.Errorf("error creating broadcast feed message %v: %w", seqNum, err) } @@ -659,14 +661,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - var daProviders []arbstate.DataAvailabilityProvider - if t.das != nil { - daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) - } - if t.blobReader != nil { - daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) - } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.dapReaders, daprovider.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 43a05155fe..34d8952c3c 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -27,7 +27,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ 
-254,7 +254,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses - BlobReader arbstate.BlobReader + BlobReader daprovider.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -372,7 +372,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, - blobReader arbstate.BlobReader, + blobReader daprovider.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -529,7 +529,18 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) + // We support a nil txStreamer for the pruning code + if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil { + return nil, errors.New("data availability service required but unconfigured") + } + var dapReaders []daprovider.Reader + if daReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) + } + if blobReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + } + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders) if err != nil { return nil, err } @@ -547,8 +558,7 @@ func createNodeImpl( txStreamer, exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), - daReader, - blobReader, + dapReaders, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) @@ -655,6 +665,10 @@ func createNodeImpl( if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" { return nil, errors.New("batchposter, but no TxOpts") } + var dapWriter daprovider.Writer + if daWriter != nil { + dapWriter = daprovider.NewWriterForDAS(daWriter) + } batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), L1Reader: l1Reader, @@ -665,7 +679,7 @@ func createNodeImpl( Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, DeployInfo: deployInfo, TransactOpts: txOptsBatchPoster, - DAWriter: daWriter, + DAPWriter: dapWriter, ParentChainID: parentChainID, }) if err != nil { @@ -725,7 +739,7 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, - blobReader arbstate.BlobReader, + blobReader daprovider.BlobReader, ) (*Node, error) { currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader) if err != nil { @@ -997,8 +1011,8 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } -func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { - return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult execution.MessageResult) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, msgResult) } func (n *Node) ExpectChosenSequencer() error { diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index edda4e5512..46e1edb78b 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,7 
+15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -159,7 +159,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut if len(tx.BlobHashes()) == 0 { return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash()) } - data := []byte{arbstate.BlobHashesHeaderFlag} + data := []byte{daprovider.BlobHashesHeaderFlag} for _, h := range tx.BlobHashes() { data = append(data, h[:]...) } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 3b8b3927c8..0d5ae829b0 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -460,11 +460,20 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde s.reorgMutex.Lock() defer s.reorgMutex.Unlock() - err = s.exec.Reorg(count, newMessages, oldMessages) + messagesResults, err := s.exec.Reorg(count, newMessages, oldMessages) if err != nil { return err } + messagesWithBlockHash := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) + for i := 0; i < len(messagesResults); i++ { + messagesWithBlockHash = append(messagesWithBlockHash, broadcaster.MessageWithMetadataAndBlockHash{ + Message: newMessages[i], + BlockHash: &messagesResults[i].BlockHash, + }) + } + s.broadcastMessages(messagesWithBlockHash, count) + if s.validator != nil { err = s.validator.Reorg(s.GetContext(), count) if err != nil { @@ -970,7 +979,11 @@ func (s *TransactionStreamer) ExpectChosenSequencer() error { return nil } -func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) WriteMessageFromSequencer( + pos arbutil.MessageIndex, + msgWithMeta arbostypes.MessageWithMetadata, + msgResult execution.MessageResult, +) error { if err := s.ExpectChosenSequencer(); err != nil { return err } @@ -998,6 +1011,12 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex return err } + msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ + Message: msgWithMeta, + BlockHash: &msgResult.BlockHash, + } + s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + return nil } @@ -1026,6 +1045,18 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty return batch.Put(key, msgBytes) } +func (s *TransactionStreamer) broadcastMessages( + msgs []broadcaster.MessageWithMetadataAndBlockHash, + pos arbutil.MessageIndex, +) { + if s.broadcastServer == nil { + return + } + if err := s.broadcastServer.BroadcastMessages(msgs, pos); err != nil { + log.Error("failed broadcasting messages", "pos", pos, "err", err) + } +} + // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. 
func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { @@ -1053,12 +1084,6 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ default: } - if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastMessages(messages, pos); err != nil { - log.Error("failed broadcasting message", "pos", pos, "err", err) - } - } - return nil } @@ -1110,7 +1135,8 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } msgForPrefetch = msg } - if err = s.exec.DigestMessage(pos, msg, msgForPrefetch); err != nil { + msgResult, err := s.exec.DigestMessage(pos, msg, msgForPrefetch) + if err != nil { logger := log.Warn if prevMessageCount < msgCount { logger = log.Debug @@ -1118,6 +1144,13 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution logger("feedOneMsg failed to send message to execEngine", "err", err, "pos", pos) return false } + + msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ + Message: *msg, + BlockHash: &msgResult.BlockHash, + } + s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + return pos+1 < msgCount } diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 123dda54ce..09989f3380 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -114,6 +114,11 @@ func callProgram( asm := db.GetActivatedAsm(moduleHash) debug := stylusParams.debugMode + if len(asm) == 0 { + log.Error("missing asm", "program", address, "module", moduleHash) + panic("missing asm") + } + if db, ok := db.(*state.StateDB); ok { db.RecordProgram(moduleHash) } diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go new file mode 100644 index 0000000000..560af3af1d --- /dev/null +++ b/arbstate/daprovider/reader.go @@ -0,0 +1,104 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package daprovider + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/blobs" +) + +type Reader interface { + // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider + IsValidHeaderByte(headerByte byte) bool + + // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information + RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, + ) ([]byte, error) +} + +// NewReaderForDAS is generally meant to be only used by nitro. 
+// DA Providers should implement methods in the Reader interface independently +func NewReaderForDAS(dasReader DASReader) *readerForDAS { + return &readerForDAS{dasReader: dasReader} +} + +type readerForDAS struct { + dasReader DASReader +} + +func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool { + return IsDASMessageHeaderByte(headerByte) +} + +func (d *readerForDAS) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, +) ([]byte, error) { + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimageRecorder, validateSeqMsg) +} + +// NewReaderForBlobReader is generally meant to be only used by nitro. +// DA Providers should implement methods in the Reader interface independently +func NewReaderForBlobReader(blobReader BlobReader) *readerForBlobReader { + return &readerForBlobReader{blobReader: blobReader} +} + +type readerForBlobReader struct { + blobReader BlobReader +} + +func (b *readerForBlobReader) IsValidHeaderByte(headerByte byte) bool { + return IsBlobHashesHeaderByte(headerByte) +} + +func (b *readerForBlobReader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, +) ([]byte, error) { + blobHashes := sequencerMsg[41:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, ErrInvalidBlobDataFormat + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + if preimageRecorder != nil { + for i, blob := range kzgBlobs { + // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable + // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview + b := blob + preimageRecorder(versionedHashes[i], b[:], arbutil.EthVersionedHashPreimageType) + } + } + payload, err := blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil + } + return payload, nil +} diff --git a/arbstate/das_reader.go b/arbstate/daprovider/util.go similarity index 62% rename from arbstate/das_reader.go rename to arbstate/daprovider/util.go index f131a53608..054bde5503 100644 --- a/arbstate/das_reader.go +++ b/arbstate/daprovider/util.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package arbstate +package daprovider import ( "bufio" @@ -13,18 +13,53 @@ import ( "io" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" ) -type DataAvailabilityReader interface { +type DASReader interface { GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error) } -var ErrHashMismatch = errors.New("result does not match expected hash") +type DASWriter interface { + // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). + Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*DataAvailabilityCertificate, error) + fmt.Stringer +} + +type BlobReader interface { + GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, + ) ([]kzg4844.Blob, error) + Initialize(ctx context.Context) error +} + +// PreimageRecorder is used to add a (key, value) pair to the inner map keyed by ty within the larger preimages map. +// If ty doesn't exist as a key in the preimages map, it is first initialized to map[common.Hash][]byte before the (key, value) pair is added +type PreimageRecorder func(key common.Hash, value []byte, ty arbutil.PreimageType) + +// RecordPreimagesTo takes in the preimages map and returns a function that can be used +// to record (hash, preimage) key-value pairs into it when fetching a payload through RecoverPayloadFromBatch +func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder { + if preimages == nil { + return nil + } + return func(key common.Hash, value []byte, ty arbutil.PreimageType) { + if preimages[ty] == nil { + preimages[ty] = make(map[common.Hash][]byte) + } + preimages[ty][key] = value + } +} // DASMessageHeaderFlag indicates that this data is a certificate for the data availability service, // which will retrieve the full batch data.
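Since RecordPreimagesTo and the PreimageRecorder closure replace the old raw preimages-map plumbing, here is a minimal sketch of the intended wiring, using only signatures introduced in this diff (the wrapper function itself is hypothetical):

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/offchainlabs/nitro/arbstate/daprovider"
	"github.com/offchainlabs/nitro/arbutil"
)

// recoverWithPreimages builds a recorder over a fresh preimages map, hands it
// to a daprovider.Reader, and returns the preimages grouped by type.
func recoverWithPreimages(
	ctx context.Context,
	reader daprovider.Reader,
	batchNum uint64,
	batchBlockHash common.Hash,
	sequencerMsg []byte,
) (map[arbutil.PreimageType]map[common.Hash][]byte, []byte, error) {
	preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
	recorder := daprovider.RecordPreimagesTo(preimages) // a nil map would yield a nil recorder
	payload, err := reader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, sequencerMsg, recorder, true)
	if err != nil {
		return nil, nil, err
	}
	// e.g. preimages[arbutil.Keccak256PreimageType][hash] now holds every
	// keccak preimage touched while recovering the batch payload.
	return preimages, payload, nil
}
```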
@@ -83,6 +118,114 @@ func IsKnownHeaderByte(b uint8) bool { return b&^KnownHeaderBits == 0 } +const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week +var ( + ErrHashMismatch = errors.New("result does not match expected hash") + ErrBatchToDasFailed = errors.New("unable to batch to DAS") + ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") + ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") + ErrSeqMsgValidation = errors.New("error validating recovered payload from batch") +) + +type KeysetValidationMode uint8 + +const KeysetValidate KeysetValidationMode = 0 +const KeysetPanicIfInvalid KeysetValidationMode = 1 +const KeysetDontValidate KeysetValidationMode = 2 + +func RecoverPayloadFromDasBatch( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + dasReader DASReader, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, +) ([]byte, error) { + cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) + if err != nil { + log.Error("Failed to deserialize DAS message", "err", err) + return nil, nil + } + version := cert.Version + + if version >= 2 { + log.Error("Your node software is probably out of date", "certificateVersion", version) + return nil, nil + } + + getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { + newHash := hash + if version == 0 { + newHash = dastree.FlatHashToTreeHash(hash) + } + + preimage, err := dasReader.GetByHash(ctx, newHash) + if err != nil && hash != newHash { + log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) + preimage, err = dasReader.GetByHash(ctx, hash) + } + if err != nil { + return nil, err + } + + switch { + case version == 0 && crypto.Keccak256Hash(preimage) != hash: + fallthrough + case version == 1 && dastree.Hash(preimage) != hash: + log.Error( + "preimage mismatch for hash", + "hash", hash, "err", ErrHashMismatch, "version", version, + ) + return nil, ErrHashMismatch + } + return preimage, nil + } + + keysetPreimage, err := getByHash(ctx, cert.KeysetHash) + if err != nil { + log.Error("Couldn't get keyset", "err", err) + return nil, err + } + if preimageRecorder != nil { + dastree.RecordHash(preimageRecorder, keysetPreimage) + } + + keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), !validateSeqMsg) + if err != nil { + return nil, fmt.Errorf("%w. 
Couldn't deserialize keyset, err: %w, keyset hash: %x batch num: %d", ErrSeqMsgValidation, err, cert.KeysetHash, batchNum) + } + err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) + if err != nil { + log.Error("Bad signature on DAS batch", "err", err) + return nil, nil + } + + maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) + if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { + log.Error("Data availability cert expires too soon", "err", "") + return nil, nil + } + + dataHash := cert.DataHash + payload, err := getByHash(ctx, dataHash) + if err != nil { + log.Error("Couldn't fetch DAS batch contents", "err", err) + return nil, err + } + + if preimageRecorder != nil { + if version == 0 { + treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) + preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) + preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf, arbutil.Keccak256PreimageType) + } else { + dastree.RecordHash(preimageRecorder, payload) + } + } + + return payload, nil +} + type DataAvailabilityCertificate struct { KeysetHash [32]byte DataHash [32]byte @@ -167,7 +310,7 @@ func (c *DataAvailabilityCertificate) SerializeSignableFields() []byte { func (c *DataAvailabilityCertificate) RecoverKeyset( ctx context.Context, - da DataAvailabilityReader, + da DASReader, assumeKeysetValid bool, ) (*DataAvailabilityKeyset, error) { keysetBytes, err := da.GetByHash(ctx, c.KeysetHash) @@ -316,3 +459,22 @@ func StringToExpirationPolicy(s string) (ExpirationPolicy, error) { return -1, fmt.Errorf("invalid Expiration Policy: %s", s) } } + +func Serialize(c *DataAvailabilityCertificate) []byte { + + flags := DASMessageHeaderFlag + if c.Version != 0 { + flags |= TreeDASMessageHeaderFlag + } + + buf := make([]byte, 0) + buf = append(buf, flags) + buf = append(buf, c.KeysetHash[:]...) + buf = append(buf, c.SerializeSignableFields()...) + + var intData [8]byte + binary.BigEndian.PutUint64(intData[:], c.SignersMask) + buf = append(buf, intData[:]...) + + return append(buf, blsSignatures.SignatureToBytes(c.Sig)...) +} diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go new file mode 100644 index 0000000000..75b356c4b8 --- /dev/null +++ b/arbstate/daprovider/writer.go @@ -0,0 +1,48 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package daprovider + +import ( + "context" + "errors" + + "github.com/ethereum/go-ethereum/log" +) + +type Writer interface { + // Store posts the batch data to the invoking DA provider + // and returns sequencerMsg, which is later used to retrieve the batch data + Store( + ctx context.Context, + message []byte, + timeout uint64, + sig []byte, + disableFallbackStoreDataOnChain bool, + ) ([]byte, error) +} + +// NewWriterForDAS is generally meant to be only used by nitro. +// DA Providers should implement methods in the Writer interface independently +func NewWriterForDAS(dasWriter DASWriter) *writerForDAS { + return &writerForDAS{dasWriter: dasWriter} +} + +type writerForDAS struct { + dasWriter DASWriter +} + +func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) { + cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{}) + if errors.Is(err, ErrBatchToDasFailed) { + if disableFallbackStoreDataOnChain { + return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled") + } + log.Warn("Falling back to storing data on chain", "err", err) + return message, nil + } else if err != nil { + return nil, err + } else { + return Serialize(cert), nil + } +} diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3105ee92b1..753ca19cd6 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -13,8 +13,6 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -22,9 +20,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/blobs" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/zeroheavy" ) @@ -40,15 +36,6 @@ type InboxBackend interface { ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error) } -type BlobReader interface { - GetBlobs( - ctx context.Context, - batchBlockHash common.Hash, - versionedHashes []common.Hash, - ) ([]kzg4844.Blob, error) - Initialize(ctx context.Context) error -} - type sequencerMessage struct { minTimestamp uint64 maxTimestamp uint64 @@ -61,14 +48,8 @@ type sequencerMessage struct { const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -var ( - ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") - ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") -) - -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -86,22 +67,32 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // If the parent chain sequencer inbox smart contract authenticated this batch, // an unknown header byte must mean that this node is out of date, // because the smart contract understands the header byte and this node doesn't.
-	if len(payload) > 0 && IsL1AuthenticatedMessageHeaderByte(payload[0]) && !IsKnownHeaderByte(payload[0]) {
+	if len(payload) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(payload[0]) && !daprovider.IsKnownHeaderByte(payload[0]) {
 		return nil, fmt.Errorf("%w: batch has unsupported authenticated header byte 0x%02x", arbosState.ErrFatalNodeOutOfDate, payload[0])
 	}
 
 	// Stage 1: Extract the payload from any data availability header.
 	// It's important that multiple DAS strategies can't both be invoked in the same batch,
 	// as these headers are validated by the sequencer inbox and not other DASs.
-	// We try to extract the payload from the first occurring valid DA provider in the daProviders list
+	// We try to extract the payload from the first occurring valid DA reader in the dapReaders list
 	if len(payload) > 0 {
 		foundDA := false
 		var err error
-		for _, provider := range daProviders {
-			if provider != nil && provider.IsValidHeaderByte(payload[0]) {
-				payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode)
+		for _, dapReader := range dapReaders {
+			if dapReader != nil && dapReader.IsValidHeaderByte(payload[0]) {
+				payload, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate)
 				if err != nil {
-					return nil, err
+					// Matches the way keyset validation was done inside DAS readers, i.e. logging the error;
+					// other daproviders might just want to return the error instead.
+					if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) {
+						logLevel := log.Error
+						if keysetValidationMode == daprovider.KeysetPanicIfInvalid {
+							logLevel = log.Crit
+						}
+						logLevel(err.Error())
+					} else {
+						return nil, err
+					}
 				}
 				if payload == nil {
 					return parsedMsg, nil
@@ -112,10 +103,10 @@
 		}
 		if !foundDA {
-			if IsDASMessageHeaderByte(payload[0]) {
+			if daprovider.IsDASMessageHeaderByte(payload[0]) {
 				log.Error("No DAS Reader configured, but sequencer message found with DAS header")
-			} else if IsBlobHashesHeaderByte(payload[0]) {
-				return nil, ErrNoBlobReader
+			} else if daprovider.IsBlobHashesHeaderByte(payload[0]) {
+				return nil, daprovider.ErrNoBlobReader
 			}
 		}
 	}
@@ -124,7 +115,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	}
 
 	// It's not safe to trust any part of the payload from this point onwards.
 
 	// Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging).
-	if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) {
+	if len(payload) > 0 && daprovider.IsZeroheavyEncodedHeaderByte(payload[0]) {
 		pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen)))
 		if err != nil {
 			log.Warn("error reading from zeroheavy decoder", err.Error())
@@ -134,7 +125,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	}
 
 	// Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list.
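An aside before stage 3: a sketch of how callers assemble the dapReaders list consulted in stage 1, mirroring the cmd/replay/main.go changes later in this diff (dasReader and blobReader stand in for the caller's configured readers):

	var dapReaders []daprovider.Reader
	if dasReader != nil {
		dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader))
	}
	dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader))
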
- if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) { + if len(payload) > 0 && daprovider.IsBrotliMessageHeaderByte(payload[0]) { decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen) if err == nil { reader := bytes.NewReader(decompressed) @@ -170,224 +161,24 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash return parsedMsg, nil } -func RecoverPayloadFromDasBatch( - ctx context.Context, - batchNum uint64, - sequencerMsg []byte, - dasReader DataAvailabilityReader, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - var keccakPreimages map[common.Hash][]byte - if preimages != nil { - if preimages[arbutil.Keccak256PreimageType] == nil { - preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - keccakPreimages = preimages[arbutil.Keccak256PreimageType] - } - cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) - if err != nil { - log.Error("Failed to deserialize DAS message", "err", err) - return nil, nil - } - version := cert.Version - recordPreimage := func(key common.Hash, value []byte) { - keccakPreimages[key] = value - } - - if version >= 2 { - log.Error("Your node software is probably out of date", "certificateVersion", version) - return nil, nil - } - - getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { - newHash := hash - if version == 0 { - newHash = dastree.FlatHashToTreeHash(hash) - } - - preimage, err := dasReader.GetByHash(ctx, newHash) - if err != nil && hash != newHash { - log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) - preimage, err = dasReader.GetByHash(ctx, hash) - } - if err != nil { - return nil, err - } - - switch { - case version == 0 && crypto.Keccak256Hash(preimage) != hash: - fallthrough - case version == 1 && dastree.Hash(preimage) != hash: - log.Error( - "preimage mismatch for hash", - "hash", hash, "err", ErrHashMismatch, "version", version, - ) - return nil, ErrHashMismatch - } - return preimage, nil - } - - keysetPreimage, err := getByHash(ctx, cert.KeysetHash) - if err != nil { - log.Error("Couldn't get keyset", "err", err) - return nil, err - } - if keccakPreimages != nil { - dastree.RecordHash(recordPreimage, keysetPreimage) - } - - keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), keysetValidationMode == KeysetDontValidate) - if err != nil { - logLevel := log.Error - if keysetValidationMode == KeysetPanicIfInvalid { - logLevel = log.Crit - } - logLevel("Couldn't deserialize keyset", "err", err, "keysetHash", cert.KeysetHash, "batchNum", batchNum) - return nil, nil - } - err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) - if err != nil { - log.Error("Bad signature on DAS batch", "err", err) - return nil, nil - } - - maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) - if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { - log.Error("Data availability cert expires too soon", "err", "") - return nil, nil - } - - dataHash := cert.DataHash - payload, err := getByHash(ctx, dataHash) - if err != nil { - log.Error("Couldn't fetch DAS batch contents", "err", err) - return nil, err - } - - if keccakPreimages != nil { - if version == 0 { - treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) - keccakPreimages[dataHash] = payload - keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf - } else { - dastree.RecordHash(recordPreimage, 
payload) - } - } - - return payload, nil -} - -type DataAvailabilityProvider interface { - // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider - IsValidHeaderByte(headerByte byte) bool - - // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information - RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, - ) ([]byte, error) -} - -// NewDAProviderDAS is generally meant to be only used by nitro. -// DA Providers should implement methods in the DataAvailabilityProvider interface independently -func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { - return &dAProviderForDAS{ - das: das, - } -} - -type dAProviderForDAS struct { - das DataAvailabilityReader -} - -func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool { - return IsDASMessageHeaderByte(headerByte) -} - -func (d *dAProviderForDAS) RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode) -} - -// NewDAProviderBlobReader is generally meant to be only used by nitro. -// DA Providers should implement methods in the DataAvailabilityProvider interface independently -func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader { - return &dAProviderForBlobReader{ - blobReader: blobReader, - } -} - -type dAProviderForBlobReader struct { - blobReader BlobReader -} - -func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool { - return IsBlobHashesHeaderByte(headerByte) -} - -func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - blobHashes := sequencerMsg[41:] - if len(blobHashes)%len(common.Hash{}) != 0 { - return nil, ErrInvalidBlobDataFormat - } - versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) - for i := 0; i*32 < len(blobHashes); i += 1 { - copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) - } - kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) - if err != nil { - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - payload, err := blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return nil, nil - } - return payload, nil -} - -type KeysetValidationMode uint8 - -const KeysetValidate KeysetValidationMode = 0 -const KeysetPanicIfInvalid KeysetValidationMode = 1 -const KeysetDontValidate KeysetValidationMode = 2 - type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - daProviders []DataAvailabilityProvider + dapReaders []daprovider.Reader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 cachedSegmentTimestamp uint64 cachedSegmentBlockNumber uint64 cachedSubMessageNumber uint64 - keysetValidationMode KeysetValidationMode + keysetValidationMode 
daprovider.KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, - daProviders: daProviders, + dapReaders: dapReaders, keysetValidationMode: keysetValidationMode, } } @@ -409,7 +200,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dapReaders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index b34c02534b..5ede321810 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type multiplexerBackend struct { @@ -67,7 +68,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, daprovider.KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 84356d77e0..44b48192ab 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -105,7 +105,7 @@ func testReceiveMessages(t *testing.T, clientCompression bool, serverCompression go func() { for i := 0; i < messageCount; i++ { - Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i))) + Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() @@ -156,7 +156,7 @@ func TestInvalidSignature(t *testing.T) { go func() { for i := 0; i < messageCount; i++ { - Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i))) + Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() @@ -316,7 +316,7 @@ func TestServerClientDisconnect(t *testing.T) { broadcastClient.Start(ctx) t.Log("broadcasting seq 0 message") - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) // Wait for client to receive batch to ensure it is connected timer := time.NewTimer(5 * time.Second) @@ -387,7 +387,7 @@ func TestBroadcastClientConfirmedMessage(t *testing.T) { broadcastClient.Start(ctx) t.Log("broadcasting seq 0 message") - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) // Wait for client to receive batch to ensure it is connected timer := time.NewTimer(5 * time.Second) 
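In these test updates the new trailing argument to BroadcastSingle is the optional block hash, and existing call sites simply pass nil. A sketch (values illustrative) of a call that attaches a hash, matching the BlockHash form used in the serialization example later in this diff:

	blockHash := common.Hash{0: 0xff}
	Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 8, &blockHash))
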
@@ -724,8 +724,8 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { Require(t, b.Start(ctx)) defer b.StopAndWait() - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1, nil)) var wg sync.WaitGroup for i := 0; i < 2; i++ { diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 242b8f9eeb..ac5c6c39da 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -11,6 +11,7 @@ import ( "github.com/gobwas/ws" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -21,6 +22,11 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) +type MessageWithMetadataAndBlockHash struct { + Message arbostypes.MessageWithMetadata + BlockHash *common.Hash +} + type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -38,7 +44,11 @@ func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId u } } -func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*m.BroadcastFeedMessage, error) { +func (b *Broadcaster) NewBroadcastFeedMessage( + message arbostypes.MessageWithMetadata, + sequenceNumber arbutil.MessageIndex, + blockHash *common.Hash, +) (*m.BroadcastFeedMessage, error) { var messageSignature []byte if b.dataSigner != nil { hash, err := message.Hash(sequenceNumber, b.chainId) @@ -54,18 +64,23 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta return &m.BroadcastFeedMessage{ SequenceNumber: sequenceNumber, Message: message, + BlockHash: blockHash, Signature: messageSignature, }, nil } -func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { +func (b *Broadcaster) BroadcastSingle( + msg arbostypes.MessageWithMetadata, + seq arbutil.MessageIndex, + blockHash *common.Hash, +) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastSingle", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastSingle") } }() - bfm, err := b.NewBroadcastFeedMessage(msg, seq) + bfm, err := b.NewBroadcastFeedMessage(msg, seq, blockHash) if err != nil { return err } @@ -82,7 +97,10 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { b.BroadcastFeedMessages(broadcastFeedMessages) } -func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { +func (b *Broadcaster) BroadcastMessages( + messagesWithBlockHash []MessageWithMetadataAndBlockHash, + seq arbutil.MessageIndex, +) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) @@ -90,8 +108,8 @@ func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadat } }() var feedMessages []*m.BroadcastFeedMessage - for i, msg := range messages { - bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i)) + for i, msg := range messagesWithBlockHash { + bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) if err != nil { return err } diff --git 
a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index 8ac06e9705..dc208f4163 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -70,17 +70,17 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { } // Normal broadcasting and confirming - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1, nil)) waitUntilUpdated(t, expectMessageCount(1, "after 1 message")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 2)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 2, nil)) waitUntilUpdated(t, expectMessageCount(2, "after 2 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 3)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 3, nil)) waitUntilUpdated(t, expectMessageCount(3, "after 3 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 4)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 4, nil)) waitUntilUpdated(t, expectMessageCount(4, "after 4 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5, nil)) waitUntilUpdated(t, expectMessageCount(5, "after 4 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 6)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 6, nil)) waitUntilUpdated(t, expectMessageCount(6, "after 4 messages")) b.Confirm(4) @@ -96,7 +96,7 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { "nothing changed because confirmed sequence number before cache")) b.Confirm(5) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7, nil)) waitUntilUpdated(t, expectMessageCount(2, "after 7 messages, 5 cleared by confirm")) diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index a575ae5cd0..aca9598754 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -34,6 +34,7 @@ type BroadcastMessage struct { type BroadcastFeedMessage struct { SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` Message arbostypes.MessageWithMetadata `json:"message"` + BlockHash *common.Hash `json:"blockHash,omitempty"` Signature []byte `json:"signature"` CumulativeSumMsgSize uint64 `json:"-"` diff --git a/broadcaster/message/message_serialization_test.go b/broadcaster/message/message_serialization_test.go index c3e14a86ae..1d8c10e388 100644 --- a/broadcaster/message/message_serialization_test.go +++ b/broadcaster/message/message_serialization_test.go @@ -13,7 +13,40 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" ) -func ExampleBroadcastMessage_broadcastfeedmessage() { +func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { + var requestId common.Hash + msg := BroadcastMessage{ + Version: 1, + Messages: []*BroadcastFeedMessage{ + { + SequenceNumber: 12345, + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: 0, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &requestId, + L1BaseFee: big.NewInt(0), + }, + L2msg: []byte{0xde, 0xad, 0xbe, 0xef}, + }, + DelayedMessagesRead: 3333, + }, + BlockHash: &common.Hash{0: 0xff}, + Signature: nil, + 
}, + }, + } + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + _ = encoder.Encode(msg) + fmt.Println(buf.String()) + // Output: {"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333},"blockHash":"0xff00000000000000000000000000000000000000000000000000000000000000","signature":null}]} +} + +func ExampleBroadcastMessage_broadcastfeedmessageWithoutBlockHash() { var requestId common.Hash msg := BroadcastMessage{ Version: 1, diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 3e96412648..8036487d26 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -14,8 +14,6 @@ import ( "syscall" "time" - "golang.org/x/exp/slog" - koanfjson "github.com/knadh/koanf/parsers/json" flag "github.com/spf13/pflag" @@ -46,7 +44,7 @@ type DAServerConfig struct { DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` Conf genericconf.ConfConfig `koanf:"conf"` - LogLevel int `koanf:"log-level"` + LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` @@ -66,7 +64,7 @@ var DefaultDAServerConfig = DAServerConfig{ RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, DataAvailability: das.DefaultDataAvailabilityConfig, Conf: genericconf.ConfConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -103,7 +101,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { f.Bool("pprof", DefaultDAServerConfig.PProf, "enable pprof") genericconf.PProfAddOptions("pprof-cfg", f) - f.Int("log-level", int(log.LvlInfo), "log level; 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE") + f.String("log-level", DefaultDAServerConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", DefaultDAServerConfig.LogType, "log type (plaintext or json)") das.DataAvailabilityConfigAddDaserverOptions("data-availability", f) @@ -185,13 +183,18 @@ func startup() error { confighelpers.PrintErrorAndExit(errors.New("please specify at least one of --enable-rest or --enable-rpc"), printSampleUsage) } + logLevel, err := genericconf.ToSlogLevel(serverConfig.LogLevel) + if err != nil { + confighelpers.PrintErrorAndExit(err, printSampleUsage) + } + handler, err := genericconf.HandlerFromLogType(serverConfig.LogType, io.Writer(os.Stderr)) if err != nil { flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } glogger := log.NewGlogHandler(handler) - glogger.Verbosity(slog.Level(serverConfig.LogLevel)) + glogger.Verbosity(logLevel) log.SetDefault(log.NewLogger(glogger)) if err := startMetrics(serverConfig); err != nil { diff --git a/cmd/dataavailability/data_availability_check.go b/cmd/dataavailability/data_availability_check.go index 72a311a7be..d80c0475bf 100644 --- a/cmd/dataavailability/data_availability_check.go +++ b/cmd/dataavailability/data_availability_check.go @@ -21,7 +21,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/das" 
"github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -65,7 +65,7 @@ type DataAvailabilityCheck struct { config *DataAvailabilityCheckConfig inboxAddr *common.Address inboxContract *bridgegen.SequencerInbox - urlToReaderMap map[string]arbstate.DataAvailabilityReader + urlToReaderMap map[string]daprovider.DASReader checkInterval time.Duration } @@ -86,7 +86,7 @@ func newDataAvailabilityCheck(ctx context.Context, dataAvailabilityCheckConfig * if err != nil { return nil, err } - urlToReaderMap := make(map[string]arbstate.DataAvailabilityReader, len(onlineUrls)) + urlToReaderMap := make(map[string]daprovider.DASReader, len(onlineUrls)) for _, url := range onlineUrls { reader, err := das.NewRestfulDasClientFromURL(url) if err != nil { @@ -238,7 +238,7 @@ func (d *DataAvailabilityCheck) checkDataAvailability(ctx context.Context, deliv if data == nil { return false, nil } - cert, err := arbstate.DeserializeDASCertFrom(bytes.NewReader(data)) + cert, err := daprovider.DeserializeDASCertFrom(bytes.NewReader(data)) if err != nil { return true, err } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index d78d975fd5..3f64a990ca 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -22,7 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" @@ -165,7 +165,7 @@ func startClientStore(args []string) error { } ctx := context.Background() - var cert *arbstate.DataAvailabilityCertificate + var cert *daprovider.DataAvailabilityCertificate if config.RandomMessageSize > 0 { message := make([]byte, config.RandomMessageSize) @@ -184,7 +184,7 @@ func startClientStore(args []string) error { return err } - serializedCert := das.Serialize(cert) + serializedCert := daprovider.Serialize(cert) fmt.Printf("Hex Encoded Cert: %s\n", hexutil.Encode(serializedCert)) fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(cert.DataHash[:])) diff --git a/cmd/genericconf/logging.go b/cmd/genericconf/logging.go index d77071a0bf..fa45953278 100644 --- a/cmd/genericconf/logging.go +++ b/cmd/genericconf/logging.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/ethereum/go-ethereum/log" - "golang.org/x/exp/slog" "gopkg.in/natefinch/lumberjack.v2" ) @@ -90,7 +89,7 @@ func (l *fileLoggerFactory) close() error { } // initLog is not threadsafe -func InitLog(logType string, logLevel slog.Level, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error { +func InitLog(logType string, logLevel string, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error { var glogger *log.GlogHandler // always close previous instance of file logger if err := globalFileLoggerFactory.close(); err != nil { @@ -111,8 +110,14 @@ func InitLog(logType string, logLevel slog.Level, fileLoggingConfig *FileLogging flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } + slogLevel, err := ToSlogLevel(logLevel) + if err != nil { + flag.Usage() + return fmt.Errorf("error parsing log level: %w", err) + } + glogger = log.NewGlogHandler(handler) - glogger.Verbosity(logLevel) + glogger.Verbosity(slogLevel) log.SetDefault(log.NewLogger(glogger)) return nil } diff --git a/cmd/genericconf/loglevel.go b/cmd/genericconf/loglevel.go new file mode 100644 index 0000000000..f7ad05a2cc --- /dev/null +++ 
b/cmd/genericconf/loglevel.go @@ -0,0 +1,38 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package genericconf + +import ( + "errors" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/log" + "golang.org/x/exp/slog" +) + +func ToSlogLevel(str string) (slog.Level, error) { + switch strings.ToLower(str) { + case "trace": + return log.LevelTrace, nil + case "debug": + return log.LevelDebug, nil + case "info": + return log.LevelInfo, nil + case "warn": + return log.LevelWarn, nil + case "error": + return log.LevelError, nil + case "crit": + return log.LevelCrit, nil + default: + legacyLevel, err := strconv.Atoi(str) + if err != nil { + // Leave legacy geth numeric log levels undocumented, but if anyone happens + // to be using them, it will work. + return log.LevelTrace, errors.New("invalid log-level") + } + return log.FromLegacyLevel(legacyLevel), nil + } +} diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 51d3978836..b52a1c6b5e 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -2,10 +2,10 @@ package main import ( "fmt" + "reflect" "time" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/nat" @@ -20,7 +20,7 @@ import ( type ValidationNodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -61,7 +61,7 @@ var IPCConfigDefault = genericconf.IPCConfig{ var ValidationNodeConfigDefault = ValidationNodeConfig{ Conf: genericconf.ConfConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Persistent: conf.PersistentConfigDefault, HTTP: HTTPConfigDefault, @@ -79,7 +79,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) valnode.ValidationConfigAddOptions("validation", f) - f.Int("log-level", ValidationNodeConfigDefault.LogLevel, "log level") + f.String("log-level", ValidationNodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ValidationNodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 4e543f7953..1e894336ea 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -22,7 +22,6 @@ import ( "github.com/offchainlabs/nitro/cmd/util/confighelpers" _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -90,7 +89,7 @@ func mainImpl() int { } } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) 
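	// Aside on the ToSlogLevel helper introduced above: it accepts the named
	// levels case-insensitively and, for backward compatibility, the legacy
	// geth numeric levels, so both of these resolve to log.LevelInfo (a
	// sketch; errors ignored for brevity):
	//
	//	lvl, _ := genericconf.ToSlogLevel("INFO")
	//	lvl, _ = genericconf.ToSlogLevel("3")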
return 1 @@ -109,7 +108,7 @@ func mainImpl() int { liveNodeConfig := genericconf.NewLiveConfig[*ValidationNodeConfig](args, nodeConfig, ParseNode) liveNodeConfig.SetOnReloadHook(func(oldCfg *ValidationNodeConfig, newCfg *ValidationNodeConfig) error { - return genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + return genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) }) valnode.EnsureValidationExposedViaAuthRPC(&stackConf) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index df0feca8ee..9280c3af02 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -42,7 +42,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -63,7 +63,6 @@ import ( "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -208,7 +207,7 @@ func mainImpl() int { } stackConf.JWTSecret = filename } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 @@ -325,7 +324,7 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client var l1Reader *headerreader.HeaderReader - var blobReader arbstate.BlobReader + var blobReader daprovider.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) @@ -600,7 +599,7 @@ func mainImpl() int { } liveNodeConfig.SetOnReloadHook(func(oldCfg *NodeConfig, newCfg *NodeConfig) error { - if err := genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { + if err := genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { return fmt.Errorf("failed to re-init logging: %w", err) } return currentNode.OnConfigReload(&oldCfg.Node, &newCfg.Node) @@ -691,7 +690,7 @@ type NodeConfig struct { Validation valnode.Config `koanf:"validation" reload:"hot"` ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` Chain conf.L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -717,7 +716,7 @@ var NodeConfigDefault = NodeConfig{ Validation: valnode.DefaultValidationConfig, ParentChain: conf.L1ConfigDefault, Chain: conf.L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", FileLogging: genericconf.DefaultFileLoggingConfig, 
Persistent: conf.PersistentConfigDefault, @@ -743,7 +742,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { valnode.ValidationConfigAddOptions("validation", f) conf.L1ConfigAddOptions("parent-chain", f) conf.L2ConfigAddOptions("chain", f) - f.Int("log-level", NodeConfigDefault.LogLevel, "log level") + f.String("log-level", NodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", NodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index c483526aa1..3ef888e897 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) if err != nil { return nil, err } diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 5a7499e691..6f786f976a 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -20,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/relay" - "golang.org/x/exp/slog" ) func main() { @@ -69,8 +68,12 @@ func startup() error { flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } + logLevel, err := genericconf.ToSlogLevel(relayConfig.LogLevel) + if err != nil { + confighelpers.PrintErrorAndExit(err, printSampleUsage) + } glogger := log.NewGlogHandler(handler) - glogger.Verbosity(slog.Level(relayConfig.LogLevel)) + glogger.Verbosity(logLevel) log.SetDefault(log.NewLogger(glogger)) vcsRevision, _, vcsTime := confighelpers.GetVersion() diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 3348d0b431..71ea6760ed 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -28,6 +28,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das/dastree" @@ -116,8 +117,8 @@ func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { return nil } -func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.DiscardImmediately, nil +func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.DiscardImmediately, nil } type BlobPreimageReader struct { @@ -203,21 +204,21 @@ func main() { if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - var dasReader arbstate.DataAvailabilityReader + var dasReader daprovider.DASReader if dasEnabled { dasReader = &PreimageDASReader{} } backend := WavmInbox{} - var keysetValidationMode = arbstate.KeysetPanicIfInvalid + var keysetValidationMode = daprovider.KeysetPanicIfInvalid if backend.GetPositionWithinMessage() > 0 { - keysetValidationMode = arbstate.KeysetDontValidate + keysetValidationMode = daprovider.KeysetDontValidate } - var daProviders []arbstate.DataAvailabilityProvider + var dapReaders []daprovider.Reader if 
dasReader != nil { - daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader)) } - daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/contracts b/contracts index 77ee9de042..7a41cd59cd 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 77ee9de042de225fab560096f7624f3d13bd12cb +Subproject commit 7a41cd59cdf2eb01cf31c2351b8d1ff6fbf52178 diff --git a/das/aggregator.go b/das/aggregator.go index 4b4571eb43..d3edd58437 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" @@ -37,8 +37,6 @@ var DefaultAggregatorConfig = AggregatorConfig{ Backends: "", } -var BatchToDasFailed = errors.New("unable to batch to DAS") - func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") @@ -164,7 +162,7 @@ type storeResponse struct { // constructed, calls to Store(...) will try to verify the passed-in data's signature // is from the batch poster. If the contract details are not provided, then the // signature is not checked, which is useful for testing. -func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig)) if a.addrVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) @@ -243,7 +241,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, }(ctx, d) } - var aggCert arbstate.DataAvailabilityCertificate + var aggCert daprovider.DataAvailabilityCertificate type certDetails struct { pubKeys []blsSignatures.PublicKey @@ -296,7 +294,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, } } else if storeFailures > a.maxAllowedServiceStoreFailures { cd := certDetails{} - cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). 
%w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed) + cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed) certDetailsChan <- cd returned = true } @@ -323,10 +321,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey) if err != nil { //nolint:errorlint - return nil, fmt.Errorf("%s. %w", err.Error(), BatchToDasFailed) + return nil, fmt.Errorf("%s. %w", err.Error(), daprovider.ErrBatchToDasFailed) } if !verified { - return nil, fmt.Errorf("failed aggregate signature check. %w", BatchToDasFailed) + return nil, fmt.Errorf("failed aggregate signature check. %w", daprovider.ErrBatchToDasFailed) } return &aggCert, nil } diff --git a/das/aggregator_test.go b/das/aggregator_test.go index ef8ef5327a..728db6cf50 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -16,10 +16,10 @@ import ( "testing" "time" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" ) func TestDAS_BasicAggregationLocal(t *testing.T) { @@ -123,7 +123,7 @@ type WrapStore struct { DataAvailabilityServiceWriter } -func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { switch w.injector.shouldFail() { case success: return w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go index 13bdb189d3..439ccda086 100644 --- a/das/cache_storage_service.go +++ b/das/cache_storage_service.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -82,7 +82,7 @@ func (c *CacheStorageService) Close(ctx context.Context) error { return c.baseStorageService.Close(ctx) } -func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return c.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index bc8ab5bc19..99311decaa 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -8,7 +8,7 @@ import ( "errors" "sync" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -38,13 +38,13 @@ func (c *syncedKeysetCache) put(key [32]byte, value []byte) { } type ChainFetchReader struct { - arbstate.DataAvailabilityReader + daprovider.DASReader seqInboxCaller *bridgegen.SequencerInboxCaller seqInboxFilterer *bridgegen.SequencerInboxFilterer keysetCache syncedKeysetCache } -func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { +func 
NewChainFetchReader(inner daprovider.DASReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client) if err != nil { return nil, err @@ -53,18 +53,18 @@ func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil return NewChainFetchReaderWithSeqInbox(inner, seqInbox) } -func NewChainFetchReaderWithSeqInbox(inner arbstate.DataAvailabilityReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { +func NewChainFetchReaderWithSeqInbox(inner daprovider.DASReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { return &ChainFetchReader{ - DataAvailabilityReader: inner, - seqInboxCaller: &seqInbox.SequencerInboxCaller, - seqInboxFilterer: &seqInbox.SequencerInboxFilterer, - keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, + DASReader: inner, + seqInboxCaller: &seqInbox.SequencerInboxCaller, + seqInboxFilterer: &seqInbox.SequencerInboxFilterer, + keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, }, nil } func (c *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) - return chainFetchGetByHash(ctx, c.DataAvailabilityReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) + return chainFetchGetByHash(ctx, c.DASReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) } func (c *ChainFetchReader) String() string { return "ChainFetchReader" @@ -72,7 +72,7 @@ func (c *ChainFetchReader) String() string { func chainFetchGetByHash( ctx context.Context, - daReader arbstate.DataAvailabilityReader, + daReader daprovider.DASReader, cache *syncedKeysetCache, seqInboxCaller *bridgegen.SequencerInboxCaller, seqInboxFilterer *bridgegen.SequencerInboxFilterer, diff --git a/das/das.go b/das/das.go index dd8e43a34d..b0708e3b33 100644 --- a/das/das.go +++ b/das/das.go @@ -5,7 +5,6 @@ package das import ( "context" - "encoding/binary" "errors" "fmt" "math" @@ -16,18 +15,17 @@ import ( "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type DataAvailabilityServiceWriter interface { // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). - Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) + Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) fmt.Stringer } type DataAvailabilityServiceReader interface { - arbstate.DataAvailabilityReader + daprovider.DASReader fmt.Stringer } @@ -138,25 +136,6 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "parent chain address of SequencerInbox contract") } -func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { - - flags := arbstate.DASMessageHeaderFlag - if c.Version != 0 { - flags |= arbstate.TreeDASMessageHeaderFlag - } - - buf := make([]byte, 0) - buf = append(buf, flags) - buf = append(buf, c.KeysetHash[:]...) - buf = append(buf, c.SerializeSignableFields()...) - - var intData [8]byte - binary.BigEndian.PutUint64(intData[:], c.SignersMask) - buf = append(buf, intData[:]...) 
- - return append(buf, blsSignatures.SignatureToBytes(c.Sig)...) -} - func GetL1Client(ctx context.Context, maxConnectionAttempts int, l1URL string) (*ethclient.Client, error) { if maxConnectionAttempts <= 0 { maxConnectionAttempts = math.MaxInt diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 54d8eba94c..5fca1e449f 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/util/pretty" ) @@ -34,7 +34,7 @@ func NewDASRPCClient(target string) (*DASRPCClient, error) { }, nil } -func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(reqSig), "this", *c) var ret StoreResult if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { @@ -44,7 +44,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 if err != nil { return nil, err } - return &arbstate.DataAvailabilityCertificate{ + return &daprovider.DataAvailabilityCertificate{ DataHash: common.BytesToHash(ret.DataHash), Timeout: uint64(ret.Timeout), SignersMask: uint64(ret.SignersMask), @@ -62,11 +62,11 @@ func (c *DASRPCClient) HealthCheck(ctx context.Context) error { return c.clnt.CallContext(ctx, nil, "das_healthCheck") } -func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { var res string err := c.clnt.CallContext(ctx, &res, "das_expirationPolicy") if err != nil { return -1, err } - return arbstate.StringToExpirationPolicy(res) + return daprovider.StringToExpirationPolicy(res) } diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index bc325a3200..d873f0568d 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -26,7 +27,7 @@ type node struct { // RecordHash chunks the preimage into 64kB bins and generates a recursive hash tree, // calling the caller-supplied record function for each hash/preimage pair created in // building the tree structure. -func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { +func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...[]byte) bytes32 { // Algorithm // 1. split the preimage into 64kB bins and double hash them to produce the tree's leaves // 2. 
repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root @@ -48,7 +49,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { keccord := func(value []byte) bytes32 { hash := crypto.Keccak256Hash(value) - record(hash, value) + record(hash, value, arbutil.Keccak256PreimageType) return hash } prepend := func(before byte, slice []byte) []byte { @@ -94,7 +95,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { func Hash(preimage ...[]byte) bytes32 { // Merkelizes without recording anything. All but the validator's DAS will call this - return RecordHash(func(bytes32, []byte) {}, preimage...) + return RecordHash(func(bytes32, []byte, arbutil.PreimageType) {}, preimage...) } func HashBytes(preimage ...[]byte) []byte { diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index 33f729f4f3..4d24c9ae98 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/testhelpers" @@ -25,7 +26,7 @@ func TestDASTree(t *testing.T) { tests = append(tests, large) } - record := func(key bytes32, value []byte) { + record := func(key bytes32, value []byte, ty arbutil.PreimageType) { colors.PrintGrey("storing ", key, " ", pretty.PrettyBytes(value)) store[key] = value if crypto.Keccak256Hash(value) != key { diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 33d21942b2..5596ff378e 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -12,7 +12,7 @@ import ( badger "github.com/dgraph-io/badger/v4" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -173,11 +173,11 @@ func (dbs *DBStorageService) Close(ctx context.Context) error { return dbs.stopWaiter.StopAndWait() } -func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if dbs.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (dbs *DBStorageService) String() string { diff --git a/das/extra_signature_checker_test.go b/das/extra_signature_checker_test.go index 88a0969229..2fcfac167d 100644 --- a/das/extra_signature_checker_test.go +++ b/das/extra_signature_checker_test.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/signature" ) @@ -22,7 +22,7 @@ type StubSignatureCheckDAS struct { keyDir string } -func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { pubkeyEncoded, 
err := ioutil.ReadFile(s.keyDir + "/ecdsa.pub") if err != nil { return nil, err @@ -39,8 +39,8 @@ func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeo return nil, nil } -func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *StubSignatureCheckDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index a78b4104e8..49f961da60 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/pretty" @@ -18,7 +18,7 @@ import ( type FallbackStorageService struct { StorageService - backup arbstate.DataAvailabilityReader + backup daprovider.DASReader backupHealthChecker DataAvailabilityServiceHealthChecker backupRetentionSeconds uint64 ignoreRetentionWriteErrors bool @@ -32,7 +32,7 @@ type FallbackStorageService struct { // a successful GetByHash result from the backup is Put into the primary. func NewFallbackStorageService( primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, backupRetentionSeconds uint64, // how long to retain data that we copy in from the backup (MaxUint64 means forever) ignoreRetentionWriteErrors bool, // if true, don't return error if write of retention data to primary fails diff --git a/das/ipfs_storage_service.bkup_go b/das/ipfs_storage_service.bkup_go index 11d435affb..43b06fd4b6 100644 --- a/das/ipfs_storage_service.bkup_go +++ b/das/ipfs_storage_service.bkup_go @@ -25,7 +25,8 @@ import ( "github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/multiformats/go-multihash" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -183,7 +184,7 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 var chunks [][]byte - record := func(_ common.Hash, value []byte) { + record := func(_ common.Hash, value []byte, ty arbutil.PreimageType) { chunks = append(chunks, value) } @@ -222,8 +223,8 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 panic("unreachable") } -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *IpfsStorageService) Sync(ctx context.Context) error { diff --git a/das/ipfs_storage_service_stub.go b/das/ipfs_storage_service_stub.go index db434d5bf3..5814f2c7e4 100644 --- a/das/ipfs_storage_service_stub.go +++ b/das/ipfs_storage_service_stub.go @@ -14,7 +14,7 @@ import ( "errors" 
"github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" flag "github.com/spf13/pflag" ) @@ -47,8 +47,8 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 return ErrIpfsNotSupported } -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, ErrIpfsNotSupported +func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, ErrIpfsNotSupported } func (s *IpfsStorageService) Sync(ctx context.Context) error { diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 5fa5306e39..4ebb1d56d9 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -130,8 +130,8 @@ func (s *LocalFileStorageService) Close(ctx context.Context) error { return nil } -func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *LocalFileStorageService) String() string { diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 6484231479..91f7d9a2f5 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) @@ -79,8 +79,8 @@ func (m *MemoryBackedStorageService) Close(ctx context.Context) error { return nil } -func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (m *MemoryBackedStorageService) String() string { diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go index 7a15f6bec0..dbb61cba96 100644 --- a/das/panic_wrapper.go +++ b/das/panic_wrapper.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type WriterPanicWrapper struct { @@ -26,7 +26,7 @@ func (w *WriterPanicWrapper) String() string { return fmt.Sprintf("WriterPanicWrapper{%v}", w.DataAvailabilityServiceWriter) } -func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) if err != nil { panic(fmt.Sprintf("panic wrapper Store: %v", err)) diff 
--git a/das/read_limited.go b/das/read_limited.go index 74d6d5358d..5ef0335d5f 100644 --- a/das/read_limited.go +++ b/das/read_limited.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) // These classes are wrappers implementing das.StorageService and das.DataAvailabilityService. @@ -16,12 +16,12 @@ import ( // it is a programming error in the code setting up the node or daserver if a non-writeable object // is used in a writeable context. -func NewReadLimitedStorageService(reader arbstate.DataAvailabilityReader) *readLimitedStorageService { +func NewReadLimitedStorageService(reader daprovider.DASReader) *readLimitedStorageService { return &readLimitedStorageService{reader} } type readLimitedStorageService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } func (s *readLimitedStorageService) Put(ctx context.Context, data []byte, expiration uint64) error { @@ -37,22 +37,22 @@ func (s *readLimitedStorageService) Close(ctx context.Context) error { } func (s *readLimitedStorageService) String() string { - return fmt.Sprintf("readLimitedStorageService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("readLimitedStorageService(%v)", s.DASReader) } type readLimitedDataAvailabilityService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } -func NewReadLimitedDataAvailabilityService(da arbstate.DataAvailabilityReader) *readLimitedDataAvailabilityService { +func NewReadLimitedDataAvailabilityService(da daprovider.DASReader) *readLimitedDataAvailabilityService { return &readLimitedDataAvailabilityService{da} } -func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { panic("Logic error: readLimitedDataAvailabilityService.Store shouldn't be called.") } func (s *readLimitedDataAvailabilityService) String() string { - return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DASReader) } diff --git a/das/reader_aggregator_strategies.go b/das/reader_aggregator_strategies.go index 855be5e318..d20760bd5b 100644 --- a/das/reader_aggregator_strategies.go +++ b/das/reader_aggregator_strategies.go @@ -10,30 +10,30 @@ import ( "sync" "sync/atomic" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNoReadersResponded = errors.New("no DAS readers responded successfully") type aggregatorStrategy interface { newInstance() aggregatorStrategyInstance - update([]arbstate.DataAvailabilityReader, map[arbstate.DataAvailabilityReader]readerStats) + update([]daprovider.DASReader, map[daprovider.DASReader]readerStats) } type abstractAggregatorStrategy struct { sync.RWMutex - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats } -func (s *abstractAggregatorStrategy) update(readers []arbstate.DataAvailabilityReader, stats map[arbstate.DataAvailabilityReader]readerStats) { +func (s *abstractAggregatorStrategy) update(readers []daprovider.DASReader, stats map[daprovider.DASReader]readerStats) { s.Lock() defer s.Unlock() - s.readers = 
make([]arbstate.DataAvailabilityReader, len(readers)) + s.readers = make([]daprovider.DASReader, len(readers)) copy(s.readers, readers) - s.stats = make(map[arbstate.DataAvailabilityReader]readerStats) + s.stats = make(map[daprovider.DASReader]readerStats) for k, v := range stats { s.stats[k] = v } @@ -51,11 +51,11 @@ type simpleExploreExploitStrategy struct { func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance { iterations := atomic.AddUint32(&s.iterations, 1) - readerSets := make([][]arbstate.DataAvailabilityReader, 0) + readerSets := make([][]daprovider.DASReader, 0) s.RLock() defer s.RUnlock() - readers := make([]arbstate.DataAvailabilityReader, len(s.readers)) + readers := make([]daprovider.DASReader, len(s.readers)) copy(readers, s.readers) if iterations%(s.exploreIterations+s.exploitIterations) < s.exploreIterations { @@ -70,7 +70,7 @@ func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance } for i, maxTake := 0, 1; i < len(readers); maxTake = maxTake * 2 { - readerSet := make([]arbstate.DataAvailabilityReader, 0, maxTake) + readerSet := make([]daprovider.DASReader, 0, maxTake) for taken := 0; taken < maxTake && i < len(readers); i, taken = i+1, taken+1 { readerSet = append(readerSet, readers[i]) } @@ -91,7 +91,7 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { si := basicStrategyInstance{} for _, reader := range s.readers { - si.readerSets = append(si.readerSets, []arbstate.DataAvailabilityReader{reader}) + si.readerSets = append(si.readerSets, []daprovider.DASReader{reader}) } return &si @@ -99,14 +99,14 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { // Instance of a strategy that returns readers in an order according to the strategy type aggregatorStrategyInstance interface { - nextReaders() []arbstate.DataAvailabilityReader + nextReaders() []daprovider.DASReader } type basicStrategyInstance struct { - readerSets [][]arbstate.DataAvailabilityReader + readerSets [][]daprovider.DASReader } -func (si *basicStrategyInstance) nextReaders() []arbstate.DataAvailabilityReader { +func (si *basicStrategyInstance) nextReaders() []daprovider.DASReader { if len(si.readerSets) == 0 { return nil } diff --git a/das/reader_aggregator_strategies_test.go b/das/reader_aggregator_strategies_test.go index 987bc08938..cdb85b25e9 100644 --- a/das/reader_aggregator_strategies_test.go +++ b/das/reader_aggregator_strategies_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type dummyReader struct { @@ -26,13 +26,13 @@ func (*dummyReader) HealthCheck(context.Context) error { return errors.New("not implemented") } -func (*dummyReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (*dummyReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return -1, errors.New("not implemented") } func TestDAS_SimpleExploreExploit(t *testing.T) { - readers := []arbstate.DataAvailabilityReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} - stats := make(map[arbstate.DataAvailabilityReader]readerStats) + readers := []daprovider.DASReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} + stats := make(map[daprovider.DASReader]readerStats) stats[readers[0]] = []readerStat{ // weighted avg 10s {10 * 
time.Second, true}, } @@ -57,7 +57,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { {8 * time.Second, true}, } - expectedOrdering := []arbstate.DataAvailabilityReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} + expectedOrdering := []daprovider.DASReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} expectedExploreIterations, expectedExploitIterations := uint32(5), uint32(5) strategy := simpleExploreExploitStrategy{ @@ -66,7 +66,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { } strategy.update(readers, stats) - checkMatch := func(expected, was []arbstate.DataAvailabilityReader, doMatch bool) { + checkMatch := func(expected, was []daprovider.DASReader, doMatch bool) { if len(expected) != len(was) { Fail(t, fmt.Sprintf("Incorrect number of nextReaders %d, expected %d", len(was), len(expected))) } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 3449a8e78c..dbd85921ed 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -13,7 +13,7 @@ import ( "golang.org/x/crypto/sha3" "github.com/go-redis/redis/v8" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/redisutil" @@ -162,7 +162,7 @@ func (rs *RedisStorageService) Close(ctx context.Context) error { return rs.baseStorageService.Close(ctx) } -func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return rs.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/redundant_storage_service.go b/das/redundant_storage_service.go index 74d32bd819..3158d28076 100644 --- a/das/redundant_storage_service.go +++ b/das/redundant_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) @@ -121,7 +121,7 @@ func (r *RedundantStorageService) Close(ctx context.Context) error { return anyError } -func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { // If at least one inner service has KeepForever, // then whole redundant service can serve after timeout. @@ -132,20 +132,20 @@ func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstat // If no inner service has KeepForever, DiscardAfterArchiveTimeout, // but at least one inner service has DiscardAfterDataTimeout, // then whole redundant service can serve till data timeout. 
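The comments above pin down a simple precedence: any KeepForever wins outright, DiscardAfterArchiveTimeout outranks DiscardAfterDataTimeout, and -1 marks "no policy seen yet". A minimal standalone sketch of the rule the hunk below migrates to daprovider (the helper name is hypothetical, not part of this PR):

```go
// mergePolicies distills the precedence implemented by
// RedundantStorageService.ExpirationPolicy, minus the error plumbing.
func mergePolicies(policies []daprovider.ExpirationPolicy) daprovider.ExpirationPolicy {
	var res daprovider.ExpirationPolicy = -1 // nothing seen yet
	for _, p := range policies {
		switch p {
		case daprovider.KeepForever:
			return daprovider.KeepForever // strongest policy; short-circuit
		case daprovider.DiscardAfterArchiveTimeout:
			res = daprovider.DiscardAfterArchiveTimeout
		case daprovider.DiscardAfterDataTimeout:
			if res != daprovider.DiscardAfterArchiveTimeout {
				res = daprovider.DiscardAfterDataTimeout
			}
		}
	}
	return res
}
```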
- var res arbstate.ExpirationPolicy = -1 + var res daprovider.ExpirationPolicy = -1 for _, serv := range r.innerServices { expirationPolicy, err := serv.ExpirationPolicy(ctx) if err != nil { return -1, err } switch expirationPolicy { - case arbstate.KeepForever: - return arbstate.KeepForever, nil - case arbstate.DiscardAfterArchiveTimeout: - res = arbstate.DiscardAfterArchiveTimeout - case arbstate.DiscardAfterDataTimeout: - if res != arbstate.DiscardAfterArchiveTimeout { - res = arbstate.DiscardAfterDataTimeout + case daprovider.KeepForever: + return daprovider.KeepForever, nil + case daprovider.DiscardAfterArchiveTimeout: + res = daprovider.DiscardAfterArchiveTimeout + case daprovider.DiscardAfterDataTimeout: + if res != daprovider.DiscardAfterArchiveTimeout { + res = daprovider.DiscardAfterDataTimeout } } } diff --git a/das/restful_client.go b/das/restful_client.go index 7d757c6bb8..b65426e7cd 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -14,11 +14,11 @@ import ( "strings" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) -// RestfulDasClient implements DataAvailabilityReader +// RestfulDasClient implements daprovider.DASReader type RestfulDasClient struct { url string } @@ -65,7 +65,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]b return nil, err } if !dastree.ValidHash(hash, decodedBytes) { - return nil, arbstate.ErrHashMismatch + return nil, daprovider.ErrHashMismatch } return decodedBytes, nil @@ -82,7 +82,7 @@ func (c *RestfulDasClient) HealthCheck(ctx context.Context) error { return nil } -func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { res, err := http.Get(c.url + expirationPolicyRequestPath) if err != nil { return -1, err @@ -101,5 +101,5 @@ func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.Expir return -1, err } - return arbstate.StringToExpirationPolicy(response.ExpirationPolicy) + return daprovider.StringToExpirationPolicy(response.ExpirationPolicy) } diff --git a/das/restful_server.go b/das/restful_server.go index 5c5e82e820..b1607729e2 100644 --- a/das/restful_server.go +++ b/das/restful_server.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/pretty" ) @@ -32,13 +32,13 @@ var ( type RestfulDasServer struct { server *http.Server - daReader arbstate.DataAvailabilityReader + daReader daprovider.DASReader daHealthChecker DataAvailabilityServiceHealthChecker httpServerExitedChan chan interface{} httpServerError error } -func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { +func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", 
address, port)) if err != nil { return nil, err @@ -46,7 +46,7 @@ func NewRestfulDasServer(address string, port uint64, restServerTimeouts generic return NewRestfulDasServerOnListener(listener, restServerTimeouts, daReader, daHealthChecker) } -func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { +func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { ret := &RestfulDasServer{ daReader: daReader, diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 134c4229c8..490116a89a 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -12,7 +12,7 @@ import ( "math/bits" "net/url" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -102,7 +102,7 @@ func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([3 return [32]byte{}, nil, errors.New("at least two signers share a mask") } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: uint64(assumedHonest), PubKeys: pubKeys, } diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 1a3ae94114..b5150fb8ed 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -145,11 +145,11 @@ func (s3s *S3StorageService) Close(ctx context.Context) error { return nil } -func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if s3s.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (s3s *S3StorageService) String() string { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 50c4ee9aee..36c51c022e 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -18,7 +18,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -123,7 +123,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( return nil, err } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, PubKeys: []blsSignatures.PublicKey{publicKey}, } @@ -180,7 +180,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( func (d *SignAfterStoreDASWriter) Store( ctx context.Context, message []byte, 
timeout uint64, sig []byte, -) (c *arbstate.DataAvailabilityCertificate, err error) { +) (c *daprovider.DataAvailabilityCertificate, err error) { log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) var verified bool if d.extraBpVerifier != nil { @@ -201,7 +201,7 @@ func (d *SignAfterStoreDASWriter) Store( } } - c = &arbstate.DataAvailabilityCertificate{ + c = &daprovider.DataAvailabilityCertificate{ Timeout: timeout, DataHash: dastree.Hash(message), Version: 1, diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index eb82a33837..dc6147a7e4 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -80,7 +80,7 @@ func SimpleExploreExploitStrategyConfigAddOptions(prefix string, f *flag.FlagSet func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggregatorConfig) (*SimpleDASReaderAggregator, error) { a := SimpleDASReaderAggregator{ config: config, - stats: make(map[arbstate.DataAvailabilityReader]readerStats), + stats: make(map[daprovider.DASReader]readerStats), } combinedUrls := make(map[string]bool) @@ -160,7 +160,7 @@ type readerStat struct { type readerStatMessage struct { readerStat - reader arbstate.DataAvailabilityReader + reader daprovider.DASReader } type SimpleDASReaderAggregator struct { @@ -170,8 +170,8 @@ type SimpleDASReaderAggregator struct { readersMutex sync.RWMutex // readers and stats are only to be updated by the stats goroutine - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats strategy aggregatorStrategy @@ -199,7 +199,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H waitChan := make(chan interface{}) for _, reader := range readers { wg.Add(1) - go func(reader arbstate.DataAvailabilityReader) { + go func(reader daprovider.DASReader) { defer wg.Done() data, err := a.tryGetByHash(subCtx, hash, reader) if err != nil && errors.Is(ctx.Err(), context.Canceled) { @@ -243,7 +243,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H } func (a *SimpleDASReaderAggregator) tryGetByHash( - ctx context.Context, hash common.Hash, reader arbstate.DataAvailabilityReader, + ctx context.Context, hash common.Hash, reader daprovider.DASReader, ) ([]byte, error) { stat := readerStatMessage{reader: reader} stat.success = false @@ -278,7 +278,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { defer a.readersMutex.Unlock() combinedUrls := a.config.Urls combinedUrls = append(combinedUrls, urls...) 
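Every hunk in this stretch applies the same mechanical rename: arbstate.DataAvailabilityReader becomes daprovider.DASReader. Judging only from the call sites visible in this diff (GetByHash on the REST client and aggregator, HealthCheck, ExpirationPolicy, and the -1 sentinel used for policies), the relocated interface presumably looks like the sketch below; the real definition in arbstate/daprovider may carry more:

```go
package daprovider // sketch only; inferred from call sites, not the source

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
)

// ExpirationPolicy is integer-valued (the surrounding code uses -1 as a sentinel).
type ExpirationPolicy int64

// DASReader as implied by its uses throughout this diff.
type DASReader interface {
	GetByHash(ctx context.Context, hash common.Hash) ([]byte, error)
	HealthCheck(ctx context.Context) error
	ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error)
}
```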
- combinedReaders := make(map[arbstate.DataAvailabilityReader]bool) + combinedReaders := make(map[daprovider.DASReader]bool) for _, url := range combinedUrls { reader, err := NewRestfulDasClientFromURL(url) if err != nil { @@ -286,7 +286,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { } combinedReaders[reader] = true } - a.readers = make([]arbstate.DataAvailabilityReader, 0, len(combinedUrls)) + a.readers = make([]daprovider.DASReader, 0, len(combinedUrls)) // Update reader and add newly added stats for reader := range combinedReaders { a.readers = append(a.readers, reader) @@ -350,7 +350,7 @@ func (a *SimpleDASReaderAggregator) HealthCheck(ctx context.Context) error { return nil } -func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { a.readersMutex.RLock() defer a.readersMutex.RUnlock() if len(a.readers) == 0 { @@ -368,7 +368,7 @@ func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbst return -1, err } if ep != expectedExpirationPolicy { - return arbstate.MixedTimeout, nil + return daprovider.MixedTimeout, nil } } return expectedExpirationPolicy, nil diff --git a/das/storage_service.go b/das/storage_service.go index 881d6fc8b1..806e80dba5 100644 --- a/das/storage_service.go +++ b/das/storage_service.go @@ -11,13 +11,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNotFound = errors.New("not found") type StorageService interface { - arbstate.DataAvailabilityReader + daprovider.DASReader Put(ctx context.Context, data []byte, expirationTime uint64) error Sync(ctx context.Context) error Closer diff --git a/das/store_signing.go b/das/store_signing.go index 8039774b65..8ebc1a9805 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/signature" @@ -56,7 +56,7 @@ func NewStoreSigningDAS(inner DataAvailabilityServiceWriter, signer signature.Da return &StoreSigningDAS{inner, signer, addr}, nil } -func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.StoreSigningDAS.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) mySig, err := applyDasSigner(s.signer, message, timeout) if err != nil { diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index c79cd80400..411e7a1977 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -20,7 +20,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" 
"github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/arbmath" @@ -94,7 +94,7 @@ type l1SyncService struct { config SyncToStorageConfig syncTo StorageService - dataSource arbstate.DataAvailabilityReader + dataSource daprovider.DASReader l1Reader *headerreader.HeaderReader inboxContract *bridgegen.SequencerInbox @@ -161,7 +161,7 @@ func writeSyncState(syncDir string, blockNr uint64) error { return os.Rename(f.Name(), path) } -func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource arbstate.DataAvailabilityReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { +func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource daprovider.DASReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { l1Client := l1Reader.Client() inboxContract, err := bridgegen.NewSequencerInbox(inboxAddr, l1Client) if err != nil { @@ -213,9 +213,14 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere data = append(header, data...) preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - if _, err = arbstate.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, arbstate.KeysetValidate); err != nil { - log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) - return err + preimageRecorder := daprovider.RecordPreimagesTo(preimages) + if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimageRecorder, true); err != nil { + if errors.Is(err, daprovider.ErrSeqMsgValidation) { + log.Error(err.Error()) + } else { + log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) + return err + } } for _, preimages := range preimages { for hash, contents := range preimages { @@ -291,7 +296,7 @@ func FindDASDataFromLog( log.Warn("BatchDelivered - no data found", "data", data) return nil, nil } - if !arbstate.IsDASMessageHeaderByte(data[0]) { + if !daprovider.IsDASMessageHeaderByte(data[0]) { log.Warn("BatchDelivered - data not DAS") return nil, nil } @@ -417,7 +422,7 @@ type SyncingFallbackStorageService struct { func NewSyncingFallbackStorageService(ctx context.Context, primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, l1Reader *headerreader.HeaderReader, inboxAddr common.Address, diff --git a/das/util.go b/das/util.go index d98a2687fe..de266c433f 100644 --- a/das/util.go +++ b/das/util.go @@ -7,11 +7,11 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) -func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader, more ...interface{}) { +func logPut(store string, data []byte, timeout uint64, reader daprovider.DASReader, more ...interface{}) { if len(more) == 0 { log.Trace( store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 46d5027ada..38569f44ab 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -116,9 +116,9 @@ func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { return 
s.consensus } -func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { if count == 0 { - return errors.New("cannot reorg out genesis") + return nil, errors.New("cannot reorg out genesis") } s.createBlocksMutex.Lock() resequencing := false @@ -134,7 +134,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost targetBlock := s.bc.GetBlockByNumber(uint64(blockNum)) if targetBlock == nil { log.Warn("reorg target block not found", "block", blockNum) - return nil + return nil, nil } // reorg Rust-side VM state @@ -142,17 +142,20 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost err := s.bc.ReorgToOldBlock(targetBlock) if err != nil { - return err + return nil, err } + + newMessagesResults := make([]*execution.MessageResult, 0, len(oldMessages)) for i := range newMessages { var msgForPrefetch *arbostypes.MessageWithMetadata if i < len(newMessages)-1 { msgForPrefetch = &newMessages[i] } - err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) + msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) if err != nil { - return err + return nil, err } + newMessagesResults = append(newMessagesResults, msgResult) } if s.recorder != nil { s.recorder.ReorgTo(targetBlock.Header()) @@ -161,7 +164,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost s.resequenceChan <- oldMessages resequencing = true } - return nil + return newMessagesResults, nil } func (s *ExecutionEngine) getCurrentHeader() (*types.Header, error) { @@ -370,17 +373,21 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
return nil, err } + pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1) + if err != nil { + return nil, err + } + msgWithMeta := arbostypes.MessageWithMetadata{ Message: msg, DelayedMessagesRead: delayedMessagesRead, } - - pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1) + msgResult, err := s.resultFromHeader(block.Header()) if err != nil { return nil, err } - err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, *msgResult) if err != nil { return nil, err } @@ -426,18 +433,24 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp DelayedMessagesRead: delayedSeqNum + 1, } - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) + startTime := time.Now() + block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta, false) if err != nil { return nil, err } + blockCalcTime := time.Since(startTime) - startTime := time.Now() - block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta, false) + msgResult, err := s.resultFromHeader(block.Header()) if err != nil { return nil, err } - err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, *msgResult) + if err != nil { + return nil, err + } + + err = s.appendBlock(block, statedb, receipts, blockCalcTime) if err != nil { return nil, err } @@ -601,25 +614,25 @@ func (s *ExecutionEngine) cacheL1PriceDataOfMsg(num arbutil.MessageIndex, receip // in parallel, creates a block by executing msgForPrefetch (msg+1) against the latest state // but does not store the block. // This helps in filling the cache, so that the next block creation is faster. 
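The executionengine.go changes from here on serve one signature migration, also visible in the execution/interface.go hunk further down: DigestMessage and Reorg now hand their *execution.MessageResult(s) back to the caller instead of leaving the consensus side to re-derive them via ResultAtPos. A hedged sketch of the resulting calling convention (the driver function is illustrative, not from this PR):

```go
// feedMessages digests consecutive messages and collects the per-message
// results that DigestMessage now returns alongside its error.
func feedMessages(exec execution.ExecutionClient, first arbutil.MessageIndex,
	msgs []arbostypes.MessageWithMetadata,
) ([]*execution.MessageResult, error) {
	results := make([]*execution.MessageResult, 0, len(msgs))
	for i := range msgs {
		// nil msgForPrefetch: skip the speculative execution of msg+1
		res, err := exec.DigestMessage(first+arbutil.MessageIndex(i), &msgs[i], nil)
		if err != nil {
			return nil, err
		}
		results = append(results, res)
	}
	return results, nil
}
```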
-func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { if !s.createBlocksMutex.TryLock() { - return errors.New("createBlock mutex held") + return nil, errors.New("createBlock mutex held") } defer s.createBlocksMutex.Unlock() return s.digestMessageWithBlockMutex(num, msg, msgForPrefetch) } -func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { currentHeader, err := s.getCurrentHeader() if err != nil { - return err + return nil, err } curMsg, err := s.BlockNumberToMessageIndex(currentHeader.Number.Uint64()) if err != nil { - return err + return nil, err } if curMsg+1 != num { - return fmt.Errorf("wrong message number in digest got %d expected %d", num, curMsg+1) + return nil, fmt.Errorf("wrong message number in digest got %d expected %d", num, curMsg+1) } startTime := time.Now() @@ -634,22 +647,23 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, block, statedb, receipts, err := s.createBlockFromNextMessage(msg, false) if err != nil { - return err + return nil, err } + err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { - return err + return nil, err } if time.Now().After(s.nextScheduledVersionCheck) { s.nextScheduledVersionCheck = time.Now().Add(time.Minute) arbState, err := arbosState.OpenSystemArbosState(statedb, nil, true) if err != nil { - return err + return nil, err } version, timestampInt, err := arbState.GetScheduledUpgrade() if err != nil { - return err + return nil, err } var timeUntilUpgrade time.Duration var timestamp time.Time @@ -685,7 +699,12 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, case s.newBlockNotifier <- struct{}{}: default: } - return nil + + msgResult, err := s.resultFromHeader(block.Header()) + if err != nil { + return nil, err + } + return msgResult, nil } func (s *ExecutionEngine) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 54f9ed6fe1..ae76b88530 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -343,10 +343,10 @@ func (n *ExecutionNode) StopAndWait() { // } } -func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } -func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { +func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { return 
n.ExecEngine.Reorg(count, newMessages, oldMessages) } func (n *ExecutionNode) HeadMessageNumber() (arbutil.MessageIndex, error) { diff --git a/execution/interface.go b/execution/interface.go index e2b41b9a0a..d2a5b58fe5 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -30,8 +30,8 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { - DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error - Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error + DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*MessageResult, error) + Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) @@ -92,7 +92,7 @@ type ConsensusInfo interface { } type ConsensusSequencer interface { - WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error + WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult MessageResult) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) BacklogL1GasCharged() uint64 diff --git a/go-ethereum b/go-ethereum index 72f81daa8c..9874ec397a 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 72f81daa8c59f044246b6e1f3eca08187edd7417 +Subproject commit 9874ec397a5b499eefc98f7f9ae9632c3fc1e17f diff --git a/go.mod b/go.mod index 22b6b8b4af..6b350a4008 100644 --- a/go.mod +++ b/go.mod @@ -92,7 +92,7 @@ require ( github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect + github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect diff --git a/go.sum b/go.sum index 9d685c0abc..1b63dfb496 100644 --- a/go.sum +++ b/go.sum @@ -233,8 +233,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= +github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 
h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= diff --git a/relay/relay.go b/relay/relay.go index 8e29971384..89bb899f29 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -10,8 +10,6 @@ import ( flag "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -120,7 +118,7 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` Chain L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level"` + LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -133,7 +131,7 @@ type Config struct { var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, Chain: L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -146,7 +144,7 @@ var ConfigDefault = Config{ func ConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) L2ConfigAddOptions("chain", f) - f.Int("log-level", ConfigDefault.LogLevel, "log level") + f.String("log-level", ConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ConfigDefault.LogType, "log type") f.Bool("metrics", ConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 56389ae80e..d68365ede0 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -10,9 +10,9 @@ import ( "math/big" "time" - "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -50,7 +50,6 @@ type L1Validator struct { wallet ValidatorWalletInterface callOpts bind.CallOpts - das arbstate.DataAvailabilityReader inboxTracker InboxTrackerInterface txStreamer TransactionStreamerInterface blockValidator *BlockValidator @@ -62,7 +61,6 @@ func NewL1Validator( wallet ValidatorWalletInterface, validatorUtilsAddress common.Address, callOpts bind.CallOpts, - das arbstate.DataAvailabilityReader, inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, @@ -90,7 +88,6 @@ func NewL1Validator( builder: builder, wallet: wallet, callOpts: callOpts, - das: das, inboxTracker: inboxTracker, txStreamer: txStreamer, blockValidator: blockValidator, @@ -191,12 +188,16 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { callOpts := v.getCallOpts(ctx) - requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) + baseStake, err := v.rollup.BaseStake(callOpts) if err != nil { return false, err } - baseStake, err := v.rollup.BaseStake(callOpts) + requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) if err != nil { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + log.Warn("execution reverted checking if required stake is elevated; assuming elevated", "err", err) + return true, nil + } return false, err } return requiredStake.Cmp(baseStake) > 0, nil diff --git a/staker/staker.go b/staker/staker.go index
2a95e9c9f7..da6413e122 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -291,7 +291,7 @@ func NewStaker( } client := l1Reader.Client() val, err := NewL1Validator(client, wallet, validatorUtilsAddress, callOpts, - statelessBlockValidator.daService, statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) + statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) if err != nil { return nil, err } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 19c61dae3d..49ce8f492c 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -9,6 +9,8 @@ import ( "fmt" "testing" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/ethdb" @@ -16,7 +18,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/rpcclient" @@ -38,8 +39,7 @@ type StatelessBlockValidator struct { inboxTracker InboxTrackerInterface streamer TransactionStreamerInterface db ethdb.Database - daService arbstate.DataAvailabilityReader - blobReader arbstate.BlobReader + dapReaders []daprovider.Reader } type BlockValidatorRegistrer interface { @@ -192,8 +192,7 @@ func NewStatelessBlockValidator( streamer TransactionStreamerInterface, recorder execution.ExecutionRecorder, arbdb ethdb.Database, - das arbstate.DataAvailabilityReader, - blobReader arbstate.BlobReader, + dapReaders []daprovider.Reader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -226,8 +225,7 @@ func NewStatelessBlockValidator( inboxTracker: inbox, streamer: streamer, db: arbdb, - daService: das, - blobReader: blobReader, + dapReaders: dapReaders, execSpawners: executionSpawners, }, nil } @@ -267,39 +265,27 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } - if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { - payload := batch.Data[41:] - if len(payload)%len(common.Hash{}) != 0 { - return fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{})) - for i := 0; i*32 < len(payload); i += 1 { - copy(versionedHashes[i][:], payload[i*32:(i+1)*32]) - } - blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes) - if err != nil { - return fmt.Errorf("failed to get blobs: %w", err) - } - if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil { - e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) - } - for i, blob := range blobs { - // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable - // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview - b := blob - e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:] - } - } - if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) + foundDA := false + for 
_, dapReader := range v.dapReaders { + if dapReader != nil && dapReader.IsValidHeaderByte(batch.Data[40]) { + preimageRecorder := daprovider.RecordPreimagesTo(e.Preimages) + _, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, preimageRecorder, true) if err != nil { - return err + // Matches the way keyset validation was done inside DAS readers, i.e. logging the error + // But other daproviders might just want to return the error + if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(batch.Data[40]) { + log.Error(err.Error()) + } else { + return err + } } + foundDA = true + break + } + } + if !foundDA { + if daprovider.IsDASMessageHeaderByte(batch.Data[40]) { + log.Error("No DAS Reader configured, but sequencer message found with DAS header") } } } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 0fc127d0e3..6bc22b23da 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -171,7 +171,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, DeployInfo: builder.L2.ConsensusNode.DeployInfo, TransactOpts: &seqTxOpts, - DAWriter: nil, + DAPWriter: nil, ParentChainID: parentChainID, }, ) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index a9f655ff77..1b2b7ca6d6 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -20,7 +20,7 @@ import ( "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -1035,7 +1035,7 @@ func authorizeDASKeyset( if dasSignerKey == nil { return } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, PubKeys: []blsSignatures.PublicKey{*dasSignerKey}, } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 197ea1a59f..e4c49c3227 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -390,7 +390,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -407,7 +407,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer,
challengerExec.Recorder, challengerL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index d0ca0ccda3..739d756a31 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -15,7 +15,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -23,6 +25,31 @@ import ( "github.com/offchainlabs/nitro/util/merkletree" ) +func TestP256VerifyEnabled(t *testing.T) { + gethhook.RequireHookedGeth() + for _, tc := range []struct { + stylusEnabled bool + wantP256Verify bool + }{ + { + stylusEnabled: false, + wantP256Verify: false, + }, + { + stylusEnabled: true, + wantP256Verify: true, + }, + } { + got := false + for _, a := range vm.ActivePrecompiles(params.Rules{IsStylus: tc.stylusEnabled}) { + got = got || (a == common.BytesToAddress([]byte{0x01, 0x00})) + } + if got != tc.wantP256Verify { + t.Errorf("Got P256Verify enabled: %t, want: %t", got, tc.wantP256Verify) + } + } +} + func TestOutboxProofs(t *testing.T) { t.Parallel() gethhook.RequireHookedGeth() diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go new file mode 100644 index 0000000000..d4cab510d3 --- /dev/null +++ b/system_tests/program_recursive_test.go @@ -0,0 +1,195 @@ +package arbtest + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +type multiCallRecurse struct { + Name string + opcode vm.OpCode +} + +func printRecurse(recurse []multiCallRecurse) string { + result := "" + for _, contract := range recurse { + result = result + "(" + contract.Name + "," + contract.opcode.String() + ")" + } + return result +} + +func testProgramRecursiveCall(t *testing.T, builder *NodeBuilder, slotVals map[string]common.Hash, rander *testhelpers.PseudoRandomDataSource, recurse []multiCallRecurse) uint64 { + ctx := builder.ctx + slot := common.HexToHash("0x11223344556677889900aabbccddeeff") + val := common.Hash{} + args := []byte{} + if recurse[0].opcode == vm.SSTORE { + // send event from storage on sstore + val = rander.GetHash() + args = append([]byte{0x1, 0, 0, 0, 65, 0x18}, slot[:]...) + args = append(args, val[:]...) + } else if recurse[0].opcode == vm.SLOAD { + args = append([]byte{0x1, 0, 0, 0, 33, 0x11}, slot[:]...) 
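+		// Editorial note, inferred from the two branches above rather than from
+		// the multicall ABI source: args begins with a header of
+		// [action count = 0x1][4-byte big-endian length][1-byte action selector],
+		// followed by the operands. Selector 0x18 carries slot||value
+		// (length 65 = 1 + 32 + 32) for a store-and-emit, while selector 0x11
+		// carries just the slot (length 33 = 1 + 32) for a load; the sload path
+		// further down ORs 0x8 into a selector byte so the caller emits the event.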
+ } else { + t.Fatal("first level must be sload or sstore") + } + shouldSucceed := true + delegateChangesStorageDest := true + storageDest := recurse[0].Name + for i := 1; i < len(recurse); i++ { + call := recurse[i] + prev := recurse[i-1] + args = argsForMulticall(call.opcode, builder.L2Info.GetAddress(prev.Name), nil, args) + if call.opcode == vm.STATICCALL && recurse[0].opcode == vm.SSTORE { + shouldSucceed = false + } + if delegateChangesStorageDest && call.opcode == vm.DELEGATECALL { + storageDest = call.Name + } else { + delegateChangesStorageDest = false + } + } + if recurse[0].opcode == vm.SLOAD { + // send event from caller on sload + args[5] = args[5] | 0x8 + } + multiCaller, err := mocksgen.NewMultiCallTest(builder.L2Info.GetAddress(recurse[len(recurse)-1].Name), builder.L2.Client) + Require(t, err) + ownerTransact := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + ownerTransact.GasLimit = 10000000 + tx, err := multiCaller.Fallback(&ownerTransact, args) + Require(t, err) + receipt, err := WaitForTx(ctx, builder.L2.Client, tx.Hash(), time.Second*3) + Require(t, err) + + if shouldSucceed { + if receipt.Status != types.ReceiptStatusSuccessful { + log.Error("error when shouldn't", "case", printRecurse(recurse)) + Fatal(t, arbutil.DetailTxError(ctx, builder.L2.Client, tx, receipt)) + } + if len(receipt.Logs) != 1 { + Fatal(t, "incorrect number of logs: ", len(receipt.Logs)) + } + if recurse[0].opcode == vm.SSTORE { + slotVals[storageDest] = val + storageEvt, err := multiCaller.ParseStorage(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(storageEvt.Data[:]) + gotSlot := common.BytesToHash(storageEvt.Slot[:]) + if gotData != val || gotSlot != slot || storageEvt.Write != (recurse[0].opcode == vm.SSTORE) { + Fatal(t, "unexpected event", gotData, val, gotSlot, slot, storageEvt.Write, recurse[0].opcode) + } + } else { + calledEvt, err := multiCaller.ParseCalled(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(calledEvt.ReturnData) + if gotData != slotVals[storageDest] { + Fatal(t, "unexpected event", gotData, val, slotVals[storageDest]) + } + } + } else if receipt.Status == types.ReceiptStatusSuccessful { + Fatal(t, "should have failed") + } + for contract, expected := range slotVals { + found, err := builder.L2.Client.StorageAt(ctx, builder.L2Info.GetAddress(contract), slot, receipt.BlockNumber) + Require(t, err) + foundHash := common.BytesToHash(found) + if expected != foundHash { + Fatal(t, "contract", contract, "expected", expected, "found", foundHash) + } + } + return receipt.BlockNumber.Uint64() +} + +func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit bool) { + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // set-up contracts + callsAddr := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + builder.L2Info.SetContract("multicall-rust", callsAddr) + multiCallWasm, _ := readWasmFile(t, rustFile("multicall")) + auth.GasLimit = 32000000 // skip gas estimation + multicallB := deployContract(t, ctx, auth, l2client, multiCallWasm) + builder.L2Info.SetContract("multicall-rust-b", multicallB) + multiAddr, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client) + builder.L2Info.SetContract("multicall-evm", multiAddr) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + Require(t, err) + slotVals := make(map[string]common.Hash) + rander := testhelpers.NewPseudoRandomDataSource(t, 0) + + // set-up 
validator + validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() + validatorConfig.BlockValidator.Enable = true + AddDefaultValNode(t, ctx, validatorConfig, jit, "") + valClient, valCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) + defer valCleanup() + + // store initial values + for _, contract := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + storeRecure := []multiCallRecurse{ + { + Name: contract, + opcode: vm.SSTORE, + }, + } + testProgramRecursiveCall(t, builder, slotVals, rander, storeRecure) + } + + // execute transactions + blockNum := uint64(0) + for { + item := int(rander.GetUint64()/4) % len(tests) + blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item]) + tests[item] = tests[len(tests)-1] + tests = tests[:len(tests)-1] + if len(tests)%100 == 0 { + log.Error("running transactions..", "blockNum", blockNum, "remaining", len(tests)) + } + if len(tests) == 0 { + break + } + } + + // wait for validation + for { + got := valClient.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(blockNum), time.Second*10) + if got { + break + } + log.Error("validating blocks..", "waiting for", blockNum, "validated", valClient.ConsensusNode.BlockValidator.GetValidated()) + } +} + +func TestProgramCallSimple(t *testing.T) { + tests := [][]multiCallRecurse{ + { + { + Name: "multicall-rust", + opcode: vm.SLOAD, + }, + { + Name: "multicall-rust", + opcode: vm.STATICCALL, + }, + { + Name: "multicall-rust", + opcode: vm.DELEGATECALL, + }, + }, + } + testProgramResursiveCalls(t, tests, true) +} diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index d5bbeaa079..2d188295ec 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -208,7 +208,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeA, l2nodeA.ArbDB, nil, - nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) @@ -261,7 +260,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeB, l2nodeB.ArbDB, nil, - nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 9c34a82553..bb78bda480 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/statetransfer" ) @@ -41,7 +42,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, daprovider.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -121,7 +122,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func FuzzStateTransition(f *testing.F) { f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { - if len(seqMsg) > 0 && arbstate.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { + if len(seqMsg) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { return } chainDb := rawdb.NewMemoryDatabase() @@ -176,7 +177,7 @@ func FuzzStateTransition(f *testing.F) { 
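+		// Assumed 40-byte batch header layout, mirroring the seqBatch writes
+		// here: [0:8]/[8:16] min/max timestamp, [16:24]/[24:32] min/max L1
+		// block (^uint64(0) leaves the max unconstrained), [32:40] the count
+		// of delayed messages read.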
 		binary.BigEndian.PutUint64(seqBatch[24:32], ^uint64(0))
 		binary.BigEndian.PutUint64(seqBatch[32:40], uint64(len(delayedMessages)))
 		if compressSeqMsg {
-			seqBatch = append(seqBatch, arbstate.BrotliMessageHeaderByte)
+			seqBatch = append(seqBatch, daprovider.BrotliMessageHeaderByte)
 			seqMsgCompressed, err := arbcompress.CompressLevel(seqMsg, 0)
 			if err != nil {
 				panic(fmt.Sprintf("failed to compress sequencer message: %v", err))
diff --git a/system_tests/stylus_test.go b/system_tests/stylus_test.go
index 46a9103b04..97f3041196 100644
--- a/system_tests/stylus_test.go
+++ b/system_tests/stylus_test.go
@@ -8,6 +8,8 @@ package arbtest
 
 import (
 	"testing"
+
+	"github.com/ethereum/go-ethereum/core/vm"
 )
 
 func TestProgramArbitratorKeccak(t *testing.T) {
@@ -67,3 +69,42 @@ func TestProgramArbitratorActivateFails(t *testing.T) {
 func TestProgramArbitratorEarlyExit(t *testing.T) {
 	testEarlyExit(t, false)
 }
+
+func fullRecurseTest() [][]multiCallRecurse {
+	result := make([][]multiCallRecurse, 0)
+	for _, op0 := range []vm.OpCode{vm.SSTORE, vm.SLOAD} {
+		for _, contract0 := range []string{"multicall-rust", "multicall-evm"} {
+			for _, op1 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} {
+				for _, contract1 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} {
+					for _, op2 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} {
+						for _, contract2 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} {
+							for _, op3 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} {
+								for _, contract3 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} {
+									recurse := make([]multiCallRecurse, 4)
+									recurse[0].opcode = op0
+									recurse[0].Name = contract0
+									recurse[1].opcode = op1
+									recurse[1].Name = contract1
+									recurse[2].opcode = op2
+									recurse[2].Name = contract2
+									recurse[3].opcode = op3
+									recurse[3].Name = contract3
+									result = append(result, recurse)
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	return result
+}
+
+func TestProgramLongCall(t *testing.T) {
+	testProgramResursiveCalls(t, fullRecurseTest(), true)
+}
+
+func TestProgramLongArbitratorCall(t *testing.T) {
+	testProgramResursiveCalls(t, fullRecurseTest(), false)
+}
diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go
index a270cca76b..a49e059351 100644
--- a/system_tests/transfer_test.go
+++ b/system_tests/transfer_test.go
@@ -4,10 +4,14 @@
 package arbtest
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"math/big"
 	"testing"
+
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/common"
 )
 
 func TestTransfer(t *testing.T) {
@@ -36,3 +40,49 @@ func TestTransfer(t *testing.T) {
 		Fatal(t, "Unexpected recipient balance: ", bal2)
 	}
 }
+
+func TestP256Verify(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for _, tc := range []struct {
+		desc           string
+		initialVersion uint64
+		want           []byte
+	}{
+		{
+			desc:           "p256 should not be enabled on arbOS 20",
+			initialVersion: 20,
+			want:           nil,
+		},
+		{
+			desc:           "p256 should be enabled on arbOS 30",
+			initialVersion: 30,
+			want:           common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
+		},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+			builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = tc.initialVersion
+			cleanup := builder.Build(t)
+			defer cleanup()
+			addr := common.BytesToAddress([]byte{0x01, 0x00})
+			got, err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{
+				From:  builder.L2Info.GetAddress("Owner"),
+				To:    &addr,
+				Gas:   builder.L2Info.TransferGas,
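+				// RIP-7212 input: 160 bytes, the 32-byte message hash followed
+				// by the signature (r, s) and public key (x, y), 32 bytes
+				// each; a valid signature yields a 32-byte big-endian 1, an
+				// invalid one yields empty output.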
+				Data:  common.Hex2Bytes("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e"),
+				Value: big.NewInt(1e12),
+			}, nil)
+			if err != nil {
+				t.Fatalf("CallContract() unexpected error: %v", err)
+			}
+			if !bytes.Equal(got, tc.want) {
+				t.Errorf("P256Verify() = %v, want: %v", got, tc.want)
+			}
+		})
+	}
+}
diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go
index 664dbb5e30..73849d0d3a 100644
--- a/util/headerreader/blob_client.go
+++ b/util/headerreader/blob_client.go
@@ -13,6 +13,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -188,8 +189,14 @@ const trailingCharsOfResponse = 25
 
 func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) {
 	rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot))
-	if err != nil {
-		return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err)
+	if err != nil || len(rawData) == 0 {
+		// blobs are pruned after 4096 epochs (1 epoch = 32 slots); estimate whether a non-archive endpoint would already have pruned the requested slot
+		roughAgeOfSlot := uint64(time.Now().Unix()) - (b.genesisTime + slot*b.secondsPerSlot)
+		if roughAgeOfSlot > b.secondsPerSlot*32*4096 {
+			return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching expired blobs in slot %d; an archive endpoint is required, please refer to https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers, err: %w", slot, err)
+		} else {
+			return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching non-expired blobs in slot %d; if using a prysm endpoint, try the --enable-experimental-backfill flag, err: %w", slot, err)
+		}
 	}
 	var response []blobResponseItem
 	if err := json.Unmarshal(rawData, &response); err != nil {
diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go
index 8a85aa7115..1a3ccfa340 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -98,7 +98,7 @@ func (machine *JitMachine) prove(
 	// Wait for the forked process to connect
 	conn, err := tcp.Accept()
 	if err != nil {
-		return state, err
+		return state, fmt.Errorf("error waiting for jit machine to connect back to validator: %w", err)
 	}
 	go func() {
 		<-ctx.Done()
diff --git a/validator/server_jit/machine_loader.go b/validator/server_jit/machine_loader.go
index 3a831928b7..b2bdb65322 100644
--- a/validator/server_jit/machine_loader.go
+++ b/validator/server_jit/machine_loader.go
@@ -27,13 +27,16 @@ var DefaultJitMachineConfig = JitMachineConfig{
 
 func getJitPath() (string, error) {
 	var jitBinary string
 	executable, err := os.Executable()
 	if err == nil {
-		if strings.Contains(filepath.Base(executable), "test") {
+		if strings.Contains(filepath.Base(executable), "test") || strings.Contains(filepath.Dir(executable), "system_tests") {
 			_, thisfile, _, _ := runtime.Caller(0)
 			projectDir := filepath.Dir(filepath.Dir(filepath.Dir(thisfile)))
 			jitBinary = filepath.Join(projectDir, "target", "bin", "jit")
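+			// Under `go test`, os.Executable() is the per-package test binary
+			// (or lives under system_tests), so the jit binary is resolved
+			// from the repo's target/bin rather than from beside the executable.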
 		} else {
 			jitBinary = filepath.Join(filepath.Dir(executable), "jit")
 		}
 		_, err = os.Stat(jitBinary)
 	}
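For reference, a minimal, self-contained sketch of the blob-retention arithmetic used in the blob_client.go hunk above, assuming the Deneb minimum retention of 4096 epochs at 32 slots per epoch; the helper name blobsLikelyPruned and the mainnet-style genesis constants are illustrative only, not part of this diff:

package main

import (
	"fmt"
	"time"
)

const (
	slotsPerEpoch   = 32   // beacon chain slots per epoch
	retentionEpochs = 4096 // assumed minimum blob sidecar retention window (Deneb)
)

// blobsLikelyPruned mirrors the roughAgeOfSlot check above: it reports whether
// a non-archive beacon endpoint would likely have pruned sidecars for slot.
func blobsLikelyPruned(genesisTime, secondsPerSlot, slot uint64) bool {
	roughAgeOfSlot := uint64(time.Now().Unix()) - (genesisTime + slot*secondsPerSlot)
	return roughAgeOfSlot > secondsPerSlot*slotsPerEpoch*retentionEpochs
}

func main() {
	// Mainnet-style parameters: genesis 2020-12-01 12:00:23 UTC, 12-second slots.
	const genesis, slotLen = uint64(1606824023), uint64(12)
	currentSlot := (uint64(time.Now().Unix()) - genesis) / slotLen
	fmt.Println(blobsLikelyPruned(genesis, slotLen, 1))           // true: genesis-era slot is long pruned
	fmt.Println(blobsLikelyPruned(genesis, slotLen, currentSlot)) // false: current slot is still retained
}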