diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index bc2bd4bc48..a147786086 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -36,6 +36,7 @@ use once_cell::sync::OnceCell; use static_assertions::const_assert_eq; use std::{ ffi::CStr, + marker::PhantomData, num::NonZeroUsize, os::raw::{c_char, c_int}, path::Path, @@ -59,11 +60,67 @@ pub struct CByteArray { } #[repr(C)] -#[derive(Clone, Copy)] -pub struct RustByteArray { +pub struct RustSlice<'a> { + pub ptr: *const u8, + pub len: usize, + pub phantom: PhantomData<&'a [u8]>, +} + +impl<'a> RustSlice<'a> { + pub fn new(slice: &'a [u8]) -> Self { + if slice.is_empty() { + return Self { + ptr: ptr::null(), + len: 0, + phantom: PhantomData, + }; + } + Self { + ptr: slice.as_ptr(), + len: slice.len(), + phantom: PhantomData, + } + } +} + +#[repr(C)] +pub struct RustBytes { pub ptr: *mut u8, pub len: usize, - pub capacity: usize, + pub cap: usize, +} + +impl RustBytes { + pub unsafe fn into_vec(self) -> Vec { + Vec::from_raw_parts(self.ptr, self.len, self.cap) + } + + pub unsafe fn write(&mut self, mut vec: Vec) { + if vec.capacity() == 0 { + *self = RustBytes { + ptr: ptr::null_mut(), + len: 0, + cap: 0, + }; + return; + } + self.ptr = vec.as_mut_ptr(); + self.len = vec.len(); + self.cap = vec.capacity(); + std::mem::forget(vec); + } +} + +/// Frees the vector. Does nothing when the vector is null. +/// +/// # Safety +/// +/// Must only be called once per vec. +#[no_mangle] +pub unsafe extern "C" fn free_rust_bytes(vec: RustBytes) { + if !vec.ptr.is_null() { + drop(vec.into_vec()) + } } #[no_mangle] @@ -410,18 +467,6 @@ pub unsafe extern "C" fn arbitrator_module_root(mach: *mut Machine) -> Bytes32 { #[no_mangle] #[cfg(feature = "native")] -pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArray { - let mut proof = (*mach).serialize_proof(); - let ret = RustByteArray { - ptr: proof.as_mut_ptr(), - len: proof.len(), - capacity: proof.capacity(), - }; - std::mem::forget(proof); - ret -} - -#[no_mangle] -pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) { - drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity)) +pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine, out: *mut RustBytes) { + (*out).write((*mach).serialize_proof()); } diff --git a/arbitrator/stylus/src/evm_api.rs b/arbitrator/stylus/src/evm_api.rs index 0dd27e3f8c..7aa605dfe7 100644 --- a/arbitrator/stylus/src/evm_api.rs +++ b/arbitrator/stylus/src/evm_api.rs @@ -1,11 +1,12 @@ // Copyright 2022-2024, Offchain Labs, Inc. 
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE -use crate::{GoSliceData, RustSlice}; +use crate::GoSliceData; use arbutil::evm::{ api::{EvmApiMethod, Gas, EVM_API_METHOD_REQ_OFFSET}, req::RequestHandler, }; +use prover::RustSlice; #[repr(C)] pub struct NativeRequestHandler { diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index e7f10c2400..c73c4b2c2e 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -15,9 +15,12 @@ use cache::{deserialize_module, CacheMetrics, InitCache}; use evm_api::NativeRequestHandler; use eyre::ErrReport; use native::NativeInstance; -use prover::programs::{prelude::*, StylusData}; +use prover::{ + programs::{prelude::*, StylusData}, + RustBytes, +}; use run::RunProgram; -use std::{marker::PhantomData, mem, ptr}; +use std::ptr; use target_cache::{target_cache_get, target_cache_set}; pub use brotli; @@ -76,52 +79,15 @@ impl DataReader for GoSliceData { } } -#[repr(C)] -pub struct RustSlice<'a> { - ptr: *const u8, - len: usize, - phantom: PhantomData<&'a [u8]>, -} - -impl<'a> RustSlice<'a> { - fn new(slice: &'a [u8]) -> Self { - Self { - ptr: slice.as_ptr(), - len: slice.len(), - phantom: PhantomData, - } - } -} - -#[repr(C)] -pub struct RustBytes { - ptr: *mut u8, - len: usize, - cap: usize, +unsafe fn write_err(output: &mut RustBytes, err: ErrReport) -> UserOutcomeKind { + output.write(err.debug_bytes()); + UserOutcomeKind::Failure } -impl RustBytes { - unsafe fn into_vec(self) -> Vec { - Vec::from_raw_parts(self.ptr, self.len, self.cap) - } - - unsafe fn write(&mut self, mut vec: Vec) { - self.ptr = vec.as_mut_ptr(); - self.len = vec.len(); - self.cap = vec.capacity(); - mem::forget(vec); - } - - unsafe fn write_err(&mut self, err: ErrReport) -> UserOutcomeKind { - self.write(err.debug_bytes()); - UserOutcomeKind::Failure - } - - unsafe fn write_outcome(&mut self, outcome: UserOutcome) -> UserOutcomeKind { - let (status, outs) = outcome.into_data(); - self.write(outs); - status - } +unsafe fn write_outcome(output: &mut RustBytes, outcome: UserOutcome) -> UserOutcomeKind { + let (status, outs) = outcome.into_data(); + output.write(outs); + status } /// "activates" a user wasm. 
@@ -164,7 +130,7 @@ pub unsafe extern "C" fn stylus_activate( gas, ) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; *module_hash = module.hash(); @@ -194,16 +160,16 @@ pub unsafe extern "C" fn stylus_compile( let output = &mut *output; let name = match String::from_utf8(name.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; let target = match target_cache_get(&name) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; let asm = match native::compile(wasm, version, debug, target) { Ok(val) => val, - Err(err) => return output.write_err(err), + Err(err) => return write_err(output, err), }; output.write(asm); @@ -218,7 +184,7 @@ pub unsafe extern "C" fn wat_to_wasm(wat: GoSliceData, output: *mut RustBytes) - let output = &mut *output; let wasm = match wasmer::wat2wasm(wat.slice()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; output.write(wasm.into_owned()); UserOutcomeKind::Success @@ -241,16 +207,16 @@ pub unsafe extern "C" fn stylus_target_set( let output = &mut *output; let name = match String::from_utf8(name.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; let desc_str = match String::from_utf8(description.slice().to_vec()) { Ok(val) => val, - Err(err) => return output.write_err(err.into()), + Err(err) => return write_err(output, err.into()), }; if let Err(err) = target_cache_set(name, desc_str, native) { - return output.write_err(err); + return write_err(output, err); }; UserOutcomeKind::Success @@ -298,8 +264,8 @@ pub unsafe extern "C" fn stylus_call( }; let status = match instance.run_main(&calldata, config, ink) { - Err(e) | Ok(UserOutcome::Failure(e)) => output.write_err(e.wrap_err("call failed")), - Ok(outcome) => output.write_outcome(outcome), + Err(e) | Ok(UserOutcome::Failure(e)) => write_err(output, e.wrap_err("call failed")), + Ok(outcome) => write_outcome(output, outcome), }; let ink_left = match status { UserOutcomeKind::OutOfStack => Ink(0), // take all gas when out of stack @@ -352,18 +318,6 @@ pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) { InitCache::clear_long_term(arbos_tag); } -/// Frees the vector. Does nothing when the vector is null. -/// -/// # Safety -/// -/// Must only be called once per vec. -#[no_mangle] -pub unsafe extern "C" fn stylus_drop_vec(vec: RustBytes) { - if !vec.ptr.is_null() { - mem::drop(vec.into_vec()) - } -} - /// Gets cache metrics. 
/// /// # Safety diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index a3256cb78f..45bd70c92b 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/bold/solgen/go/bridgegen" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/redislock" @@ -44,7 +45,6 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/execution" - "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" @@ -80,8 +80,10 @@ var ( const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" - sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" - sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" + sequencerBatchPostDelayProofMethodName = "addSequencerL2BatchFromOriginDelayProof" + sequencerBatchPostWithBlobsDelayProofMethodName = "addSequencerL2BatchFromBlobsDelayProof" ) type batchPosterPosition struct { @@ -172,6 +174,7 @@ type BatchPosterConfig struct { ReorgResistanceMargin time.Duration `koanf:"reorg-resistance-margin" reload:"hot"` CheckBatchCorrectness bool `koanf:"check-batch-correctness"` MaxEmptyBatchDelay time.Duration `koanf:"max-empty-batch-delay"` + DelayBufferThresholdMargin uint64 `koanf:"delay-buffer-threshold-margin"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -230,6 +233,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".reorg-resistance-margin", DefaultBatchPosterConfig.ReorgResistanceMargin, "do not post batch if its within this duration from layer 1 minimum bounds. 
Requires l1-block-bound option not be set to \"ignore\"") f.Bool(prefix+".check-batch-correctness", DefaultBatchPosterConfig.CheckBatchCorrectness, "setting this to true will run the batch against an inbox multiplexer and verifies that it produces the correct set of messages") f.Duration(prefix+".max-empty-batch-delay", DefaultBatchPosterConfig.MaxEmptyBatchDelay, "maximum empty batch posting delay, batch poster will only be able to post an empty batch if this time period building a batch has passed") + f.Uint64(prefix+".delay-buffer-threshold-margin", DefaultBatchPosterConfig.DelayBufferThresholdMargin, "the number of blocks to post the batch before reaching the delay buffer threshold") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) @@ -263,6 +267,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ ReorgResistanceMargin: 10 * time.Minute, CheckBatchCorrectness: true, MaxEmptyBatchDelay: 3 * 24 * time.Hour, + DelayBufferThresholdMargin: 25, // 5 minutes considering 12-second blocks } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ @@ -294,6 +299,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ UseAccessLists: true, GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2, CheckBatchCorrectness: true, + DelayBufferThresholdMargin: 0, } type BatchPosterOpts struct { @@ -725,6 +731,7 @@ type buildingBatch struct { haveUsefulMessage bool use4844 bool muxBackend *simulatedMuxBackend + firstDelayedMsg *arbostypes.MessageWithMetadata firstNonDelayedMsg *arbostypes.MessageWithMetadata firstUsefulMsg *arbostypes.MessageWithMetadata } @@ -963,41 +970,45 @@ func (b *BatchPoster) encodeAddBatch( l2MessageData []byte, delayedMsg uint64, use4844 bool, + delayProof *bridgegen.DelayProof, ) ([]byte, []kzg4844.Blob, error) { - methodName := sequencerBatchPostMethodName + var methodName string if use4844 { - methodName = sequencerBatchPostWithBlobsMethodName + if delayProof != nil { + methodName = sequencerBatchPostWithBlobsDelayProofMethodName + } else { + methodName = sequencerBatchPostWithBlobsMethodName + } + } else if delayProof != nil { + methodName = sequencerBatchPostDelayProofMethodName + } else { + methodName = sequencerBatchPostMethodName } method, ok := b.seqInboxABI.Methods[methodName] if !ok { return nil, nil, errors.New("failed to find add batch method") } - var calldata []byte + var args []any var kzgBlobs []kzg4844.Blob var err error + args = append(args, seqNum) if use4844 { kzgBlobs, err = blobs.EncodeBlobs(l2MessageData) if err != nil { return nil, nil, fmt.Errorf("failed to encode blobs: %w", err) } - // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. - calldata, err = method.Inputs.Pack( - seqNum, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) } else { - calldata, err = method.Inputs.Pack( - seqNum, - l2MessageData, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) + // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. 
+		args = append(args, l2MessageData)
 	}
+	args = append(args, new(big.Int).SetUint64(delayedMsg))
+	args = append(args, b.config().gasRefunder)
+	args = append(args, new(big.Int).SetUint64(uint64(prevMsgNum)))
+	args = append(args, new(big.Int).SetUint64(uint64(newMsgNum)))
+	if delayProof != nil {
+		args = append(args, delayProof)
+	}
+	calldata, err := method.Inputs.Pack(args...)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -1023,7 +1034,17 @@ func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimat
 	return uint64(gas), err
 }
 
-func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) {
+func (b *BatchPoster) estimateGas(
+	ctx context.Context,
+	sequencerMessage []byte,
+	delayedMessages uint64,
+	realData []byte,
+	realBlobs []kzg4844.Blob,
+	realNonce uint64,
+	realAccessList types.AccessList,
+	delayProof *bridgegen.DelayProof,
+) (uint64, error) {
+
 	config := b.config()
 	rpcClient := b.l1Reader.Client()
 	rawRpcClient := rpcClient.Client()
@@ -1065,7 +1086,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte,
 	// However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero.
 	// Because we're likely estimating against older state, this might not be the actual next message,
 	// but the gas used should be the same.
-	data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0)
+	data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0, delayProof)
 	if err != nil {
 		return 0, err
 	}
@@ -1319,7 +1340,11 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 				b.building.firstUsefulMsg = msg
 			}
 		}
-		if !isDelayed && b.building.firstNonDelayedMsg == nil {
+		if isDelayed {
+			if b.building.firstDelayedMsg == nil {
+				b.building.firstDelayedMsg = msg
+			}
+		} else if b.building.firstNonDelayedMsg == nil {
 			b.building.firstNonDelayedMsg = msg
 		}
 		b.building.msgCount++
@@ -1334,6 +1359,27 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		}
 	}
 
+	delayBuffer, err := GetDelayBufferConfig(ctx, b.seqInbox)
+	if err != nil {
+		return false, err
+	}
+	if delayBuffer.Enabled && b.building.firstDelayedMsg != nil {
+		latestHeader, err := b.l1Reader.LastHeader(ctx)
+		if err != nil {
+			return false, err
+		}
+		latestBlock := latestHeader.Number.Uint64()
+		firstDelayedMsgBlock := b.building.firstDelayedMsg.Message.Header.BlockNumber
+		thresholdLimit := firstDelayedMsgBlock + delayBuffer.Threshold - b.config().DelayBufferThresholdMargin
+		if latestBlock >= thresholdLimit {
+			log.Info("force post batch because of the delay buffer",
+				"firstDelayedMsgBlock", firstDelayedMsgBlock,
+				"threshold", delayBuffer.Threshold,
+				"latestBlock", latestBlock)
+			forcePostBatch = true
+		}
+	}
+
 	if b.building.firstNonDelayedMsg != nil && hasL1Bound && config.ReorgResistanceMargin > 0 {
 		firstMsgBlockNumber := b.building.firstNonDelayedMsg.Message.Header.BlockNumber
 		firstMsgTimeStamp := b.building.firstNonDelayedMsg.Message.Header.Timestamp
@@ -1425,7 +1471,15 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		prevMessageCount = 0
 	}
 
-	data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), prevMessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844)
+	var delayProof *bridgegen.DelayProof
+	if delayBuffer.Enabled && b.building.firstDelayedMsg != nil {
+		delayProof, err = GenDelayProof(ctx, b.building.firstDelayedMsg, b.inbox)
+		if err != nil {
+			return false, fmt.Errorf("failed to generate delay proof: %w", err)
+		}
+	}
+
+	data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), prevMessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844, delayProof)
 	if err != nil {
 		return false, err
 	}
@@ -1440,7 +1494,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 	// In theory, this might reduce gas usage, but only by a factor that's already
 	// accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user
 	// posts a new delayed message that we didn't see while gas estimating.
-	gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList)
+	gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList, delayProof)
 	if err != nil {
 		return false, err
 	}
diff --git a/arbnode/delay_buffer.go b/arbnode/delay_buffer.go
new file mode 100644
index 0000000000..3f0514bbe2
--- /dev/null
+++ b/arbnode/delay_buffer.go
@@ -0,0 +1,87 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+// This file contains functions related to the delay buffer feature that are used mostly in the
+// batch poster.
+
+package arbnode
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+
+	"github.com/offchainlabs/bold/solgen/go/bridgegen"
+	"github.com/offchainlabs/nitro/arbos/arbostypes"
+	"github.com/offchainlabs/nitro/util/headerreader"
+)
+
+// DelayBufferConfig originates from the sequencer inbox contract.
+type DelayBufferConfig struct {
+	Enabled   bool
+	Threshold uint64
+}
+
+// GetDelayBufferConfig gets the delay buffer config from the sequencer inbox contract.
+// If the contract doesn't support the delay buffer, it returns a config with Enabled set to false.
+func GetDelayBufferConfig(ctx context.Context, sequencerInbox *bridgegen.SequencerInbox) (
+	*DelayBufferConfig, error) {
+
+	callOpts := bind.CallOpts{Context: ctx}
+	enabled, err := sequencerInbox.IsDelayBufferable(&callOpts)
+	if err != nil {
+		if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) {
+			return &DelayBufferConfig{Enabled: false}, nil
+		}
+		return nil, fmt.Errorf("retrieve SequencerInbox.isDelayBufferable: %w", err)
+	}
+	if !enabled {
+		return &DelayBufferConfig{Enabled: false}, nil
+	}
+	bufferData, err := sequencerInbox.Buffer(&callOpts)
+	if err != nil {
+		return nil, fmt.Errorf("retrieve SequencerInbox.buffer: %w", err)
+	}
+	config := &DelayBufferConfig{
+		Enabled:   true,
+		Threshold: bufferData.Threshold,
+	}
+	return config, nil
+}
+
+// GenDelayProof generates the delay proof based on the batch's first delayed message and the delayed
+// accumulator from the inbox.
+func GenDelayProof(ctx context.Context, message *arbostypes.MessageWithMetadata, inbox *InboxTracker) ( + *bridgegen.DelayProof, error) { + + if message.DelayedMessagesRead == 0 { + return nil, fmt.Errorf("BUG: trying to generate delay proof without delayed message") + } + seqNum := message.DelayedMessagesRead - 1 + var beforeDelayedAcc common.Hash + if seqNum > 0 { + var err error + beforeDelayedAcc, err = inbox.GetDelayedAcc(seqNum - 1) + if err != nil { + return nil, err + } + } + delayedMessage := bridgegen.MessagesMessage{ + Kind: message.Message.Header.Kind, + Sender: message.Message.Header.Poster, + BlockNumber: message.Message.Header.BlockNumber, + Timestamp: message.Message.Header.Timestamp, + InboxSeqNum: new(big.Int).SetUint64(seqNum), + BaseFeeL1: message.Message.Header.L1BaseFee, + MessageDataHash: crypto.Keccak256Hash(message.Message.L2msg), + } + delayProof := &bridgegen.DelayProof{ + BeforeDelayedAcc: beforeDelayedAcc, + DelayedMessage: delayedMessage, + } + return delayProof, nil +} diff --git a/arbos/programs/native.go b/arbos/programs/native.go index f162704995..cfc1170c5b 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -450,10 +450,16 @@ func addressToBytes20(addr common.Address) bytes20 { } func (slice *rustSlice) read() []byte { + if slice.len == 0 { + return nil + } return arbutil.PointerToSlice((*byte)(slice.ptr), int(slice.len)) } func (vec *rustBytes) read() []byte { + if vec.len == 0 { + return nil + } return arbutil.PointerToSlice((*byte)(vec.ptr), int(vec.len)) } @@ -464,7 +470,7 @@ func (vec *rustBytes) intoBytes() []byte { } func (vec *rustBytes) drop() { - C.stylus_drop_vec(*vec) + C.free_rust_bytes(*vec) } func goSlice(slice []byte) C.GoSliceData { diff --git a/nitro-testnode b/nitro-testnode index fa19e22104..c177f28234 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit fa19e2210403ad24519ea46c2d337f54a9f47593 +Subproject commit c177f282340285bcdae2d6a784547e2bb8b97498 diff --git a/precompiles/ArbAggregator.go b/precompiles/ArbAggregator.go index b74e280fe8..cee395189c 100644 --- a/precompiles/ArbAggregator.go +++ b/precompiles/ArbAggregator.go @@ -36,6 +36,7 @@ func (con ArbAggregator) GetBatchPosters(c ctx, evm mech) ([]addr, error) { return c.State.L1PricingState().BatchPosterTable().AllPosters(65536) } +// Adds additional batch poster address func (con ArbAggregator) AddBatchPoster(c ctx, evm mech, newBatchPoster addr) error { isOwner, err := c.State.ChainOwners().IsMember(c.caller) if err != nil { @@ -90,12 +91,14 @@ func (con ArbAggregator) SetFeeCollector(c ctx, evm mech, batchPoster addr, newF } // GetTxBaseFee gets an aggregator's current fixed fee to submit a tx +// Deprecated: always returns zero func (con ArbAggregator) GetTxBaseFee(c ctx, evm mech, aggregator addr) (huge, error) { // This is deprecated and now always returns zero. return big.NewInt(0), nil } // SetTxBaseFee sets an aggregator's fixed fee (caller must be the aggregator, its fee collector, or an owner) +// Deprecated: no-op func (con ArbAggregator) SetTxBaseFee(c ctx, evm mech, aggregator addr, feeInL1Gas huge) error { // This is deprecated and is now a no-op. 
return nil diff --git a/precompiles/ArbDebug.go b/precompiles/ArbDebug.go index bf85d5e18f..60e520da3e 100644 --- a/precompiles/ArbDebug.go +++ b/precompiles/ArbDebug.go @@ -24,6 +24,7 @@ type ArbDebug struct { UnusedError func() error } +// Emits events with values based on the args provided func (con ArbDebug) Events(c ctx, evm mech, paid huge, flag bool, value bytes32) (addr, huge, error) { // Emits 2 events that cover each case // Basic tests an index'd value & a normal value @@ -42,11 +43,13 @@ func (con ArbDebug) Events(c ctx, evm mech, paid huge, flag bool, value bytes32) return c.caller, paid, nil } +// Tries (and fails) to emit logs in a view context func (con ArbDebug) EventsView(c ctx, evm mech) error { _, _, err := con.Events(c, evm, common.Big0, true, bytes32{}) return err } +// Throws a custom error func (con ArbDebug) CustomRevert(c ctx, number uint64) error { return con.CustomError(number, "This spider family wards off bugs: /\\oo/\\ //\\(oo)//\\ /\\oo/\\", true) } @@ -61,6 +64,7 @@ func (con ArbDebug) Panic(c ctx, evm mech) error { panic("called ArbDebug's debug-only Panic method") } +// Throws a hardcoded error func (con ArbDebug) LegacyError(c ctx) error { return errors.New("example legacy error") } diff --git a/precompiles/ArbOwner.go b/precompiles/ArbOwner.go index 90a7b4ccc2..a6df0bd0dc 100644 --- a/precompiles/ArbOwner.go +++ b/precompiles/ArbOwner.go @@ -120,38 +120,48 @@ func (con ArbOwner) ScheduleArbOSUpgrade(c ctx, evm mech, newVersion uint64, tim return c.State.ScheduleArbOSUpgrade(newVersion, timestamp) } +// Sets equilibration units parameter for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingEquilibrationUnits(c ctx, evm mech, equilibrationUnits huge) error { return c.State.L1PricingState().SetEquilibrationUnits(equilibrationUnits) } +// Sets inertia parameter for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingInertia(c ctx, evm mech, inertia uint64) error { return c.State.L1PricingState().SetInertia(inertia) } +// Sets reward recipient address for L1 price adjustment algorithm func (con ArbOwner) SetL1PricingRewardRecipient(c ctx, evm mech, recipient addr) error { return c.State.L1PricingState().SetPayRewardsTo(recipient) } +// Sets reward amount for L1 price adjustment algorithm, in wei per unit func (con ArbOwner) SetL1PricingRewardRate(c ctx, evm mech, weiPerUnit uint64) error { return c.State.L1PricingState().SetPerUnitReward(weiPerUnit) } +// Set how much ArbOS charges per L1 gas spent on transaction data. 
func (con ArbOwner) SetL1PricePerUnit(c ctx, evm mech, pricePerUnit *big.Int) error { return c.State.L1PricingState().SetPricePerUnit(pricePerUnit) } +// Sets the base charge (in L1 gas) attributed to each data batch in the calldata pricer func (con ArbOwner) SetPerBatchGasCharge(c ctx, evm mech, cost int64) error { return c.State.L1PricingState().SetPerBatchGasCost(cost) } +// Sets the cost amortization cap in basis points func (con ArbOwner) SetAmortizedCostCapBips(c ctx, evm mech, cap uint64) error { return c.State.L1PricingState().SetAmortizedCostCapBips(cap) } +// Sets the Brotli compression level used for fast compression +// Available in ArbOS version 12 with default level as 1 func (con ArbOwner) SetBrotliCompressionLevel(c ctx, evm mech, level uint64) error { return c.State.SetBrotliCompressionLevel(level) } +// Releases surplus funds from L1PricerFundsPoolAddress for use func (con ArbOwner) ReleaseL1PricerSurplusFunds(c ctx, evm mech, maxWeiToRelease huge) (huge, error) { balance := evm.StateDB.GetBalance(l1pricing.L1PricerFundsPoolAddress) l1p := c.State.L1PricingState() @@ -295,6 +305,7 @@ func (con ArbOwner) RemoveWasmCacheManager(c ctx, _ mech, manager addr) error { return managers.Remove(manager, c.State.ArbOSVersion()) } +// Sets serialized chain config in ArbOS state func (con ArbOwner) SetChainConfig(c ctx, evm mech, serializedChainConfig []byte) error { if c == nil { return errors.New("nil context") diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 49cc9a3264..8fb5aa9391 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -222,6 +222,9 @@ func (con ArbRetryableTx) Cancel(c ctx, evm mech, ticketId bytes32) error { return con.Canceled(c, evm, ticketId) } +// Gets the redeemer of the current retryable redeem attempt. +// Returns the zero address if the current transaction is not a retryable redeem attempt. +// If this is an auto-redeem, returns the fee refund address of the retryable. func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, error) { if c.txProcessor.CurrentRefundTo != nil { return *c.txProcessor.CurrentRefundTo, nil @@ -229,6 +232,7 @@ func (con ArbRetryableTx) GetCurrentRedeemer(c ctx, evm mech) (common.Address, e return common.Address{}, nil } +// Do not call. This method represents a retryable submission to aid explorers. Calling it will always revert. 
func (con ArbRetryableTx) SubmitRetryable( c ctx, evm mech, requestId bytes32, l1BaseFee, deposit, callvalue, gasFeeCap huge, gasLimit uint64, maxSubmissionFee huge, diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 39d7fa576c..ee0c3b4a3a 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -6,6 +6,7 @@ package arbtest import ( "context" "crypto/rand" + "errors" "fmt" "math/big" "strings" @@ -15,6 +16,7 @@ import ( "github.com/andybalholm/brotli" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -364,3 +366,119 @@ func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchEnabled(t *testi func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchDisabled(t *testing.T) { testAllowPostingFirstBatchWhenSequencerMessageCountMismatch(t, false) } + +func GetBatchCount(t *testing.T, builder *NodeBuilder) uint64 { + t.Helper() + sequenceInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) + Require(t, err) + batchCount, err := sequenceInbox.BatchCount(&bind.CallOpts{Context: builder.ctx}) + Require(t, err) + return batchCount.Uint64() +} + +func CheckBatchCount(t *testing.T, builder *NodeBuilder, want uint64) { + if got := GetBatchCount(t, builder); got != want { + t.Fatalf("invalid batch count, want %v, got %v", want, got) + } +} + +func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) { + const messagesPerBatch = 3 + const numBatches = 3 + var threshold uint64 + if delayBufferEnabled { + threshold = 100 + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithBoldDeployment(). 
+ WithDelayBuffer(threshold) + builder.L2Info.GenerateAccount("User2") + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer + cleanup := builder.Build(t) + defer cleanup() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() + + initialBatchCount := GetBatchCount(t, builder) + for batch := uint64(0); batch < numBatches; batch++ { + txs := make(types.Transactions, messagesPerBatch) + for i := range txs { + txs[i] = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + } + SendSignedTxesInBatchViaL1(t, ctx, builder.L1Info, builder.L1.Client, builder.L2.Client, txs) + + // Check batch wasn't sent + _, err := WaitForTx(ctx, testClientB.Client, txs[0].Hash(), 100*time.Millisecond) + if err == nil || !errors.Is(err, context.DeadlineExceeded) { + Fatal(t, "expected context-deadline exceeded error, but got:", err) + } + CheckBatchCount(t, builder, initialBatchCount+batch) + + // Advance L1 to force a batch given the delay buffer threshold + AdvanceL1(t, ctx, builder.L1.Client, builder.L1Info, int(threshold)) // #nosec G115 + if !delayBufferEnabled { + // If the delay buffer is disabled, set max delay to zero to force it + CheckBatchCount(t, builder, initialBatchCount+batch) + builder.nodeConfig.BatchPoster.MaxDelay = 0 + } + for _, tx := range txs { + _, err := testClientB.EnsureTxSucceeded(tx) + Require(t, err, "tx not found on second node") + } + CheckBatchCount(t, builder, initialBatchCount+batch+1) + if !delayBufferEnabled { + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour + } + } +} + +func TestBatchPosterDelayBufferEnabled(t *testing.T) { + testBatchPosterDelayBuffer(t, true) +} + +func TestBatchPosterDelayBufferDisabled(t *testing.T) { + testBatchPosterDelayBuffer(t, false) +} + +func TestBatchPosterDelayBufferDontForceNonDelayedMessages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const threshold = 100 + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithBoldDeployment(). 
+		WithDelayBuffer(threshold)
+	builder.L2Info.GenerateAccount("User2")
+	builder.nodeConfig.BatchPoster.MaxDelay = time.Hour // set high max-delay so we can test the delay buffer
+	cleanup := builder.Build(t)
+	defer cleanup()
+	testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{})
+	defer cleanupB()
+
+	// Send non-delayed message and advance L1
+	initialBatchCount := GetBatchCount(t, builder)
+	const numTxs = 3
+	txs := make(types.Transactions, numTxs)
+	for i := range txs {
+		txs[i] = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
+	}
+	builder.L2.SendWaitTestTransactions(t, txs)
+	AdvanceL1(t, ctx, builder.L1.Client, builder.L1Info, threshold)
+
+	// Even advancing the L1, the batch won't be posted because it doesn't contain a delayed message
+	CheckBatchCount(t, builder, initialBatchCount)
+
+	// Set delay to zero to force non-delayed messages
+	builder.nodeConfig.BatchPoster.MaxDelay = 0
+	for _, tx := range txs {
+		_, err := testClientB.EnsureTxSucceeded(tx)
+		Require(t, err, "tx not found on second node")
+	}
+	CheckBatchCount(t, builder, initialBatchCount+1)
+}
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 72d2645910..346a5feec4 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -258,6 +258,7 @@ type NodeBuilder struct {
 	l3InitMessage               *arbostypes.ParsedInitMessage
 	withProdConfirmPeriodBlocks bool
 	wasmCacheTag                uint32
+	delayBufferThreshold        uint64
 
 	// Created nodes
 	L1 *TestClient
@@ -375,6 +376,14 @@ func (b *NodeBuilder) WithStylusLongTermCache(enabled bool) *NodeBuilder {
 	return b
 }
 
+// WithDelayBuffer sets the delay-buffer threshold, which is the number of blocks the batch-poster
+// is allowed to delay a batch with a delayed message.
+// Setting the threshold to zero disables the delay buffer (default behaviour).
+func (b *NodeBuilder) WithDelayBuffer(threshold uint64) *NodeBuilder {
+	b.delayBufferThreshold = threshold
+	return b
+}
+
 func (b *NodeBuilder) Build(t *testing.T) func() {
 	b.CheckConfig(t)
 	if b.withL1 {
@@ -425,6 +434,7 @@ func (b *NodeBuilder) BuildL1(t *testing.T) {
 		b.withProdConfirmPeriodBlocks,
 		true,
 		b.deployBold,
+		b.delayBufferThreshold,
 	)
 	b.L1.cleanup = func() { requireClose(t, b.L1.Stack) }
 }
@@ -529,6 +539,7 @@ func (b *NodeBuilder) BuildL3OnL2(t *testing.T) func() {
 		b.l3Config.withProdConfirmPeriodBlocks,
 		false,
 		b.deployBold,
+		0,
 	)
 
 	b.L3 = buildOnParentChain(
@@ -886,6 +897,21 @@ func BridgeBalance(
 	return tx, res
 }
 
+// AdvanceL1 sends dummy transactions to L1 to create blocks.
+func AdvanceL1(
+	t *testing.T,
+	ctx context.Context,
+	l1client *ethclient.Client,
+	l1info *BlockchainTestInfo,
+	numBlocks int,
+) {
+	for i := 0; i < numBlocks; i++ {
+		SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
+			l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil),
+		})
+	}
+}
+
 func SendSignedTxesInBatchViaL1(
 	t *testing.T,
 	ctx context.Context,
@@ -905,12 +931,7 @@ func SendSignedTxesInBatchViaL1(
 	_, err = EnsureTxSucceeded(ctx, l1client, l1tx)
 	Require(t, err)
 
-	// sending l1 messages creates l1 blocks..
make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) var receipts types.Receipts for _, tx := range delayedTxes { receipt, err := EnsureTxSucceeded(ctx, l2client, tx) @@ -957,12 +978,7 @@ func SendSignedTxViaL1( _, err = EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) - // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) receipt, err := EnsureTxSucceeded(ctx, l2client, delayedTx) Require(t, err) return receipt @@ -1008,12 +1024,7 @@ func SendUnsignedTxViaL1( _, err = EnsureTxSucceeded(ctx, l1client, l1tx) Require(t, err) - // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in - for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "Faucet", 30000, big.NewInt(1e12), nil), - }) - } + AdvanceL1(t, ctx, l1client, l1info, 30) receipt, err := EnsureTxSucceeded(ctx, l2client, unsignedTx) Require(t, err) return receipt @@ -1277,6 +1288,7 @@ func deployOnParentChain( prodConfirmPeriodBlocks bool, chainSupportsBlobs bool, deployBold bool, + delayBufferThreshold uint64, ) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) { parentChainInfo.GenerateAccount("RollupOwner") parentChainInfo.GenerateAccount("Sequencer") @@ -1318,6 +1330,11 @@ func deployOnParentChain( MachineStatus: 1, // Finished EndHistoryRoot: [32]byte{}, } + bufferConfig := rollupgen.BufferConfig{ + Threshold: delayBufferThreshold, // number of blocks + Max: 14400, // 2 days of blocks + ReplenishRateInBasis: 500, // 5% + } cfg := rollupgen.Config{ MiniStakeValues: miniStakeValues, ConfirmPeriodBlocks: 120, @@ -1342,6 +1359,7 @@ func deployOnParentChain( AnyTrustFastConfirmer: common.Address{}, NumBigStepLevel: 3, ChallengeGracePeriodBlocks: 3, + BufferConfig: bufferConfig, } wrappedClient := butil.NewBackendWrapper(parentChainReader.Client(), rpc.LatestBlockNumber) boldAddresses, err := setup.DeployFullRollupStack( diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index 09a00635fb..e4e07d3c2d 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -304,9 +304,13 @@ func (m *ArbitratorMachine) ProveNextStep() []byte { m.mutex.Lock() defer m.mutex.Unlock() - rustProof := C.arbitrator_gen_proof(m.ptr) - proofBytes := C.GoBytes(unsafe.Pointer(rustProof.ptr), C.int(rustProof.len)) - C.arbitrator_free_proof(rustProof) + output := &C.RustBytes{} + C.arbitrator_gen_proof(m.ptr, output) + defer C.free_rust_bytes(*output) + if output.len == 0 { + return nil + } + proofBytes := C.GoBytes(unsafe.Pointer(output.ptr), C.int(output.len)) return proofBytes } diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index 76c19dc8f2..4c74bca695 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -215,9 +215,8 @@ func (v *ArbitratorSpawner) execute( } func (v *ArbitratorSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { - println("LAUCHING ARBITRATOR VALIDATION") 
 	v.count.Add(1)
-	promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](v, func(ctx context.Context) (validator.GoGlobalState, error) {
+	promise := stopwaiter.LaunchPromiseThread(v, func(ctx context.Context) (validator.GoGlobalState, error) {
 		defer v.count.Add(-1)
 		return v.execute(ctx, entry, moduleRoot)
 	})
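
Illustration (not part of the patch): the delay-buffer force-post rule added to maybePostSequencerBatch above reduces to one block-height comparison: post once the current L1 block reaches firstDelayedMsgBlock + Threshold - DelayBufferThresholdMargin. The standalone Go sketch below restates that arithmetic with made-up numbers; forcePostDueToDelayBuffer is a hypothetical helper, not a function in this codebase.

package main

import "fmt"

// forcePostDueToDelayBuffer reports whether the batch poster should force-post now.
//   - latestBlock:          current L1 block number
//   - firstDelayedMsgBlock: L1 block of the first delayed message in the batch being built
//   - threshold:            SequencerInbox delay-buffer threshold, in L1 blocks
//   - margin:               BatchPosterConfig.DelayBufferThresholdMargin, in L1 blocks
func forcePostDueToDelayBuffer(latestBlock, firstDelayedMsgBlock, threshold, margin uint64) bool {
	// Post the batch `margin` blocks before the delayed message would hit the buffer threshold.
	thresholdLimit := firstDelayedMsgBlock + threshold - margin
	return latestBlock >= thresholdLimit
}

func main() {
	// With the default margin of 25 from this patch and a threshold of 100, a delayed
	// message first seen at L1 block 1000 forces a batch at block 1075.
	fmt.Println(forcePostDueToDelayBuffer(1074, 1000, 100, 25)) // false
	fmt.Println(forcePostDueToDelayBuffer(1075, 1000, 100, 25)) // true
}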