
Commit

Merge branch 'master' into remove-precompilesgen-dependency
ganeshvanahalli authored Sep 13, 2023
2 parents fdcb21b + ad8cfe5 commit abeec66
Showing 17 changed files with 126 additions and 86 deletions.
67 changes: 38 additions & 29 deletions arbnode/batch_poster.go
@@ -11,6 +11,7 @@ import (
"fmt"
"math"
"math/big"
"strings"
"sync/atomic"
"time"

@@ -167,19 +168,20 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
var DefaultBatchPosterConfig = BatchPosterConfig{
Enable: false,
DisableDasFallbackStoreDataOnChain: false,
MaxSize: 100000,
PollInterval: time.Second * 10,
ErrorDelay: time.Second * 10,
MaxDelay: time.Hour,
WaitForMaxDelay: false,
CompressionLevel: brotli.BestCompression,
DASRetentionPeriod: time.Hour * 24 * 15,
GasRefunderAddress: "",
ExtraBatchGas: 50_000,
DataPoster: dataposter.DefaultDataPosterConfig,
ParentChainWallet: DefaultBatchPosterL1WalletConfig,
L1BlockBound: "",
L1BlockBoundBypass: time.Hour,
// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
MaxSize: 100000,
PollInterval: time.Second * 10,
ErrorDelay: time.Second * 10,
MaxDelay: time.Hour,
WaitForMaxDelay: false,
CompressionLevel: brotli.BestCompression,
DASRetentionPeriod: time.Hour * 24 * 15,
GasRefunderAddress: "",
ExtraBatchGas: 50_000,
DataPoster: dataposter.DefaultDataPosterConfig,
ParentChainWallet: DefaultBatchPosterL1WalletConfig,
L1BlockBound: "",
L1BlockBoundBypass: time.Hour,
}

var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
@@ -261,12 +263,13 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe
// contain reverted batch_poster transaction.
// It returns true if it finds batch posting needs to halt, which is true if a batch reverts
// unless the data poster is configured with noop storage which can tolerate reverts.
func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, error) {
if from > to {
return false, fmt.Errorf("wrong range, from: %d is more to: %d", from, to)
// From must be a pointer to the starting block, which is updated after each block is checked for reverts
func (b *BatchPoster) checkReverts(ctx context.Context, from *int64, to int64) (bool, error) {
if *from > to {
return false, fmt.Errorf("wrong range, from: %d > to: %d", from, to)
}
for idx := from; idx <= to; idx++ {
number := big.NewInt(idx)
for ; *from <= to; *from++ {
number := big.NewInt(*from)
block, err := b.l1Reader.Client().BlockByNumber(ctx, number)
if err != nil {
return false, fmt.Errorf("getting block: %v by number: %w", number, err)
@@ -276,7 +279,7 @@ func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, e
if err != nil {
return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err)
}
if bytes.Equal(from.Bytes(), b.dataPoster.Sender().Bytes()) {
if from == b.dataPoster.Sender() {
r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash())
if err != nil {
return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err)
@@ -302,7 +305,7 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {
headerCh, unsubscribe := b.l1Reader.Subscribe(false)
defer unsubscribe()

last := int64(0) // number of last seen block
nextToCheck := int64(0) // the first unchecked block
for {
// Poll until:
// - L1 headers reader channel is closed, or
@@ -311,31 +314,37 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {
select {
case h, ok := <-headerCh:
if !ok {
log.Info("L1 headers channel has been closed")
log.Info("L1 headers channel checking for batch poster reverts has been closed")
return
}
// If this is the first block header, set last seen as number-1.
// We may see same block number again if there is L1 reorg, in that
// case we check the block again.
if last == 0 || last == h.Number.Int64() {
last = h.Number.Int64() - 1
if nextToCheck == 0 || nextToCheck == h.Number.Int64() {
nextToCheck = h.Number.Int64()
}
if h.Number.Int64()-last > 100 {
log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", last, "current", h.Number)
last = h.Number.Int64()
if h.Number.Int64()-nextToCheck > 100 {
log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", nextToCheck, "current", h.Number)
nextToCheck = h.Number.Int64()
continue
}

reverted, err := b.checkReverts(ctx, last+1, h.Number.Int64())
reverted, err := b.checkReverts(ctx, &nextToCheck, h.Number.Int64())
if err != nil {
log.Error("Checking batch reverts", "error", err)
logLevel := log.Error
if strings.Contains(err.Error(), "not found") {
// Just parent chain node inconsistency
// One node sent us a block, but another didn't have it
// We'll try to check this block again next loop
logLevel = log.Debug
}
logLevel("Error checking batch reverts", "err", err)
continue
}
if reverted {
b.batchReverted.Store(true)
return
}
last = h.Number.Int64()
case <-ctx.Done():
return
}
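Note on the new signature: passing `from` as a pointer is what lets pollForReverts pick up exactly where an aborted scan stopped. Below is a minimal, self-contained sketch of that resumption pattern; checkRange and checkBlock are hypothetical stand-ins for illustration, not the real client calls.

```go
package main

import "fmt"

// checkRange advances *from as each block is processed, so progress
// survives an early error return. checkBlock stands in for the real
// per-block revert check against the parent chain.
func checkRange(from *int64, to int64, checkBlock func(int64) error) error {
	for ; *from <= to; *from++ {
		if err := checkBlock(*from); err != nil {
			return err // *from still points at the block that failed
		}
	}
	return nil
}

func main() {
	nextToCheck := int64(100)
	err := checkRange(&nextToCheck, 105, func(n int64) error {
		if n == 103 {
			return fmt.Errorf("block %d not found", n)
		}
		return nil
	})
	// nextToCheck == 103: the next poll retries the missing block
	// instead of silently skipping 100-102 or re-scanning them.
	fmt.Println(err, nextToCheck)
}
```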
12 changes: 6 additions & 6 deletions arbnode/dataposter/data_poster.go
@@ -111,16 +111,16 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a
switch {
case initConfig.UseNoOpStorage:
queue = &noop.Storage{}
case initConfig.UseLevelDB:
queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
case redisClient == nil:
queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
default:
case redisClient != nil:
var err error
queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF)
if err != nil {
return nil, err
}
case initConfig.UseLevelDB:
queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
default:
queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
}
return &DataPoster{
headerReader: headerReader,
@@ -665,7 +665,7 @@ var DefaultDataPosterConfig = DataPosterConfig{
MaxTipCapGwei: 5,
NonceRbfSoftConfs: 1,
AllocateMempoolBalance: true,
UseLevelDB: false,
UseLevelDB: true,
UseNoOpStorage: false,
LegacyStorageEncoding: true,
}
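The reordered switch changes which queue backend wins when several are possible. A small sketch of the new precedence, with the config flags reduced to plain booleans; selectQueueKind is a hypothetical helper for illustration, not part of the data poster.

```go
package main

import "fmt"

// selectQueueKind mirrors the reordered switch: no-op storage wins,
// then a configured Redis client, then LevelDB (now enabled by default),
// and the in-memory slice storage is the final fallback.
func selectQueueKind(useNoOp, haveRedis, useLevelDB bool) string {
	switch {
	case useNoOp:
		return "noop"
	case haveRedis:
		return "redis"
	case useLevelDB:
		return "leveldb"
	default:
		return "slice"
	}
}

func main() {
	// With DefaultDataPosterConfig now setting UseLevelDB: true,
	// a node without Redis persists its queue in LevelDB.
	fmt.Println(selectQueueKind(false, false, true)) // leveldb
	fmt.Println(selectQueueKind(false, true, true))  // redis still wins
}
```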
1 change: 1 addition & 0 deletions arbnode/dataposter/storage/storage.go
@@ -15,6 +15,7 @@ var (
ErrStorageRace = errors.New("storage race error")

BlockValidatorPrefix string = "v" // the prefix for all block validator keys
StakerPrefix string = "S" // the prefix for all staker keys
BatchPosterPrefix string = "b" // the prefix for all batch poster keys
// TODO(anodar): move everything else from schema.go file to here once
// execution split is complete.
4 changes: 2 additions & 2 deletions arbnode/delayed_sequencer.go
@@ -52,14 +52,14 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) {
var DefaultDelayedSequencerConfig = DelayedSequencerConfig{
Enable: false,
FinalizeDistance: 20,
RequireFullFinality: true,
RequireFullFinality: false,
UseMergeFinality: true,
}

var TestDelayedSequencerConfig = DelayedSequencerConfig{
Enable: true,
FinalizeDistance: 20,
RequireFullFinality: true,
RequireFullFinality: false,
UseMergeFinality: true,
}

1 change: 1 addition & 0 deletions arbnode/execution/sequencer.go
@@ -110,6 +110,7 @@ var DefaultSequencerConfig = SequencerConfig{
NonceCacheSize: 1024,
Dangerous: DefaultDangerousSequencerConfig,
// 95% of the default batch poster limit, leaving 5KB for headers and such
// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
MaxTxDataSize: 95000,
NonceFailureCacheSize: 1024,
NonceFailureCacheExpiry: time.Second,
10 changes: 5 additions & 5 deletions arbnode/execution/tx_pre_checker.go
@@ -145,11 +145,6 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
if config.Strictness < TxPreCheckerStrictnessLikelyCompatible {
return nil
}
balance := statedb.GetBalance(sender)
cost := tx.Cost()
if arbmath.BigLessThan(balance, cost) {
return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost)
}
if options != nil {
if err := options.Check(extraInfo.L1BlockNumber, header.Time, statedb); err != nil {
conditionalTxRejectedByTxPreCheckerCurrentStateCounter.Inc(1)
@@ -185,6 +180,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
conditionalTxAcceptedByTxPreCheckerOldStateCounter.Inc(1)
}
}
balance := statedb.GetBalance(sender)
cost := tx.Cost()
if arbmath.BigLessThan(balance, cost) {
return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost)
}
if config.Strictness >= TxPreCheckerStrictnessFullValidation && tx.Nonce() > stateNonce {
return MakeNonceError(sender, tx.Nonce(), stateNonce)
}
20 changes: 6 additions & 14 deletions arbnode/node.go
@@ -235,20 +235,12 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com
}
}

func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) {
arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
l1Reader, err := headerreader.New(ctx, l1client, readerConfig, arbSys, true)
if err != nil {
return nil, err
}
l1Reader.Start(ctx)
defer l1Reader.StopAndWait()

func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config) (*chaininfo.RollupAddresses, error) {
if config.WasmModuleRoot == (common.Hash{}) {
return nil, errors.New("no machine specified")
}

rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth)
rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth)
if err != nil {
return nil, fmt.Errorf("error deploying rollup creator: %w", err)
}
@@ -267,7 +259,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b
if err != nil {
return nil, fmt.Errorf("error submitting create rollup tx: %w", err)
}
receipt, err := l1Reader.WaitForTxApproval(ctx, tx)
receipt, err := parentChainReader.WaitForTxApproval(ctx, tx)
if err != nil {
return nil, fmt.Errorf("error executing create rollup tx: %w", err)
}
@@ -547,7 +539,7 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error {
return nil
}

func ValidatorDataposter(
func StakerDataposter(
db ethdb.Database, l1Reader *headerreader.HeaderReader,
transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor,
) (*dataposter.DataPoster, error) {
@@ -812,8 +804,8 @@ func createNodeImpl(
var messagePruner *MessagePruner

if config.Staker.Enable {
dp, err := ValidatorDataposter(
rawdb.NewTable(arbDb, storage.BlockValidatorPrefix),
dp, err := StakerDataposter(
rawdb.NewTable(arbDb, storage.StakerPrefix),
l1Reader,
txOptsValidator,
configFetcher,
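The staker's data poster now gets its own key prefix ("S") instead of reusing the block validator's ("v"). A rough sketch of how a prefixed table keeps the keyspaces apart inside the shared arbDb, assuming go-ethereum's rawdb helpers behave as in current releases:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Writes through the prefixed table land under "S"+key in the
	// underlying database, so staker keys cannot collide with the
	// block validator's "v"-prefixed keys.
	stakerTable := rawdb.NewTable(db, "S")
	if err := stakerTable.Put([]byte("nonce"), []byte{1}); err != nil {
		panic(err)
	}

	val, err := db.Get([]byte("Snonce"))
	fmt.Println(val, err) // [1] <nil>
}
```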
6 changes: 5 additions & 1 deletion cmd/chaininfo/arbitrum_chain_info.json
@@ -2,6 +2,7 @@
{
"chain-name": "arb1",
"parent-chain-id": 1,
"parent-chain-is-arbitrum": false,
"sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc",
"feed-url": "wss://arb1.arbitrum.io/feed",
"has-genesis-state": true,
@@ -51,6 +52,7 @@
{
"chain-name": "nova",
"parent-chain-id": 1,
"parent-chain-is-arbitrum": false,
"sequencer-url": "https://nova.arbitrum.io/rpc",
"feed-url": "wss://nova.arbitrum.io/feed",
"das-index-url": "https://nova.arbitrum.io/das-servers",
@@ -100,6 +102,7 @@
{
"chain-name": "goerli-rollup",
"parent-chain-id": 5,
"parent-chain-is-arbitrum": false,
"sequencer-url": "https://goerli-rollup.arbitrum.io/rpc",
"feed-url": "wss://goerli-rollup.arbitrum.io/feed",
"chain-config":
@@ -215,9 +218,10 @@
}
}
},
{
{
"chain-id": 421614,
"parent-chain-id": 11155111,
"parent-chain-is-arbitrum": false,
"chain-name": "sepolia-rollup",
"sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc",
"feed-url": "wss://sepolia-rollup.arbitrum.io/feed",
5 changes: 3 additions & 2 deletions cmd/chaininfo/chain_info.go
@@ -18,8 +18,9 @@ import (
var DefaultChainInfo []byte

type ChainInfo struct {
ChainName string `json:"chain-name"`
ParentChainId uint64 `json:"parent-chain-id"`
ChainName string `json:"chain-name"`
ParentChainId uint64 `json:"parent-chain-id"`
ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"`
// This is the forwarding target to submit transactions to, called the sequencer URL for clarity
SequencerUrl string `json:"sequencer-url"`
FeedUrl string `json:"feed-url"`
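Using *bool rather than bool here is deliberate: it lets a missing parent-chain-is-arbitrum field (nil) be told apart from an explicit false, which the fallback logic in nitro.go relies on. A minimal sketch of that distinction; chainInfo below is a stripped-down stand-in for the real struct.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A *bool keeps three states: nil (field omitted), true, and false.
type chainInfo struct {
	ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"`
}

func main() {
	var withField, withoutField chainInfo
	_ = json.Unmarshal([]byte(`{"parent-chain-is-arbitrum": false}`), &withField)
	_ = json.Unmarshal([]byte(`{}`), &withoutField)

	fmt.Println(withField.ParentChainIsArbitrum == nil)    // false: explicitly set
	fmt.Println(withoutField.ParentChainIsArbitrum == nil) // true: missing, trigger the heuristic fallback
}
```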
20 changes: 14 additions & 6 deletions cmd/deploy/deploy.go
@@ -127,13 +127,19 @@ func main() {
panic(fmt.Errorf("failed to deserialize chain config: %w", err))
}

l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig })

[Check failure on line 130 in cmd/deploy/deploy.go: GitHub Actions / Go Tests (defaults, race, challenge) report "not enough arguments in call to headerreader.New"]
if err != nil {
panic(fmt.Errorf("failed to create header reader: %w", err))
}
l1Reader.Start(ctx)
defer l1Reader.StopAndWait()

deployedAddresses, err := arbnode.DeployOnL1(
ctx,
l1client,
l1Reader,
l1TransactionOpts,
sequencerAddress,
*authorizevalidators,
func() *headerreader.Config { return &headerReaderConfig },
arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress),
)
if err != nil {
@@ -148,12 +154,14 @@ func main() {
if err := os.WriteFile(*outfile, deployData, 0600); err != nil {
panic(err)
}
parentChainIsArbitrum := l1Reader.IsParentChainArbitrum()
chainsInfo := []chaininfo.ChainInfo{
{
ChainName: *l2ChainName,
ParentChainId: l1ChainId.Uint64(),
ChainConfig: &chainConfig,
RollupAddresses: deployedAddresses,
ChainName: *l2ChainName,
ParentChainId: l1ChainId.Uint64(),
ParentChainIsArbitrum: &parentChainIsArbitrum,
ChainConfig: &chainConfig,
RollupAddresses: deployedAddresses,
},
}
chainsInfoJson, err := json.Marshal(chainsInfo)
21 changes: 20 additions & 1 deletion cmd/nitro/nitro.go
@@ -360,7 +360,6 @@ func mainImpl() int {
l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys, true)
if err != nil {
log.Crit("failed to get L1 headerreader", "error", err)

}

// Just create validator smart wallet if needed then exit
@@ -771,6 +770,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
if err != nil {
return false, err
}
var parentChainIsArbitrum bool
if chainInfo.ParentChainIsArbitrum != nil {
parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum
} else {
log.Warn("Chain information parentChainIsArbitrum field missing, in the future this will be required", "chainId", chainId, "parentChainId", chainInfo.ParentChainId)
_, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "")
if err == nil {
parentChainIsArbitrum = true
}
}
chainDefaults := map[string]interface{}{
"persistent.chain": chainInfo.ChainName,
"chain.id": chainInfo.ChainConfig.ChainID.Uint64(),
@@ -790,6 +799,16 @@
if !chainInfo.HasGenesisState {
chainDefaults["init.empty"] = true
}
if parentChainIsArbitrum {
l2MaxTxSize := execution.DefaultSequencerConfig.MaxTxDataSize
bufferSpace := 5000
if l2MaxTxSize < bufferSpace*2 {
return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace)
}
safeBatchSize := l2MaxTxSize - bufferSpace
chainDefaults["node.batch-poster.max-size"] = safeBatchSize
chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - bufferSpace
}
err = k.Load(confmap.Provider(chainDefaults, "."), nil)
if err != nil {
return false, err
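With the defaults shown in this diff, the new L3 branch derates both limits by bufferSpace. A quick sketch of the arithmetic, with values copied from the defaults above:

```go
package main

import "fmt"

func main() {
	// execution.DefaultSequencerConfig.MaxTxDataSize and the
	// bufferSpace constant from applyChainParameters.
	l2MaxTxSize := 95000
	bufferSpace := 5000

	safeBatchSize := l2MaxTxSize - bufferSpace
	fmt.Println("node.batch-poster.max-size:", safeBatchSize)                   // 90000
	fmt.Println("node.sequencer.max-tx-data-size:", safeBatchSize-bufferSpace)  // 85000
}
```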
