Skip to content

Commit

Permalink
Merge branch 'master' into preimage-type-support
Browse files Browse the repository at this point in the history
  • Loading branch information
PlasmaPower committed Sep 29, 2023
2 parents 6ecadc3 + 4b170f7 commit d8fa192
Show file tree
Hide file tree
Showing 62 changed files with 1,270 additions and 596 deletions.
1 change: 1 addition & 0 deletions .github/workflows/arbitrator-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ run-name: Arbitrator CI triggered from @${{ github.actor }} of ${{ github.head_r

on:
workflow_dispatch:
merge_group:
pull_request:
paths:
- 'arbitrator/**'
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/arbitrator-skip-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ name: Arbitrator skip CI
run-name: Arbitrator skip CI triggered from @${{ github.actor }} of ${{ github.head_ref }}

on:
merge_group:
pull_request:
paths-ignore:
- 'arbitrator/**'
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ run-name: Go tests CI triggered from @${{ github.actor }} of ${{ github.head_ref

on:
workflow_dispatch:
merge_group:
pull_request:
push:
branches:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/codeql-analysis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ name: "CodeQL"
on:
push:
branches: [ "master" ]
merge_group:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/docker.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ run-name: Docker build CI triggered from @${{ github.actor }} of ${{ github.head

on:
workflow_dispatch:
merge_group:
pull_request:
push:
branches:
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
<br />
<p align="center">
<a href="https://arbitrum.io/">
<img src="https://arbitrum.io/wp-content/uploads/2021/08/Arbitrum_Symbol-Full-color-White-background-768x840.png" alt="Logo" width="80" height="80">
<img src="https://arbitrum.io/assets/arbitrum/logo_color.png" alt="Logo" width="80" height="80">
</a>

<h3 align="center">Arbitrum Nitro</h3>
Expand All @@ -14,7 +14,7 @@

## About Arbitrum Nitro

<img src="https://arbitrum.io/wp-content/uploads/2021/08/Arbitrum_Symbol-Full-color-White-background-768x840.png" alt="Logo" width="80" height="80">
<img src="https://arbitrum.io/assets/arbitrum/logo_color.png" alt="Logo" width="80" height="80">

Nitro is the latest iteration of the Arbitrum technology. It is a fully integrated, complete
layer 2 optimistic rollup system, including fraud proofs, the sequencer, the token bridges,
Expand Down
5 changes: 2 additions & 3 deletions arbcompress/compress_common.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,13 @@

package arbcompress

const LEVEL_FAST = 0
const LEVEL_WELL = 11
const WINDOW_SIZE = 22 // BROTLI_DEFAULT_WINDOW

// compressedBufferSizeFor returns a buffer size guaranteed to be large
// enough to hold the compressed form of an input of the given length.
// The bound is deliberately generous; the true worst case is
// length + (length >> 14) * 4 + 6.
func compressedBufferSizeFor(length int) int {
	overhead := (length >> 10) * 8
	return length + overhead + 64
}

func CompressFast(input []byte) ([]byte, error) {
return compressLevel(input, LEVEL_FAST)
// CompressLevel compresses input at the given quality level by delegating
// to the platform-specific compressLevel implementation.
// Level 0 is the fastest setting; LEVEL_WELL (11) is the strongest —
// presumably Brotli semantics, given WINDOW_SIZE's BROTLI_DEFAULT_WINDOW
// comment above (confirm against the cgo/wasm implementations).
func CompressLevel(input []byte, level int) ([]byte, error) {
	return compressLevel(input, level)
}
2 changes: 1 addition & 1 deletion arbcompress/compress_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func testCompressDecompress(t *testing.T, data []byte) {
}
testDecompress(t, compressedWell, data)

compressedFast, err := CompressFast(data)
compressedFast, err := CompressLevel(data, 0)
if err != nil {
t.Fatal(err)
}
Expand Down
2 changes: 1 addition & 1 deletion arbitrator/prover/test-cases/go/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ func MerkleSample(data [][]byte, toproove int) (bool, error) {
}

func testCompression(data []byte) {
compressed, err := arbcompress.CompressFast(data)
compressed, err := arbcompress.CompressLevel(data, 0)
if err != nil {
panic(err)
}
Expand Down
17 changes: 14 additions & 3 deletions arbnode/batch_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ var TestBatchPosterConfig = BatchPosterConfig{
L1BlockBoundBypass: time.Hour,
}

func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) {
func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) {
seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client())
if err != nil {
return nil, err
Expand Down Expand Up @@ -253,7 +253,18 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe
dataPosterConfigFetcher := func() *dataposter.DataPosterConfig {
return &config().DataPoster
}
b.dataPoster, err = dataposter.NewDataPoster(dataPosterDB, l1Reader, transactOpts, redisClient, redisLock, dataPosterConfigFetcher, b.getBatchPosterPosition)
b.dataPoster, err = dataposter.NewDataPoster(ctx,
&dataposter.DataPosterOpts{
Database: dataPosterDB,
HeaderReader: l1Reader,
Auth: transactOpts,
RedisClient: redisClient,
RedisLock: redisLock,
Config: dataPosterConfigFetcher,
MetadataRetriever: b.getBatchPosterPosition,
RedisKey: "data-poster.queue",
},
)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -331,7 +342,7 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {

reverted, err := b.checkReverts(ctx, blockNum)
if err != nil {
logLevel := log.Error
logLevel := log.Warn
if strings.Contains(err.Error(), "not found") {
// Just parent chain node inconsistency
// One node sent us a block, but another didn't have it
Expand Down
95 changes: 63 additions & 32 deletions arbnode/dataposter/data_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/go-redis/redis/v8"
"github.com/offchainlabs/nitro/arbnode/dataposter/leveldb"
"github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage"
"github.com/offchainlabs/nitro/arbnode/dataposter/noop"
"github.com/offchainlabs/nitro/arbnode/dataposter/slice"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
Expand Down Expand Up @@ -91,18 +91,29 @@ func parseReplacementTimes(val string) ([]time.Duration, error) {
return append(res, time.Hour*24*365*10), nil
}

func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, auth *bind.TransactOpts, redisClient redis.UniversalClient, redisLock AttemptLocker, config ConfigFetcher, metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error)) (*DataPoster, error) {
initConfig := config()
type DataPosterOpts struct {
Database ethdb.Database
HeaderReader *headerreader.HeaderReader
Auth *bind.TransactOpts
RedisClient redis.UniversalClient
RedisLock AttemptLocker
Config ConfigFetcher
MetadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error)
RedisKey string // Redis storage key
}

func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, error) {
initConfig := opts.Config()
replacementTimes, err := parseReplacementTimes(initConfig.ReplacementTimes)
if err != nil {
return nil, err
}
if headerReader.IsParentChainArbitrum() && !initConfig.UseNoOpStorage {
if opts.HeaderReader.IsParentChainArbitrum() && !initConfig.UseNoOpStorage {
initConfig.UseNoOpStorage = true
log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool")
}
encF := func() storage.EncoderDecoderInterface {
if config().LegacyStorageEncoding {
if opts.Config().LegacyStorageEncoding {
return &storage.LegacyEncoderDecoder{}
}
return &storage.EncoderDecoder{}
Expand All @@ -111,27 +122,33 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a
switch {
case initConfig.UseNoOpStorage:
queue = &noop.Storage{}
case redisClient != nil:
case opts.RedisClient != nil:
var err error
queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF)
queue, err = redisstorage.NewStorage(opts.RedisClient, opts.RedisKey, &initConfig.RedisSigner, encF)
if err != nil {
return nil, err
}
case initConfig.UseLevelDB:
queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
case initConfig.UseDBStorage:
storage := dbstorage.New(opts.Database, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
if initConfig.Dangerous.ClearDBStorage {
if err := storage.PruneAll(ctx); err != nil {
return nil, err
}
}
queue = storage
default:
queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
}
return &DataPoster{
headerReader: headerReader,
client: headerReader.Client(),
sender: auth.From,
signer: auth.Signer,
config: config,
headerReader: opts.HeaderReader,
client: opts.HeaderReader.Client(),
sender: opts.Auth.From,
signer: opts.Auth.Signer,
config: opts.Config,
replacementTimes: replacementTimes,
metadataRetriever: metadataRetriever,
metadataRetriever: opts.MetadataRetriever,
queue: queue,
redisLock: redisLock,
redisLock: opts.RedisLock,
errorCount: make(map[uint64]int),
}, nil
}
Expand Down Expand Up @@ -618,19 +635,26 @@ type DataPosterConfig struct {
ReplacementTimes string `koanf:"replacement-times"`
// This is forcibly disabled if the parent chain is an Arbitrum chain,
// so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly.
WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"`
MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"`
MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"`
TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"`
UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"`
MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"`
MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"`
MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"`
NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"`
AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"`
UseLevelDB bool `koanf:"use-leveldb"`
UseNoOpStorage bool `koanf:"use-noop-storage"`
LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"`
WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"`
MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"`
MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"`
TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"`
UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"`
MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"`
MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"`
MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"`
NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"`
AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"`
UseDBStorage bool `koanf:"use-db-storage"`
UseNoOpStorage bool `koanf:"use-noop-storage"`
LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"`
Dangerous DangerousConfig `koanf:"dangerous"`
}

type DangerousConfig struct {
// This should be used with caution, only when dataposter somehow gets in a
// bad state and we require clearing it.
ClearDBStorage bool `koanf:"clear-dbstorage"`
}

// ConfigFetcher function type is used instead of directly passing config so
Expand All @@ -649,10 +673,16 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at")
f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee")
f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance")
f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled")
f.Bool(prefix+".use-db-storage", DefaultDataPosterConfig.UseDBStorage, "uses database storage when enabled")
f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything")
f.Bool(prefix+".legacy-storage-encoding", DefaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)")

signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f)
addDangerousOptions(prefix+".dangerous", f)
}

func addDangerousOptions(prefix string, f *pflag.FlagSet) {
f.Bool(prefix+".clear-dbstorage", DefaultDataPosterConfig.Dangerous.ClearDBStorage, "clear database storage")
}

var DefaultDataPosterConfig = DataPosterConfig{
Expand All @@ -665,9 +695,10 @@ var DefaultDataPosterConfig = DataPosterConfig{
MaxTipCapGwei: 5,
NonceRbfSoftConfs: 1,
AllocateMempoolBalance: true,
UseLevelDB: true,
UseDBStorage: true,
UseNoOpStorage: false,
LegacyStorageEncoding: true,
Dangerous: DangerousConfig{ClearDBStorage: false},
}

var DefaultDataPosterConfigForValidator = func() DataPosterConfig {
Expand All @@ -687,7 +718,7 @@ var TestDataPosterConfig = DataPosterConfig{
MaxTipCapGwei: 5,
NonceRbfSoftConfs: 1,
AllocateMempoolBalance: true,
UseLevelDB: false,
UseDBStorage: false,
UseNoOpStorage: false,
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright 2021-2023, Offchain Labs, Inc.
// For license information, see https://github.com/nitro/blob/master/LICENSE

package leveldb
package dbstorage

import (
"bytes"
Expand All @@ -10,13 +10,14 @@ import (
"fmt"
"strconv"

"github.com/cockroachdb/pebble"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
"github.com/syndtr/goleveldb/leveldb"
)

// Storage implements leveldb based storage for batch poster.
// Storage implements db based storage for batch poster.
type Storage struct {
db ethdb.Database
encDec storage.EncoderDecoderF
Expand Down Expand Up @@ -79,6 +80,18 @@ func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, er
return s.encDec().Decode(val)
}

// PruneAll removes every queued transaction from the storage by pruning
// up to (and including) the last stored item's index.
// NOTE(review): if the storage is empty, lastItemIdx presumably returns an
// error and PruneAll fails rather than no-opping — confirm whether callers
// (e.g. the dangerous clear-dbstorage path) can hit this on a fresh database.
func (s *Storage) PruneAll(ctx context.Context) error {
	// Indices are stored as decimal strings, so the raw key bytes must be
	// parsed back into an integer before they can be used as a prune bound.
	idx, err := s.lastItemIdx(ctx)
	if err != nil {
		return fmt.Errorf("pruning all keys: %w", err)
	}
	until, err := strconv.Atoi(string(idx))
	if err != nil {
		return fmt.Errorf("converting last item index bytes to integer: %w", err)
	}
	// Prune is exclusive of its bound, so +1 makes the last item included.
	return s.Prune(ctx, uint64(until+1))
}

func (s *Storage) Prune(ctx context.Context, until uint64) error {
cnt, err := s.Length(ctx)
if err != nil {
Expand Down Expand Up @@ -175,5 +188,5 @@ func (s *Storage) IsPersistent() bool {
}

func isErrNotFound(err error) bool {
return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, memorydb.ErrMemorydbNotFound)
return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, pebble.ErrNotFound) || errors.Is(err, memorydb.ErrMemorydbNotFound)
}
Loading

0 comments on commit d8fa192

Please sign in to comment.