Skip to content

Commit

Permalink
Merge pull request #1911 from OffchainLabs/cleanup
Browse files Browse the repository at this point in the history
Use options struct for batchposter to make callsites more readable
  • Loading branch information
anodar authored Oct 12, 2023
2 parents efacd68 + 4579b57 commit 71cb8aa
Show file tree
Hide file tree
Showing 3 changed files with 62 additions and 26 deletions.
60 changes: 36 additions & 24 deletions arbnode/batch_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -217,68 +217,80 @@ var TestBatchPosterConfig = BatchPosterConfig{
L1BlockBoundBypass: time.Hour,
}

func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) {
seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client())
// BatchPosterOpts bundles the dependencies required by NewBatchPoster so
// that call sites can name each argument instead of passing a long
// positional parameter list.
type BatchPosterOpts struct {
// DataPosterDB is the database handed to the underlying dataposter
// (passed through as DataPosterOpts.Database).
DataPosterDB ethdb.Database
// L1Reader supplies the L1 client used to bind the sequencer inbox and
// bridge contracts, and is passed to the dataposter as its header reader.
L1Reader *headerreader.HeaderReader
// Inbox is the inbox tracker stored on the constructed BatchPoster.
Inbox *InboxTracker
// Streamer is the transaction streamer stored on the constructed BatchPoster.
Streamer *TransactionStreamer
// SyncMonitor reports sync status; its Synced() result gates the redis lock.
SyncMonitor *SyncMonitor
// Config fetches the current BatchPosterConfig; it is called repeatedly
// (validation, redis URL/lock, gas refunder, dataposter config), so it
// must be safe to invoke more than once.
Config BatchPosterConfigFetcher
// DeployInfo provides the SequencerInbox and Bridge contract addresses.
DeployInfo *chaininfo.RollupAddresses
// TransactOpts supplies the posting account; its From address is used in
// the access list and it is passed to the dataposter as Auth.
TransactOpts *bind.TransactOpts
// DAWriter is the data availability writer stored on the BatchPoster;
// it may be nil (the system test passes nil) — presumably DA is optional.
DAWriter das.DataAvailabilityServiceWriter
}

func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) {
seqInbox, err := bridgegen.NewSequencerInbox(opts.DeployInfo.SequencerInbox, opts.L1Reader.Client())
if err != nil {
return nil, err
}
bridge, err := bridgegen.NewBridge(deployInfo.Bridge, l1Reader.Client())
bridge, err := bridgegen.NewBridge(opts.DeployInfo.Bridge, opts.L1Reader.Client())
if err != nil {
return nil, err
}
if err = config().Validate(); err != nil {
if err = opts.Config().Validate(); err != nil {
return nil, err
}
seqInboxABI, err := bridgegen.SequencerInboxMetaData.GetAbi()
if err != nil {
return nil, err
}
redisClient, err := redisutil.RedisClientFromURL(config().RedisUrl)
redisClient, err := redisutil.RedisClientFromURL(opts.Config().RedisUrl)
if err != nil {
return nil, err
}
redisLockConfigFetcher := func() *redislock.SimpleCfg {
simpleRedisLockConfig := config().RedisLock
simpleRedisLockConfig := opts.Config().RedisLock
simpleRedisLockConfig.Key = batchPosterSimpleRedisLockKey
return &simpleRedisLockConfig
}
redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return syncMonitor.Synced() })
redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return opts.SyncMonitor.Synced() })
if err != nil {
return nil, err
}
b := &BatchPoster{
l1Reader: l1Reader,
inbox: inbox,
streamer: streamer,
syncMonitor: syncMonitor,
config: config,
l1Reader: opts.L1Reader,
inbox: opts.Inbox,
streamer: opts.Streamer,
syncMonitor: opts.SyncMonitor,
config: opts.Config,
bridge: bridge,
seqInbox: seqInbox,
seqInboxABI: seqInboxABI,
seqInboxAddr: deployInfo.SequencerInbox,
gasRefunderAddr: config().gasRefunder,
bridgeAddr: deployInfo.Bridge,
daWriter: daWriter,
seqInboxAddr: opts.DeployInfo.SequencerInbox,
gasRefunderAddr: opts.Config().gasRefunder,
bridgeAddr: opts.DeployInfo.Bridge,
daWriter: opts.DAWriter,
redisLock: redisLock,
accessList: func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList {
return AccessList(&AccessListOpts{
SequencerInboxAddr: deployInfo.SequencerInbox,
DataPosterAddr: transactOpts.From,
BridgeAddr: deployInfo.Bridge,
GasRefunderAddr: config().gasRefunder,
SequencerInboxAddr: opts.DeployInfo.SequencerInbox,
DataPosterAddr: opts.TransactOpts.From,
BridgeAddr: opts.DeployInfo.Bridge,
GasRefunderAddr: opts.Config().gasRefunder,
SequencerInboxAccs: SequencerInboxAccs,
AfterDelayedMessagesRead: AfterDelayedMessagesRead,
})
},
}
dataPosterConfigFetcher := func() *dataposter.DataPosterConfig {
return &config().DataPoster
return &(opts.Config().DataPoster)
}
b.dataPoster, err = dataposter.NewDataPoster(ctx,
&dataposter.DataPosterOpts{
Database: dataPosterDB,
HeaderReader: l1Reader,
Auth: transactOpts,
Database: opts.DataPosterDB,
HeaderReader: opts.L1Reader,
Auth: opts.TransactOpts,
RedisClient: redisClient,
RedisLock: redisLock,
Config: dataPosterConfigFetcher,
Expand Down
12 changes: 11 additions & 1 deletion arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -842,7 +842,17 @@ func createNodeImpl(
if txOptsBatchPoster == nil {
return nil, errors.New("batchposter, but no TxOpts")
}
batchPoster, err = NewBatchPoster(ctx, rawdb.NewTable(arbDb, storage.BatchPosterPrefix), l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter)
batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
L1Reader: l1Reader,
Inbox: inboxTracker,
Streamer: txStreamer,
SyncMonitor: syncMonitor,
Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster },
DeployInfo: deployInfo,
TransactOpts: txOptsBatchPoster,
DAWriter: daWriter,
})
if err != nil {
return nil, err
}
Expand Down
16 changes: 15 additions & 1 deletion system_tests/batch_poster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,19 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
for i := 0; i < parallelBatchPosters; i++ {
// Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race
batchPosterConfig := conf.BatchPoster
batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil)
batchPoster, err := arbnode.NewBatchPoster(ctx,
&arbnode.BatchPosterOpts{
DataPosterDB: nil,
L1Reader: nodeA.L1Reader,
Inbox: nodeA.InboxTracker,
Streamer: nodeA.TxStreamer,
SyncMonitor: nodeA.SyncMonitor,
Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig },
DeployInfo: nodeA.DeployInfo,
TransactOpts: &seqTxOpts,
DAWriter: nil,
},
)
Require(t, err)
batchPoster.Start(ctx)
defer batchPoster.StopAndWait()
Expand All @@ -104,6 +116,8 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
}
}

// TODO: factor this out into a separate test case and skip it, or delete
// this code entirely.
// I've locally confirmed that this passes when the clique period is set to 1.
// However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl.
if false {
Expand Down

0 comments on commit 71cb8aa

Please sign in to comment.